1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
32 #include "safe-ctype.h"
35 #include "libiberty.h"
36 #include "opcode/arm.h"
40 #include "dw2gencfi.h"
43 #include "dwarf2dbg.h"
46 /* Must be at least the size of the largest unwind opcode (currently two). */
47 #define ARM_OPCODE_CHUNK_SIZE 8
49 /* This structure holds the unwinding state. */
54 symbolS * table_entry;
55 symbolS * personality_routine;
56 int personality_index;
57 /* The segment containing the function. */
60 /* Opcodes generated from this function. */
61 unsigned char * opcodes;
64 /* The number of bytes pushed to the stack. */
66 /* We don't add stack adjustment opcodes immediately so that we can merge
67 multiple adjustments. We can also omit the final adjustment
68 when using a frame pointer. */
69 offsetT pending_offset;
70 /* These two fields are set by both unwind_movsp and unwind_setfp. They
71 hold the reg+offset to use when restoring sp from a frame pointer. */
74 /* Nonzero if an unwind_setfp directive has been seen. */
76 /* Nonzero if the last opcode restores sp from fp_reg. */
77 unsigned sp_restored:1;
82 /* Results from operand parsing worker functions. */
86 PARSE_OPERAND_SUCCESS,
88 PARSE_OPERAND_FAIL_NO_BACKTRACK
89 } parse_operand_result;
98 /* Types of processor to assemble for. */
100 /* The code that was here used to select a default CPU depending on compiler
101 pre-defines which were only present when doing native builds, thus
102 changing gas' default behaviour depending upon the build host.
104 If you have a target that requires a default CPU option then the you
105 should define CPU_DEFAULT here. */
110 # define FPU_DEFAULT FPU_ARCH_FPA
111 # elif defined (TE_NetBSD)
113 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
115 /* Legacy a.out format. */
116 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
118 # elif defined (TE_VXWORKS)
119 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
121 /* For backwards compatibility, default to FPA. */
122 # define FPU_DEFAULT FPU_ARCH_FPA
124 #endif /* ifndef FPU_DEFAULT */
126 #define streq(a, b) (strcmp (a, b) == 0)
128 static arm_feature_set cpu_variant;
129 static arm_feature_set arm_arch_used;
130 static arm_feature_set thumb_arch_used;
132 /* Flags stored in private area of BFD structure. */
133 static int uses_apcs_26 = FALSE;
134 static int atpcs = FALSE;
135 static int support_interwork = FALSE;
136 static int uses_apcs_float = FALSE;
137 static int pic_code = FALSE;
138 static int fix_v4bx = FALSE;
139 /* Warn on using deprecated features. */
140 static int warn_on_deprecated = TRUE;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE (ARM_EXT_V6M, 0);
189 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
190 static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
191 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
192 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
193 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
194 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
195 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
196 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
197 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
198 static const arm_feature_set arm_ext_v8 = ARM_FEATURE (ARM_EXT_V8, 0);
199 static const arm_feature_set arm_ext_m =
200 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, 0);
201 static const arm_feature_set arm_ext_mp = ARM_FEATURE (ARM_EXT_MP, 0);
202 static const arm_feature_set arm_ext_sec = ARM_FEATURE (ARM_EXT_SEC, 0);
203 static const arm_feature_set arm_ext_os = ARM_FEATURE (ARM_EXT_OS, 0);
204 static const arm_feature_set arm_ext_adiv = ARM_FEATURE (ARM_EXT_ADIV, 0);
205 static const arm_feature_set arm_ext_virt = ARM_FEATURE (ARM_EXT_VIRT, 0);
207 static const arm_feature_set arm_arch_any = ARM_ANY;
208 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
209 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
210 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
211 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
213 static const arm_feature_set arm_cext_iwmmxt2 =
214 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
215 static const arm_feature_set arm_cext_iwmmxt =
216 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
217 static const arm_feature_set arm_cext_xscale =
218 ARM_FEATURE (0, ARM_CEXT_XSCALE);
219 static const arm_feature_set arm_cext_maverick =
220 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
221 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
222 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
223 static const arm_feature_set fpu_vfp_ext_v1xd =
224 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
225 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
226 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
227 static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
228 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
229 static const arm_feature_set fpu_vfp_ext_d32 =
230 ARM_FEATURE (0, FPU_VFP_EXT_D32);
231 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
232 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
233 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
234 static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
235 static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
236 static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
237 static const arm_feature_set fpu_vfp_ext_armv8 =
238 ARM_FEATURE (0, FPU_VFP_EXT_ARMV8);
239 static const arm_feature_set fpu_neon_ext_armv8 =
240 ARM_FEATURE (0, FPU_NEON_EXT_ARMV8);
241 static const arm_feature_set fpu_crypto_ext_armv8 =
242 ARM_FEATURE (0, FPU_CRYPTO_EXT_ARMV8);
244 static int mfloat_abi_opt = -1;
245 /* Record user cpu selection for object attributes. */
246 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
247 /* Must be long enough to hold any of the names in arm_cpus. */
248 static char selected_cpu_name[16];
250 /* Return if no cpu was selected on command-line. */
252 no_cpu_selected (void)
254 return selected_cpu.core == arm_arch_none.core
255 && selected_cpu.coproc == arm_arch_none.coproc;
260 static int meabi_flags = EABI_DEFAULT;
262 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
265 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
270 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
275 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
276 symbolS * GOT_symbol;
279 /* 0: assemble for ARM,
280 1: assemble for Thumb,
281 2: assemble for Thumb even though target CPU does not support thumb
283 static int thumb_mode = 0;
284 /* A value distinct from the possible values for thumb_mode that we
285 can use to record whether thumb_mode has been copied into the
286 tc_frag_data field of a frag. */
287 #define MODE_RECORDED (1 << 4)
289 /* Specifies the intrinsic IT insn behavior mode. */
290 enum implicit_it_mode
292 IMPLICIT_IT_MODE_NEVER = 0x00,
293 IMPLICIT_IT_MODE_ARM = 0x01,
294 IMPLICIT_IT_MODE_THUMB = 0x02,
295 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
297 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
299 /* If unified_syntax is true, we are processing the new unified
300 ARM/Thumb syntax. Important differences from the old ARM mode:
302 - Immediate operands do not require a # prefix.
303 - Conditional affixes always appear at the end of the
304 instruction. (For backward compatibility, those instructions
305 that formerly had them in the middle, continue to accept them
307 - The IT instruction may appear, and if it does is validated
308 against subsequent conditional affixes. It does not generate
311 Important differences from the old Thumb mode:
313 - Immediate operands do not require a # prefix.
314 - Most of the V6T2 instructions are only available in unified mode.
315 - The .N and .W suffixes are recognized and honored (it is an error
316 if they cannot be honored).
317 - All instructions set the flags if and only if they have an 's' affix.
318 - Conditional affixes may be used. They are validated against
319 preceding IT instructions. Unlike ARM mode, you cannot use a
320 conditional affix except in the scope of an IT instruction. */
322 static bfd_boolean unified_syntax = FALSE;
337 enum neon_el_type type;
341 #define NEON_MAX_TYPE_ELS 4
345 struct neon_type_el el[NEON_MAX_TYPE_ELS];
349 enum it_instruction_type
354 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
355 if inside, should be the last one. */
356 NEUTRAL_IT_INSN, /* This could be either inside or outside,
357 i.e. BKPT and NOP. */
358 IT_INSN /* The IT insn has been parsed. */
361 /* The maximum number of operands we need. */
362 #define ARM_IT_MAX_OPERANDS 6
367 unsigned long instruction;
371 /* "uncond_value" is set to the value in place of the conditional field in
372 unconditional versions of the instruction, or -1 if nothing is
375 struct neon_type vectype;
376 /* This does not indicate an actual NEON instruction, only that
377 the mnemonic accepts neon-style type suffixes. */
379 /* Set to the opcode if the instruction needs relaxation.
380 Zero if the instruction is not relaxed. */
384 bfd_reloc_code_real_type type;
389 enum it_instruction_type it_insn_type;
395 struct neon_type_el vectype;
396 unsigned present : 1; /* Operand present. */
397 unsigned isreg : 1; /* Operand was a register. */
398 unsigned immisreg : 1; /* .imm field is a second register. */
399 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
400 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
401 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
402 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
403 instructions. This allows us to disambiguate ARM <-> vector insns. */
404 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
405 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
406 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
407 unsigned issingle : 1; /* Operand is VFP single-precision register. */
408 unsigned hasreloc : 1; /* Operand has relocation suffix. */
409 unsigned writeback : 1; /* Operand has trailing ! */
410 unsigned preind : 1; /* Preindexed address. */
411 unsigned postind : 1; /* Postindexed address. */
412 unsigned negative : 1; /* Index register was negated. */
413 unsigned shifted : 1; /* Shift applied to operation. */
414 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
415 } operands[ARM_IT_MAX_OPERANDS];
418 static struct arm_it inst;
420 #define NUM_FLOAT_VALS 8
422 const char * fp_const[] =
424 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
427 /* Number of littlenums required to hold an extended precision number. */
428 #define MAX_LITTLENUMS 6
430 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
440 #define CP_T_X 0x00008000
441 #define CP_T_Y 0x00400000
443 #define CONDS_BIT 0x00100000
444 #define LOAD_BIT 0x00100000
446 #define DOUBLE_LOAD_FLAG 0x00000001
450 const char * template_name;
454 #define COND_ALWAYS 0xE
458 const char * template_name;
462 struct asm_barrier_opt
464 const char * template_name;
468 /* The bit that distinguishes CPSR and SPSR. */
469 #define SPSR_BIT (1 << 22)
471 /* The individual PSR flag bits. */
472 #define PSR_c (1 << 16)
473 #define PSR_x (1 << 17)
474 #define PSR_s (1 << 18)
475 #define PSR_f (1 << 19)
480 bfd_reloc_code_real_type reloc;
485 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
486 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
491 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
494 /* Bits for DEFINED field in neon_typed_alias. */
495 #define NTA_HASTYPE 1
496 #define NTA_HASINDEX 2
498 struct neon_typed_alias
500 unsigned char defined;
502 struct neon_type_el eltype;
505 /* ARM register categories. This includes coprocessor numbers and various
506 architecture extensions' registers. */
533 /* Structure for a hash table entry for a register.
534 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
535 information which states whether a vector type or index is specified (for a
536 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
542 unsigned char builtin;
543 struct neon_typed_alias * neon;
546 /* Diagnostics used when we don't get a register of the expected type. */
547 const char * const reg_expected_msgs[] =
549 N_("ARM register expected"),
550 N_("bad or missing co-processor number"),
551 N_("co-processor register expected"),
552 N_("FPA register expected"),
553 N_("VFP single precision register expected"),
554 N_("VFP/Neon double precision register expected"),
555 N_("Neon quad precision register expected"),
556 N_("VFP single or double precision register expected"),
557 N_("Neon double or quad precision register expected"),
558 N_("VFP single, double or Neon quad precision register expected"),
559 N_("VFP system register expected"),
560 N_("Maverick MVF register expected"),
561 N_("Maverick MVD register expected"),
562 N_("Maverick MVFX register expected"),
563 N_("Maverick MVDX register expected"),
564 N_("Maverick MVAX register expected"),
565 N_("Maverick DSPSC register expected"),
566 N_("iWMMXt data register expected"),
567 N_("iWMMXt control register expected"),
568 N_("iWMMXt scalar register expected"),
569 N_("XScale accumulator register expected"),
572 /* Some well known registers that we refer to directly elsewhere. */
578 /* ARM instructions take 4bytes in the object file, Thumb instructions
584 /* Basic string to match. */
585 const char * template_name;
587 /* Parameters to instruction. */
588 unsigned int operands[8];
590 /* Conditional tag - see opcode_lookup. */
591 unsigned int tag : 4;
593 /* Basic instruction code. */
594 unsigned int avalue : 28;
596 /* Thumb-format instruction code. */
599 /* Which architecture variant provides this instruction. */
600 const arm_feature_set * avariant;
601 const arm_feature_set * tvariant;
603 /* Function to call to encode instruction in ARM format. */
604 void (* aencode) (void);
606 /* Function to call to encode instruction in Thumb format. */
607 void (* tencode) (void);
610 /* Defines for various bits that we will want to toggle. */
611 #define INST_IMMEDIATE 0x02000000
612 #define OFFSET_REG 0x02000000
613 #define HWOFFSET_IMM 0x00400000
614 #define SHIFT_BY_REG 0x00000010
615 #define PRE_INDEX 0x01000000
616 #define INDEX_UP 0x00800000
617 #define WRITE_BACK 0x00200000
618 #define LDM_TYPE_2_OR_3 0x00400000
619 #define CPSI_MMOD 0x00020000
621 #define LITERAL_MASK 0xf000f000
622 #define OPCODE_MASK 0xfe1fffff
623 #define V4_STR_BIT 0x00000020
625 #define T2_SUBS_PC_LR 0xf3de8f00
627 #define DATA_OP_SHIFT 21
629 #define T2_OPCODE_MASK 0xfe1fffff
630 #define T2_DATA_OP_SHIFT 21
632 #define A_COND_MASK 0xf0000000
633 #define A_PUSH_POP_OP_MASK 0x0fff0000
635 /* Opcodes for pushing/poping registers to/from the stack. */
636 #define A1_OPCODE_PUSH 0x092d0000
637 #define A2_OPCODE_PUSH 0x052d0004
638 #define A2_OPCODE_POP 0x049d0004
640 /* Codes to distinguish the arithmetic instructions. */
651 #define OPCODE_CMP 10
652 #define OPCODE_CMN 11
653 #define OPCODE_ORR 12
654 #define OPCODE_MOV 13
655 #define OPCODE_BIC 14
656 #define OPCODE_MVN 15
658 #define T2_OPCODE_AND 0
659 #define T2_OPCODE_BIC 1
660 #define T2_OPCODE_ORR 2
661 #define T2_OPCODE_ORN 3
662 #define T2_OPCODE_EOR 4
663 #define T2_OPCODE_ADD 8
664 #define T2_OPCODE_ADC 10
665 #define T2_OPCODE_SBC 11
666 #define T2_OPCODE_SUB 13
667 #define T2_OPCODE_RSB 14
669 #define T_OPCODE_MUL 0x4340
670 #define T_OPCODE_TST 0x4200
671 #define T_OPCODE_CMN 0x42c0
672 #define T_OPCODE_NEG 0x4240
673 #define T_OPCODE_MVN 0x43c0
675 #define T_OPCODE_ADD_R3 0x1800
676 #define T_OPCODE_SUB_R3 0x1a00
677 #define T_OPCODE_ADD_HI 0x4400
678 #define T_OPCODE_ADD_ST 0xb000
679 #define T_OPCODE_SUB_ST 0xb080
680 #define T_OPCODE_ADD_SP 0xa800
681 #define T_OPCODE_ADD_PC 0xa000
682 #define T_OPCODE_ADD_I8 0x3000
683 #define T_OPCODE_SUB_I8 0x3800
684 #define T_OPCODE_ADD_I3 0x1c00
685 #define T_OPCODE_SUB_I3 0x1e00
687 #define T_OPCODE_ASR_R 0x4100
688 #define T_OPCODE_LSL_R 0x4080
689 #define T_OPCODE_LSR_R 0x40c0
690 #define T_OPCODE_ROR_R 0x41c0
691 #define T_OPCODE_ASR_I 0x1000
692 #define T_OPCODE_LSL_I 0x0000
693 #define T_OPCODE_LSR_I 0x0800
695 #define T_OPCODE_MOV_I8 0x2000
696 #define T_OPCODE_CMP_I8 0x2800
697 #define T_OPCODE_CMP_LR 0x4280
698 #define T_OPCODE_MOV_HR 0x4600
699 #define T_OPCODE_CMP_HR 0x4500
701 #define T_OPCODE_LDR_PC 0x4800
702 #define T_OPCODE_LDR_SP 0x9800
703 #define T_OPCODE_STR_SP 0x9000
704 #define T_OPCODE_LDR_IW 0x6800
705 #define T_OPCODE_STR_IW 0x6000
706 #define T_OPCODE_LDR_IH 0x8800
707 #define T_OPCODE_STR_IH 0x8000
708 #define T_OPCODE_LDR_IB 0x7800
709 #define T_OPCODE_STR_IB 0x7000
710 #define T_OPCODE_LDR_RW 0x5800
711 #define T_OPCODE_STR_RW 0x5000
712 #define T_OPCODE_LDR_RH 0x5a00
713 #define T_OPCODE_STR_RH 0x5200
714 #define T_OPCODE_LDR_RB 0x5c00
715 #define T_OPCODE_STR_RB 0x5400
717 #define T_OPCODE_PUSH 0xb400
718 #define T_OPCODE_POP 0xbc00
720 #define T_OPCODE_BRANCH 0xe000
722 #define THUMB_SIZE 2 /* Size of thumb instruction. */
723 #define THUMB_PP_PC_LR 0x0100
724 #define THUMB_LOAD_BIT 0x0800
725 #define THUMB2_LOAD_BIT 0x00100000
727 #define BAD_ARGS _("bad arguments to instruction")
728 #define BAD_SP _("r13 not allowed here")
729 #define BAD_PC _("r15 not allowed here")
730 #define BAD_COND _("instruction cannot be conditional")
731 #define BAD_OVERLAP _("registers may not be the same")
732 #define BAD_HIREG _("lo register required")
733 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
734 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode");
735 #define BAD_BRANCH _("branch must be last instruction in IT block")
736 #define BAD_NOT_IT _("instruction not allowed in IT block")
737 #define BAD_FPU _("selected FPU does not support instruction")
738 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
739 #define BAD_IT_COND _("incorrect condition in IT block")
740 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
741 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
742 #define BAD_PC_ADDRESSING \
743 _("cannot use register index with PC-relative addressing")
744 #define BAD_PC_WRITEBACK \
745 _("cannot use writeback with PC-relative addressing")
746 #define BAD_RANGE _("branch out of range")
748 static struct hash_control * arm_ops_hsh;
749 static struct hash_control * arm_cond_hsh;
750 static struct hash_control * arm_shift_hsh;
751 static struct hash_control * arm_psr_hsh;
752 static struct hash_control * arm_v7m_psr_hsh;
753 static struct hash_control * arm_reg_hsh;
754 static struct hash_control * arm_reloc_hsh;
755 static struct hash_control * arm_barrier_opt_hsh;
757 /* Stuff needed to resolve the label ambiguity
766 symbolS * last_label_seen;
767 static int label_is_thumb_function_name = FALSE;
769 /* Literal pool structure. Held on a per-section
770 and per-sub-section basis. */
772 #define MAX_LITERAL_POOL_SIZE 1024
773 typedef struct literal_pool
775 expressionS literals [MAX_LITERAL_POOL_SIZE];
776 unsigned int next_free_entry;
782 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
784 struct literal_pool * next;
787 /* Pointer to a linked list of literal pools. */
788 literal_pool * list_of_pools = NULL;
791 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
793 static struct current_it now_it;
797 now_it_compatible (int cond)
799 return (cond & ~1) == (now_it.cc & ~1);
803 conditional_insn (void)
805 return inst.cond != COND_ALWAYS;
808 static int in_it_block (void);
810 static int handle_it_state (void);
812 static void force_automatic_it_block_close (void);
814 static void it_fsm_post_encode (void);
816 #define set_it_insn_type(type) \
819 inst.it_insn_type = type; \
820 if (handle_it_state () == FAIL) \
825 #define set_it_insn_type_nonvoid(type, failret) \
828 inst.it_insn_type = type; \
829 if (handle_it_state () == FAIL) \
834 #define set_it_insn_type_last() \
837 if (inst.cond == COND_ALWAYS) \
838 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
840 set_it_insn_type (INSIDE_IT_LAST_INSN); \
846 /* This array holds the chars that always start a comment. If the
847 pre-processor is disabled, these aren't very useful. */
848 const char comment_chars[] = "@";
850 /* This array holds the chars that only start a comment at the beginning of
851 a line. If the line seems to have the form '# 123 filename'
852 .line and .file directives will appear in the pre-processed output. */
853 /* Note that input_file.c hand checks for '#' at the beginning of the
854 first line of the input file. This is because the compiler outputs
855 #NO_APP at the beginning of its output. */
856 /* Also note that comments like this one will always work. */
857 const char line_comment_chars[] = "#";
859 const char line_separator_chars[] = ";";
861 /* Chars that can be used to separate mant
862 from exp in floating point numbers. */
863 const char EXP_CHARS[] = "eE";
865 /* Chars that mean this number is a floating point constant. */
869 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
871 /* Prefix characters that indicate the start of an immediate
873 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
875 /* Separator character handling. */
877 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
880 skip_past_char (char ** str, char c)
891 #define skip_past_comma(str) skip_past_char (str, ',')
893 /* Arithmetic expressions (possibly involving symbols). */
895 /* Return TRUE if anything in the expression is a bignum. */
898 walk_no_bignums (symbolS * sp)
900 if (symbol_get_value_expression (sp)->X_op == O_big)
903 if (symbol_get_value_expression (sp)->X_add_symbol)
905 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
906 || (symbol_get_value_expression (sp)->X_op_symbol
907 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
913 static int in_my_get_expression = 0;
915 /* Third argument to my_get_expression. */
916 #define GE_NO_PREFIX 0
917 #define GE_IMM_PREFIX 1
918 #define GE_OPT_PREFIX 2
919 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
920 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
921 #define GE_OPT_PREFIX_BIG 3
924 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
929 /* In unified syntax, all prefixes are optional. */
931 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
936 case GE_NO_PREFIX: break;
938 if (!is_immediate_prefix (**str))
940 inst.error = _("immediate expression requires a # prefix");
946 case GE_OPT_PREFIX_BIG:
947 if (is_immediate_prefix (**str))
953 memset (ep, 0, sizeof (expressionS));
955 save_in = input_line_pointer;
956 input_line_pointer = *str;
957 in_my_get_expression = 1;
958 seg = expression (ep);
959 in_my_get_expression = 0;
961 if (ep->X_op == O_illegal || ep->X_op == O_absent)
963 /* We found a bad or missing expression in md_operand(). */
964 *str = input_line_pointer;
965 input_line_pointer = save_in;
966 if (inst.error == NULL)
967 inst.error = (ep->X_op == O_absent
968 ? _("missing expression") :_("bad expression"));
973 if (seg != absolute_section
974 && seg != text_section
975 && seg != data_section
976 && seg != bss_section
977 && seg != undefined_section)
979 inst.error = _("bad segment");
980 *str = input_line_pointer;
981 input_line_pointer = save_in;
988 /* Get rid of any bignums now, so that we don't generate an error for which
989 we can't establish a line number later on. Big numbers are never valid
990 in instructions, which is where this routine is always called. */
991 if (prefix_mode != GE_OPT_PREFIX_BIG
992 && (ep->X_op == O_big
994 && (walk_no_bignums (ep->X_add_symbol)
996 && walk_no_bignums (ep->X_op_symbol))))))
998 inst.error = _("invalid constant");
999 *str = input_line_pointer;
1000 input_line_pointer = save_in;
1004 *str = input_line_pointer;
1005 input_line_pointer = save_in;
1009 /* Turn a string in input_line_pointer into a floating point constant
1010 of type TYPE, and store the appropriate bytes in *LITP. The number
1011 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1012 returned, or NULL on OK.
1014 Note that fp constants aren't represent in the normal way on the ARM.
1015 In big endian mode, things are as expected. However, in little endian
1016 mode fp constants are big-endian word-wise, and little-endian byte-wise
1017 within the words. For example, (double) 1.1 in big endian mode is
1018 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1019 the byte sequence 99 99 f1 3f 9a 99 99 99.
1021 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1024 md_atof (int type, char * litP, int * sizeP)
1027 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1059 return _("Unrecognized or unsupported floating point constant");
1062 t = atof_ieee (input_line_pointer, type, words);
1064 input_line_pointer = t;
1065 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1067 if (target_big_endian)
1069 for (i = 0; i < prec; i++)
1071 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1072 litP += sizeof (LITTLENUM_TYPE);
1077 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1078 for (i = prec - 1; i >= 0; i--)
1080 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1081 litP += sizeof (LITTLENUM_TYPE);
1084 /* For a 4 byte float the order of elements in `words' is 1 0.
1085 For an 8 byte float the order is 1 0 3 2. */
1086 for (i = 0; i < prec; i += 2)
1088 md_number_to_chars (litP, (valueT) words[i + 1],
1089 sizeof (LITTLENUM_TYPE));
1090 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1091 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1092 litP += 2 * sizeof (LITTLENUM_TYPE);
1099 /* We handle all bad expressions here, so that we can report the faulty
1100 instruction in the error message. */
1102 md_operand (expressionS * exp)
1104 if (in_my_get_expression)
1105 exp->X_op = O_illegal;
1108 /* Immediate values. */
1110 /* Generic immediate-value read function for use in directives.
1111 Accepts anything that 'expression' can fold to a constant.
1112 *val receives the number. */
1115 immediate_for_directive (int *val)
1118 exp.X_op = O_illegal;
1120 if (is_immediate_prefix (*input_line_pointer))
1122 input_line_pointer++;
1126 if (exp.X_op != O_constant)
1128 as_bad (_("expected #constant"));
1129 ignore_rest_of_line ();
1132 *val = exp.X_add_number;
1137 /* Register parsing. */
1139 /* Generic register parser. CCP points to what should be the
1140 beginning of a register name. If it is indeed a valid register
1141 name, advance CCP over it and return the reg_entry structure;
1142 otherwise return NULL. Does not issue diagnostics. */
1144 static struct reg_entry *
1145 arm_reg_parse_multi (char **ccp)
1149 struct reg_entry *reg;
1151 #ifdef REGISTER_PREFIX
1152 if (*start != REGISTER_PREFIX)
1156 #ifdef OPTIONAL_REGISTER_PREFIX
1157 if (*start == OPTIONAL_REGISTER_PREFIX)
1162 if (!ISALPHA (*p) || !is_name_beginner (*p))
1167 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1169 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1179 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1180 enum arm_reg_type type)
1182 /* Alternative syntaxes are accepted for a few register classes. */
1189 /* Generic coprocessor register names are allowed for these. */
1190 if (reg && reg->type == REG_TYPE_CN)
1195 /* For backward compatibility, a bare number is valid here. */
1197 unsigned long processor = strtoul (start, ccp, 10);
1198 if (*ccp != start && processor <= 15)
1202 case REG_TYPE_MMXWC:
1203 /* WC includes WCG. ??? I'm not sure this is true for all
1204 instructions that take WC registers. */
1205 if (reg && reg->type == REG_TYPE_MMXWCG)
1216 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1217 return value is the register number or FAIL. */
1220 arm_reg_parse (char **ccp, enum arm_reg_type type)
1223 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1226 /* Do not allow a scalar (reg+index) to parse as a register. */
1227 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1230 if (reg && reg->type == type)
1233 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1240 /* Parse a Neon type specifier. *STR should point at the leading '.'
1241 character. Does no verification at this stage that the type fits the opcode
1248 Can all be legally parsed by this function.
1250 Fills in neon_type struct pointer with parsed information, and updates STR
1251 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1252 type, FAIL if not. */
1255 parse_neon_type (struct neon_type *type, char **str)
1262 while (type->elems < NEON_MAX_TYPE_ELS)
1264 enum neon_el_type thistype = NT_untyped;
1265 unsigned thissize = -1u;
1272 /* Just a size without an explicit type. */
1276 switch (TOLOWER (*ptr))
1278 case 'i': thistype = NT_integer; break;
1279 case 'f': thistype = NT_float; break;
1280 case 'p': thistype = NT_poly; break;
1281 case 's': thistype = NT_signed; break;
1282 case 'u': thistype = NT_unsigned; break;
1284 thistype = NT_float;
1289 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1295 /* .f is an abbreviation for .f32. */
1296 if (thistype == NT_float && !ISDIGIT (*ptr))
1301 thissize = strtoul (ptr, &ptr, 10);
1303 if (thissize != 8 && thissize != 16 && thissize != 32
1306 as_bad (_("bad size %d in type specifier"), thissize);
1314 type->el[type->elems].type = thistype;
1315 type->el[type->elems].size = thissize;
1320 /* Empty/missing type is not a successful parse. */
1321 if (type->elems == 0)
1329 /* Errors may be set multiple times during parsing or bit encoding
1330 (particularly in the Neon bits), but usually the earliest error which is set
1331 will be the most meaningful. Avoid overwriting it with later (cascading)
1332 errors by calling this function. */
1335 first_error (const char *err)
1341 /* Parse a single type, e.g. ".s32", leading period included. */
1343 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1346 struct neon_type optype;
1350 if (parse_neon_type (&optype, &str) == SUCCESS)
1352 if (optype.elems == 1)
1353 *vectype = optype.el[0];
1356 first_error (_("only one type should be specified for operand"));
1362 first_error (_("vector type expected"));
1374 /* Special meanings for indices (which have a range of 0-7), which will fit into
1377 #define NEON_ALL_LANES 15
1378 #define NEON_INTERLEAVE_LANES 14
1380 /* Parse either a register or a scalar, with an optional type. Return the
1381 register number, and optionally fill in the actual type of the register
1382 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1383 type/index information in *TYPEINFO. */
1386 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1387 enum arm_reg_type *rtype,
1388 struct neon_typed_alias *typeinfo)
1391 struct reg_entry *reg = arm_reg_parse_multi (&str);
1392 struct neon_typed_alias atype;
1393 struct neon_type_el parsetype;
1397 atype.eltype.type = NT_invtype;
1398 atype.eltype.size = -1;
1400 /* Try alternate syntax for some types of register. Note these are mutually
1401 exclusive with the Neon syntax extensions. */
1404 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1412 /* Undo polymorphism when a set of register types may be accepted. */
1413 if ((type == REG_TYPE_NDQ
1414 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1415 || (type == REG_TYPE_VFSD
1416 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1417 || (type == REG_TYPE_NSDQ
1418 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1419 || reg->type == REG_TYPE_NQ))
1420 || (type == REG_TYPE_MMXWC
1421 && (reg->type == REG_TYPE_MMXWCG)))
1422 type = (enum arm_reg_type) reg->type;
1424 if (type != reg->type)
1430 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1432 if ((atype.defined & NTA_HASTYPE) != 0)
1434 first_error (_("can't redefine type for operand"));
1437 atype.defined |= NTA_HASTYPE;
1438 atype.eltype = parsetype;
1441 if (skip_past_char (&str, '[') == SUCCESS)
1443 if (type != REG_TYPE_VFD)
1445 first_error (_("only D registers may be indexed"));
1449 if ((atype.defined & NTA_HASINDEX) != 0)
1451 first_error (_("can't change index for operand"));
1455 atype.defined |= NTA_HASINDEX;
1457 if (skip_past_char (&str, ']') == SUCCESS)
1458 atype.index = NEON_ALL_LANES;
1463 my_get_expression (&exp, &str, GE_NO_PREFIX);
1465 if (exp.X_op != O_constant)
1467 first_error (_("constant expression required"));
1471 if (skip_past_char (&str, ']') == FAIL)
1474 atype.index = exp.X_add_number;
1489 /* Like arm_reg_parse, but allow allow the following extra features:
1490 - If RTYPE is non-zero, return the (possibly restricted) type of the
1491 register (e.g. Neon double or quad reg when either has been requested).
1492 - If this is a Neon vector type with additional type information, fill
1493 in the struct pointed to by VECTYPE (if non-NULL).
1494 This function will fault on encountering a scalar. */
1497 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1498 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1500 struct neon_typed_alias atype;
1502 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1507 /* Do not allow regname(... to parse as a register. */
1511 /* Do not allow a scalar (reg+index) to parse as a register. */
1512 if ((atype.defined & NTA_HASINDEX) != 0)
1514 first_error (_("register operand expected, but got scalar"));
1519 *vectype = atype.eltype;
1526 #define NEON_SCALAR_REG(X) ((X) >> 4)
1527 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1529 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1530 have enough information to be able to do a good job bounds-checking. So, we
1531 just do easy checks here, and do further checks later. */
1534 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1538 struct neon_typed_alias atype;
1540 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1542 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1545 if (atype.index == NEON_ALL_LANES)
1547 first_error (_("scalar must have an index"));
1550 else if (atype.index >= 64 / elsize)
1552 first_error (_("scalar index out of range"));
1557 *type = atype.eltype;
1561 return reg * 16 + atype.index;
1564 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1567 parse_reg_list (char ** strp)
1569 char * str = * strp;
1573 /* We come back here if we get ranges concatenated by '+' or '|'. */
1588 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1590 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1600 first_error (_("bad range in register list"));
1604 for (i = cur_reg + 1; i < reg; i++)
1606 if (range & (1 << i))
1608 (_("Warning: duplicated register (r%d) in register list"),
1616 if (range & (1 << reg))
1617 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1619 else if (reg <= cur_reg)
1620 as_tsktsk (_("Warning: register range not in ascending order"));
1625 while (skip_past_comma (&str) != FAIL
1626 || (in_range = 1, *str++ == '-'));
1631 first_error (_("missing `}'"));
1639 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1642 if (exp.X_op == O_constant)
1644 if (exp.X_add_number
1645 != (exp.X_add_number & 0x0000ffff))
1647 inst.error = _("invalid register mask");
1651 if ((range & exp.X_add_number) != 0)
1653 int regno = range & exp.X_add_number;
1656 regno = (1 << regno) - 1;
1658 (_("Warning: duplicated register (r%d) in register list"),
1662 range |= exp.X_add_number;
1666 if (inst.reloc.type != 0)
1668 inst.error = _("expression too complex");
1672 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1673 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1674 inst.reloc.pc_rel = 0;
1678 if (*str == '|' || *str == '+')
1684 while (another_range);
1690 /* Types of registers in a list. */
1699 /* Parse a VFP register list. If the string is invalid return FAIL.
1700 Otherwise return the number of registers, and set PBASE to the first
1701 register. Parses registers of type ETYPE.
1702 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1703 - Q registers can be used to specify pairs of D registers
1704 - { } can be omitted from around a singleton register list
1705 FIXME: This is not implemented, as it would require backtracking in
1708 This could be done (the meaning isn't really ambiguous), but doesn't
1709 fit in well with the current parsing framework.
1710 - 32 D registers may be used (also true for VFPv3).
1711 FIXME: Types are ignored in these register lists, which is probably a
1715 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1720 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1724 unsigned long mask = 0;
1729 inst.error = _("expecting {");
1738 regtype = REG_TYPE_VFS;
1743 regtype = REG_TYPE_VFD;
1746 case REGLIST_NEON_D:
1747 regtype = REG_TYPE_NDQ;
1751 if (etype != REGLIST_VFP_S)
1753 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1754 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1758 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1761 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1768 base_reg = max_regs;
1772 int setmask = 1, addregs = 1;
1774 new_base = arm_typed_reg_parse (&str, regtype, ®type, NULL);
1776 if (new_base == FAIL)
1778 first_error (_(reg_expected_msgs[regtype]));
1782 if (new_base >= max_regs)
1784 first_error (_("register out of range in list"));
1788 /* Note: a value of 2 * n is returned for the register Q<n>. */
1789 if (regtype == REG_TYPE_NQ)
1795 if (new_base < base_reg)
1796 base_reg = new_base;
1798 if (mask & (setmask << new_base))
1800 first_error (_("invalid register list"));
1804 if ((mask >> new_base) != 0 && ! warned)
1806 as_tsktsk (_("register list not in ascending order"));
1810 mask |= setmask << new_base;
1813 if (*str == '-') /* We have the start of a range expression */
1819 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1822 inst.error = gettext (reg_expected_msgs[regtype]);
1826 if (high_range >= max_regs)
1828 first_error (_("register out of range in list"));
1832 if (regtype == REG_TYPE_NQ)
1833 high_range = high_range + 1;
1835 if (high_range <= new_base)
1837 inst.error = _("register range not in ascending order");
1841 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1843 if (mask & (setmask << new_base))
1845 inst.error = _("invalid register list");
1849 mask |= setmask << new_base;
1854 while (skip_past_comma (&str) != FAIL);
1858 /* Sanity check -- should have raised a parse error above. */
1859 if (count == 0 || count > max_regs)
1864 /* Final test -- the registers must be consecutive. */
1866 for (i = 0; i < count; i++)
1868 if ((mask & (1u << i)) == 0)
1870 inst.error = _("non-contiguous register range");
1880 /* True if two alias types are the same. */
1883 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1891 if (a->defined != b->defined)
1894 if ((a->defined & NTA_HASTYPE) != 0
1895 && (a->eltype.type != b->eltype.type
1896 || a->eltype.size != b->eltype.size))
1899 if ((a->defined & NTA_HASINDEX) != 0
1900 && (a->index != b->index))
1906 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1907 The base register is put in *PBASE.
1908 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1910 The register stride (minus one) is put in bit 4 of the return value.
1911 Bits [6:5] encode the list length (minus one).
1912 The type of the list elements is put in *ELTYPE, if non-NULL. */
1914 #define NEON_LANE(X) ((X) & 0xf)
1915 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1916 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1919 parse_neon_el_struct_list (char **str, unsigned *pbase,
1920 struct neon_type_el *eltype)
1927 int leading_brace = 0;
1928 enum arm_reg_type rtype = REG_TYPE_NDQ;
1929 const char *const incr_error = _("register stride must be 1 or 2");
1930 const char *const type_error = _("mismatched element/structure types in list");
1931 struct neon_typed_alias firsttype;
1933 if (skip_past_char (&ptr, '{') == SUCCESS)
1938 struct neon_typed_alias atype;
1939 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1943 first_error (_(reg_expected_msgs[rtype]));
1950 if (rtype == REG_TYPE_NQ)
1956 else if (reg_incr == -1)
1958 reg_incr = getreg - base_reg;
1959 if (reg_incr < 1 || reg_incr > 2)
1961 first_error (_(incr_error));
1965 else if (getreg != base_reg + reg_incr * count)
1967 first_error (_(incr_error));
1971 if (! neon_alias_types_same (&atype, &firsttype))
1973 first_error (_(type_error));
1977 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1981 struct neon_typed_alias htype;
1982 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1984 lane = NEON_INTERLEAVE_LANES;
1985 else if (lane != NEON_INTERLEAVE_LANES)
1987 first_error (_(type_error));
1992 else if (reg_incr != 1)
1994 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1998 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
2001 first_error (_(reg_expected_msgs[rtype]));
2004 if (! neon_alias_types_same (&htype, &firsttype))
2006 first_error (_(type_error));
2009 count += hireg + dregs - getreg;
2013 /* If we're using Q registers, we can't use [] or [n] syntax. */
2014 if (rtype == REG_TYPE_NQ)
2020 if ((atype.defined & NTA_HASINDEX) != 0)
2024 else if (lane != atype.index)
2026 first_error (_(type_error));
2030 else if (lane == -1)
2031 lane = NEON_INTERLEAVE_LANES;
2032 else if (lane != NEON_INTERLEAVE_LANES)
2034 first_error (_(type_error));
2039 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
2041 /* No lane set by [x]. We must be interleaving structures. */
2043 lane = NEON_INTERLEAVE_LANES;
2046 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
2047 || (count > 1 && reg_incr == -1))
2049 first_error (_("error parsing element/structure list"));
2053 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
2055 first_error (_("expected }"));
2063 *eltype = firsttype.eltype;
2068 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
2071 /* Parse an explicit relocation suffix on an expression. This is
2072 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2073 arm_reloc_hsh contains no entries, so this function can only
2074 succeed if there is no () after the word. Returns -1 on error,
2075 BFD_RELOC_UNUSED if there wasn't any suffix. */
2078 parse_reloc (char **str)
2080 struct reloc_entry *r;
2084 return BFD_RELOC_UNUSED;
2089 while (*q && *q != ')' && *q != ',')
2094 if ((r = (struct reloc_entry *)
2095 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2102 /* Directives: register aliases. */
2104 static struct reg_entry *
2105 insert_reg_alias (char *str, unsigned number, int type)
2107 struct reg_entry *new_reg;
2110 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2112 if (new_reg->builtin)
2113 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2115 /* Only warn about a redefinition if it's not defined as the
2117 else if (new_reg->number != number || new_reg->type != type)
2118 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2123 name = xstrdup (str);
2124 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2126 new_reg->name = name;
2127 new_reg->number = number;
2128 new_reg->type = type;
2129 new_reg->builtin = FALSE;
2130 new_reg->neon = NULL;
2132 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2139 insert_neon_reg_alias (char *str, int number, int type,
2140 struct neon_typed_alias *atype)
2142 struct reg_entry *reg = insert_reg_alias (str, number, type);
2146 first_error (_("attempt to redefine typed alias"));
2152 reg->neon = (struct neon_typed_alias *)
2153 xmalloc (sizeof (struct neon_typed_alias));
2154 *reg->neon = *atype;
2158 /* Look for the .req directive. This is of the form:
2160 new_register_name .req existing_register_name
2162 If we find one, or if it looks sufficiently like one that we want to
2163 handle any error here, return TRUE. Otherwise return FALSE. */
2166 create_register_alias (char * newname, char *p)
2168 struct reg_entry *old;
2169 char *oldname, *nbuf;
2172 /* The input scrubber ensures that whitespace after the mnemonic is
2173 collapsed to single spaces. */
2175 if (strncmp (oldname, " .req ", 6) != 0)
2179 if (*oldname == '\0')
2182 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2185 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2189 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2190 the desired alias name, and p points to its end. If not, then
2191 the desired alias name is in the global original_case_string. */
2192 #ifdef TC_CASE_SENSITIVE
2195 newname = original_case_string;
2196 nlen = strlen (newname);
2199 nbuf = (char *) alloca (nlen + 1);
2200 memcpy (nbuf, newname, nlen);
2203 /* Create aliases under the new name as stated; an all-lowercase
2204 version of the new name; and an all-uppercase version of the new
2206 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2208 for (p = nbuf; *p; p++)
2211 if (strncmp (nbuf, newname, nlen))
2213 /* If this attempt to create an additional alias fails, do not bother
2214 trying to create the all-lower case alias. We will fail and issue
2215 a second, duplicate error message. This situation arises when the
2216 programmer does something like:
2219 The second .req creates the "Foo" alias but then fails to create
2220 the artificial FOO alias because it has already been created by the
2222 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2226 for (p = nbuf; *p; p++)
2229 if (strncmp (nbuf, newname, nlen))
2230 insert_reg_alias (nbuf, old->number, old->type);
2236 /* Create a Neon typed/indexed register alias using directives, e.g.:
2241 These typed registers can be used instead of the types specified after the
2242 Neon mnemonic, so long as all operands given have types. Types can also be
2243 specified directly, e.g.:
2244 vadd d0.s32, d1.s32, d2.s32 */
2247 create_neon_reg_alias (char *newname, char *p)
2249 enum arm_reg_type basetype;
2250 struct reg_entry *basereg;
2251 struct reg_entry mybasereg;
2252 struct neon_type ntype;
2253 struct neon_typed_alias typeinfo;
2254 char *namebuf, *nameend ATTRIBUTE_UNUSED;
2257 typeinfo.defined = 0;
2258 typeinfo.eltype.type = NT_invtype;
2259 typeinfo.eltype.size = -1;
2260 typeinfo.index = -1;
2264 if (strncmp (p, " .dn ", 5) == 0)
2265 basetype = REG_TYPE_VFD;
2266 else if (strncmp (p, " .qn ", 5) == 0)
2267 basetype = REG_TYPE_NQ;
2276 basereg = arm_reg_parse_multi (&p);
2278 if (basereg && basereg->type != basetype)
2280 as_bad (_("bad type for register"));
2284 if (basereg == NULL)
2287 /* Try parsing as an integer. */
2288 my_get_expression (&exp, &p, GE_NO_PREFIX);
2289 if (exp.X_op != O_constant)
2291 as_bad (_("expression must be constant"));
2294 basereg = &mybasereg;
2295 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2301 typeinfo = *basereg->neon;
2303 if (parse_neon_type (&ntype, &p) == SUCCESS)
2305 /* We got a type. */
2306 if (typeinfo.defined & NTA_HASTYPE)
2308 as_bad (_("can't redefine the type of a register alias"));
2312 typeinfo.defined |= NTA_HASTYPE;
2313 if (ntype.elems != 1)
2315 as_bad (_("you must specify a single type only"));
2318 typeinfo.eltype = ntype.el[0];
2321 if (skip_past_char (&p, '[') == SUCCESS)
2324 /* We got a scalar index. */
2326 if (typeinfo.defined & NTA_HASINDEX)
2328 as_bad (_("can't redefine the index of a scalar alias"));
2332 my_get_expression (&exp, &p, GE_NO_PREFIX);
2334 if (exp.X_op != O_constant)
2336 as_bad (_("scalar index must be constant"));
2340 typeinfo.defined |= NTA_HASINDEX;
2341 typeinfo.index = exp.X_add_number;
2343 if (skip_past_char (&p, ']') == FAIL)
2345 as_bad (_("expecting ]"));
2350 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2351 the desired alias name, and p points to its end. If not, then
2352 the desired alias name is in the global original_case_string. */
2353 #ifdef TC_CASE_SENSITIVE
2354 namelen = nameend - newname;
2356 newname = original_case_string;
2357 namelen = strlen (newname);
2360 namebuf = (char *) alloca (namelen + 1);
2361 strncpy (namebuf, newname, namelen);
2362 namebuf[namelen] = '\0';
2364 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2365 typeinfo.defined != 0 ? &typeinfo : NULL);
2367 /* Insert name in all uppercase. */
2368 for (p = namebuf; *p; p++)
2371 if (strncmp (namebuf, newname, namelen))
2372 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2373 typeinfo.defined != 0 ? &typeinfo : NULL);
2375 /* Insert name in all lowercase. */
2376 for (p = namebuf; *p; p++)
2379 if (strncmp (namebuf, newname, namelen))
2380 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2381 typeinfo.defined != 0 ? &typeinfo : NULL);
2386 /* Should never be called, as .req goes between the alias and the
2387 register name, not at the beginning of the line. */
2390 s_req (int a ATTRIBUTE_UNUSED)
2392 as_bad (_("invalid syntax for .req directive"));
2396 s_dn (int a ATTRIBUTE_UNUSED)
2398 as_bad (_("invalid syntax for .dn directive"));
2402 s_qn (int a ATTRIBUTE_UNUSED)
2404 as_bad (_("invalid syntax for .qn directive"));
2407 /* The .unreq directive deletes an alias which was previously defined
2408 by .req. For example:
2414 s_unreq (int a ATTRIBUTE_UNUSED)
2419 name = input_line_pointer;
2421 while (*input_line_pointer != 0
2422 && *input_line_pointer != ' '
2423 && *input_line_pointer != '\n')
2424 ++input_line_pointer;
2426 saved_char = *input_line_pointer;
2427 *input_line_pointer = 0;
2430 as_bad (_("invalid syntax for .unreq directive"));
2433 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2437 as_bad (_("unknown register alias '%s'"), name);
2438 else if (reg->builtin)
2439 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2446 hash_delete (arm_reg_hsh, name, FALSE);
2447 free ((char *) reg->name);
2452 /* Also locate the all upper case and all lower case versions.
2453 Do not complain if we cannot find one or the other as it
2454 was probably deleted above. */
2456 nbuf = strdup (name);
2457 for (p = nbuf; *p; p++)
2459 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2462 hash_delete (arm_reg_hsh, nbuf, FALSE);
2463 free ((char *) reg->name);
2469 for (p = nbuf; *p; p++)
2471 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2474 hash_delete (arm_reg_hsh, nbuf, FALSE);
2475 free ((char *) reg->name);
2485 *input_line_pointer = saved_char;
2486 demand_empty_rest_of_line ();
2489 /* Directives: Instruction set selection. */
2492 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2493 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2494 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2495 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2497 /* Create a new mapping symbol for the transition to STATE. */
2500 make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2503 const char * symname;
2510 type = BSF_NO_FLAGS;
2514 type = BSF_NO_FLAGS;
2518 type = BSF_NO_FLAGS;
2524 symbolP = symbol_new (symname, now_seg, value, frag);
2525 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2530 THUMB_SET_FUNC (symbolP, 0);
2531 ARM_SET_THUMB (symbolP, 0);
2532 ARM_SET_INTERWORK (symbolP, support_interwork);
2536 THUMB_SET_FUNC (symbolP, 1);
2537 ARM_SET_THUMB (symbolP, 1);
2538 ARM_SET_INTERWORK (symbolP, support_interwork);
2546 /* Save the mapping symbols for future reference. Also check that
2547 we do not place two mapping symbols at the same offset within a
2548 frag. We'll handle overlap between frags in
2549 check_mapping_symbols.
2551 If .fill or other data filling directive generates zero sized data,
2552 the mapping symbol for the following code will have the same value
2553 as the one generated for the data filling directive. In this case,
2554 we replace the old symbol with the new one at the same address. */
2557 if (frag->tc_frag_data.first_map != NULL)
2559 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
2560 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
2562 frag->tc_frag_data.first_map = symbolP;
2564 if (frag->tc_frag_data.last_map != NULL)
2566 know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
2567 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
2568 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
2570 frag->tc_frag_data.last_map = symbolP;
2573 /* We must sometimes convert a region marked as code to data during
2574 code alignment, if an odd number of bytes have to be padded. The
2575 code mapping symbol is pushed to an aligned address. */
2578 insert_data_mapping_symbol (enum mstate state,
2579 valueT value, fragS *frag, offsetT bytes)
2581 /* If there was already a mapping symbol, remove it. */
2582 if (frag->tc_frag_data.last_map != NULL
2583 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2585 symbolS *symp = frag->tc_frag_data.last_map;
2589 know (frag->tc_frag_data.first_map == symp);
2590 frag->tc_frag_data.first_map = NULL;
2592 frag->tc_frag_data.last_map = NULL;
2593 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2596 make_mapping_symbol (MAP_DATA, value, frag);
2597 make_mapping_symbol (state, value + bytes, frag);
2600 static void mapping_state_2 (enum mstate state, int max_chars);
2602 /* Set the mapping state to STATE. Only call this when about to
2603 emit some STATE bytes to the file. */
2606 mapping_state (enum mstate state)
2608 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2610 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2612 if (mapstate == state)
2613 /* The mapping symbol has already been emitted.
2614 There is nothing else to do. */
2617 if (state == MAP_ARM || state == MAP_THUMB)
2619 All ARM instructions require 4-byte alignment.
2620 (Almost) all Thumb instructions require 2-byte alignment.
2622 When emitting instructions into any section, mark the section
2625 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2626 but themselves require 2-byte alignment; this applies to some
2627 PC- relative forms. However, these cases will invovle implicit
2628 literal pool generation or an explicit .align >=2, both of
2629 which will cause the section to me marked with sufficient
2630 alignment. Thus, we don't handle those cases here. */
2631 record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2633 if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2634 /* This case will be evaluated later in the next else. */
2636 else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2637 || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2639 /* Only add the symbol if the offset is > 0:
2640 if we're at the first frag, check it's size > 0;
2641 if we're not at the first frag, then for sure
2642 the offset is > 0. */
2643 struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2644 const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2647 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2650 mapping_state_2 (state, 0);
2654 /* Same as mapping_state, but MAX_CHARS bytes have already been
2655 allocated. Put the mapping symbol that far back. */
2658 mapping_state_2 (enum mstate state, int max_chars)
2660 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2662 if (!SEG_NORMAL (now_seg))
2665 if (mapstate == state)
2666 /* The mapping symbol has already been emitted.
2667 There is nothing else to do. */
2670 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2671 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2674 #define mapping_state(x) ((void)0)
2675 #define mapping_state_2(x, y) ((void)0)
2678 /* Find the real, Thumb encoded start of a Thumb function. */
2682 find_real_start (symbolS * symbolP)
2685 const char * name = S_GET_NAME (symbolP);
2686 symbolS * new_target;
2688 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2689 #define STUB_NAME ".real_start_of"
2694 /* The compiler may generate BL instructions to local labels because
2695 it needs to perform a branch to a far away location. These labels
2696 do not have a corresponding ".real_start_of" label. We check
2697 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2698 the ".real_start_of" convention for nonlocal branches. */
2699 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2702 real_start = ACONCAT ((STUB_NAME, name, NULL));
2703 new_target = symbol_find (real_start);
2705 if (new_target == NULL)
2707 as_warn (_("Failed to find real start of function: %s\n"), name);
2708 new_target = symbolP;
2716 opcode_select (int width)
2723 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2724 as_bad (_("selected processor does not support THUMB opcodes"));
2727 /* No need to force the alignment, since we will have been
2728 coming from ARM mode, which is word-aligned. */
2729 record_alignment (now_seg, 1);
2736 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2737 as_bad (_("selected processor does not support ARM opcodes"));
2742 frag_align (2, 0, 0);
2744 record_alignment (now_seg, 1);
2749 as_bad (_("invalid instruction size selected (%d)"), width);
2754 s_arm (int ignore ATTRIBUTE_UNUSED)
2757 demand_empty_rest_of_line ();
2761 s_thumb (int ignore ATTRIBUTE_UNUSED)
2764 demand_empty_rest_of_line ();
2768 s_code (int unused ATTRIBUTE_UNUSED)
2772 temp = get_absolute_expression ();
2777 opcode_select (temp);
2781 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2786 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2788 /* If we are not already in thumb mode go into it, EVEN if
2789 the target processor does not support thumb instructions.
2790 This is used by gcc/config/arm/lib1funcs.asm for example
2791 to compile interworking support functions even if the
2792 target processor should not support interworking. */
2796 record_alignment (now_seg, 1);
2799 demand_empty_rest_of_line ();
2803 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2807 /* The following label is the name/address of the start of a Thumb function.
2808 We need to know this for the interworking support. */
2809 label_is_thumb_function_name = TRUE;
2812 /* Perform a .set directive, but also mark the alias as
2813 being a thumb function. */
2816 s_thumb_set (int equiv)
2818 /* XXX the following is a duplicate of the code for s_set() in read.c
2819 We cannot just call that code as we need to get at the symbol that
2826 /* Especial apologies for the random logic:
2827 This just grew, and could be parsed much more simply!
2829 name = input_line_pointer;
2830 delim = get_symbol_end ();
2831 end_name = input_line_pointer;
2834 if (*input_line_pointer != ',')
2837 as_bad (_("expected comma after name \"%s\""), name);
2839 ignore_rest_of_line ();
2843 input_line_pointer++;
2846 if (name[0] == '.' && name[1] == '\0')
2848 /* XXX - this should not happen to .thumb_set. */
2852 if ((symbolP = symbol_find (name)) == NULL
2853 && (symbolP = md_undefined_symbol (name)) == NULL)
2856 /* When doing symbol listings, play games with dummy fragments living
2857 outside the normal fragment chain to record the file and line info
2859 if (listing & LISTING_SYMBOLS)
2861 extern struct list_info_struct * listing_tail;
2862 fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2864 memset (dummy_frag, 0, sizeof (fragS));
2865 dummy_frag->fr_type = rs_fill;
2866 dummy_frag->line = listing_tail;
2867 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2868 dummy_frag->fr_symbol = symbolP;
2872 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2875 /* "set" symbols are local unless otherwise specified. */
2876 SF_SET_LOCAL (symbolP);
2877 #endif /* OBJ_COFF */
2878 } /* Make a new symbol. */
2880 symbol_table_insert (symbolP);
2885 && S_IS_DEFINED (symbolP)
2886 && S_GET_SEGMENT (symbolP) != reg_section)
2887 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2889 pseudo_set (symbolP);
2891 demand_empty_rest_of_line ();
2893 /* XXX Now we come to the Thumb specific bit of code. */
2895 THUMB_SET_FUNC (symbolP, 1);
2896 ARM_SET_THUMB (symbolP, 1);
2897 #if defined OBJ_ELF || defined OBJ_COFF
2898 ARM_SET_INTERWORK (symbolP, support_interwork);
2902 /* Directives: Mode selection. */
2904 /* .syntax [unified|divided] - choose the new unified syntax
2905 (same for Arm and Thumb encoding, modulo slight differences in what
2906 can be represented) or the old divergent syntax for each mode. */
2908 s_syntax (int unused ATTRIBUTE_UNUSED)
2912 name = input_line_pointer;
2913 delim = get_symbol_end ();
2915 if (!strcasecmp (name, "unified"))
2916 unified_syntax = TRUE;
2917 else if (!strcasecmp (name, "divided"))
2918 unified_syntax = FALSE;
2921 as_bad (_("unrecognized syntax mode \"%s\""), name);
2924 *input_line_pointer = delim;
2925 demand_empty_rest_of_line ();
2928 /* Directives: sectioning and alignment. */
2930 /* Same as s_align_ptwo but align 0 => align 2. */
2933 s_align (int unused ATTRIBUTE_UNUSED)
2938 long max_alignment = 15;
2940 temp = get_absolute_expression ();
2941 if (temp > max_alignment)
2942 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2945 as_bad (_("alignment negative. 0 assumed."));
2949 if (*input_line_pointer == ',')
2951 input_line_pointer++;
2952 temp_fill = get_absolute_expression ();
2964 /* Only make a frag if we HAVE to. */
2965 if (temp && !need_pass_2)
2967 if (!fill_p && subseg_text_p (now_seg))
2968 frag_align_code (temp, 0);
2970 frag_align (temp, (int) temp_fill, 0);
2972 demand_empty_rest_of_line ();
2974 record_alignment (now_seg, temp);
2978 s_bss (int ignore ATTRIBUTE_UNUSED)
2980 /* We don't support putting frags in the BSS segment, we fake it by
2981 marking in_bss, then looking at s_skip for clues. */
2982 subseg_set (bss_section, 0);
2983 demand_empty_rest_of_line ();
2985 #ifdef md_elf_section_change_hook
2986 md_elf_section_change_hook ();
2991 s_even (int ignore ATTRIBUTE_UNUSED)
2993 /* Never make frag if expect extra pass. */
2995 frag_align (1, 0, 0);
2997 record_alignment (now_seg, 1);
2999 demand_empty_rest_of_line ();
3002 /* Directives: Literal pools. */
3004 static literal_pool *
3005 find_literal_pool (void)
3007 literal_pool * pool;
3009 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3011 if (pool->section == now_seg
3012 && pool->sub_section == now_subseg)
3019 static literal_pool *
3020 find_or_make_literal_pool (void)
3022 /* Next literal pool ID number. */
3023 static unsigned int latest_pool_num = 1;
3024 literal_pool * pool;
3026 pool = find_literal_pool ();
3030 /* Create a new pool. */
3031 pool = (literal_pool *) xmalloc (sizeof (* pool));
3035 pool->next_free_entry = 0;
3036 pool->section = now_seg;
3037 pool->sub_section = now_subseg;
3038 pool->next = list_of_pools;
3039 pool->symbol = NULL;
3041 /* Add it to the list. */
3042 list_of_pools = pool;
3045 /* New pools, and emptied pools, will have a NULL symbol. */
3046 if (pool->symbol == NULL)
3048 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3049 (valueT) 0, &zero_address_frag);
3050 pool->id = latest_pool_num ++;
3057 /* Add the literal in the global 'inst'
3058 structure to the relevant literal pool. */
3061 add_to_lit_pool (void)
3063 literal_pool * pool;
3066 pool = find_or_make_literal_pool ();
3068 /* Check if this literal value is already in the pool. */
3069 for (entry = 0; entry < pool->next_free_entry; entry ++)
3071 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3072 && (inst.reloc.exp.X_op == O_constant)
3073 && (pool->literals[entry].X_add_number
3074 == inst.reloc.exp.X_add_number)
3075 && (pool->literals[entry].X_unsigned
3076 == inst.reloc.exp.X_unsigned))
3079 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3080 && (inst.reloc.exp.X_op == O_symbol)
3081 && (pool->literals[entry].X_add_number
3082 == inst.reloc.exp.X_add_number)
3083 && (pool->literals[entry].X_add_symbol
3084 == inst.reloc.exp.X_add_symbol)
3085 && (pool->literals[entry].X_op_symbol
3086 == inst.reloc.exp.X_op_symbol))
3090 /* Do we need to create a new entry? */
3091 if (entry == pool->next_free_entry)
3093 if (entry >= MAX_LITERAL_POOL_SIZE)
3095 inst.error = _("literal pool overflow");
3099 pool->literals[entry] = inst.reloc.exp;
3101 /* PR ld/12974: Record the location of the first source line to reference
3102 this entry in the literal pool. If it turns out during linking that the
3103 symbol does not exist we will be able to give an accurate line number for
3104 the (first use of the) missing reference. */
3105 if (debug_type == DEBUG_DWARF2)
3106 dwarf2_where (pool->locs + entry);
3108 pool->next_free_entry += 1;
3111 inst.reloc.exp.X_op = O_symbol;
3112 inst.reloc.exp.X_add_number = ((int) entry) * 4;
3113 inst.reloc.exp.X_add_symbol = pool->symbol;
3118 /* Can't use symbol_new here, so have to create a symbol and then at
3119 a later date assign it a value. Thats what these functions do. */
3122 symbol_locate (symbolS * symbolP,
3123 const char * name, /* It is copied, the caller can modify. */
3124 segT segment, /* Segment identifier (SEG_<something>). */
3125 valueT valu, /* Symbol value. */
3126 fragS * frag) /* Associated fragment. */
3128 unsigned int name_length;
3129 char * preserved_copy_of_name;
3131 name_length = strlen (name) + 1; /* +1 for \0. */
3132 obstack_grow (¬es, name, name_length);
3133 preserved_copy_of_name = (char *) obstack_finish (¬es);
3135 #ifdef tc_canonicalize_symbol_name
3136 preserved_copy_of_name =
3137 tc_canonicalize_symbol_name (preserved_copy_of_name);
3140 S_SET_NAME (symbolP, preserved_copy_of_name);
3142 S_SET_SEGMENT (symbolP, segment);
3143 S_SET_VALUE (symbolP, valu);
3144 symbol_clear_list_pointers (symbolP);
3146 symbol_set_frag (symbolP, frag);
3148 /* Link to end of symbol chain. */
3150 extern int symbol_table_frozen;
3152 if (symbol_table_frozen)
3156 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3158 obj_symbol_new_hook (symbolP);
3160 #ifdef tc_symbol_new_hook
3161 tc_symbol_new_hook (symbolP);
3165 verify_symbol_chain (symbol_rootP, symbol_lastP);
3166 #endif /* DEBUG_SYMS */
3171 s_ltorg (int ignored ATTRIBUTE_UNUSED)
3174 literal_pool * pool;
3177 pool = find_literal_pool ();
3179 || pool->symbol == NULL
3180 || pool->next_free_entry == 0)
3183 mapping_state (MAP_DATA);
3185 /* Align pool as you have word accesses.
3186 Only make a frag if we have to. */
3188 frag_align (2, 0, 0);
3190 record_alignment (now_seg, 2);
3192 sprintf (sym_name, "$$lit_\002%x", pool->id);
3194 symbol_locate (pool->symbol, sym_name, now_seg,
3195 (valueT) frag_now_fix (), frag_now);
3196 symbol_table_insert (pool->symbol);
3198 ARM_SET_THUMB (pool->symbol, thumb_mode);
3200 #if defined OBJ_COFF || defined OBJ_ELF
3201 ARM_SET_INTERWORK (pool->symbol, support_interwork);
3204 for (entry = 0; entry < pool->next_free_entry; entry ++)
3207 if (debug_type == DEBUG_DWARF2)
3208 dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3210 /* First output the expression in the instruction to the pool. */
3211 emit_expr (&(pool->literals[entry]), 4); /* .word */
3214 /* Mark the pool as empty. */
3215 pool->next_free_entry = 0;
3216 pool->symbol = NULL;
3220 /* Forward declarations for functions below, in the MD interface
3222 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3223 static valueT create_unwind_entry (int);
3224 static void start_unwind_section (const segT, int);
3225 static void add_unwind_opcode (valueT, int);
3226 static void flush_pending_unwind (void);
3228 /* Directives: Data. */
3231 s_arm_elf_cons (int nbytes)
3235 #ifdef md_flush_pending_output
3236 md_flush_pending_output ();
3239 if (is_it_end_of_statement ())
3241 demand_empty_rest_of_line ();
3245 #ifdef md_cons_align
3246 md_cons_align (nbytes);
3249 mapping_state (MAP_DATA);
3253 char *base = input_line_pointer;
3257 if (exp.X_op != O_symbol)
3258 emit_expr (&exp, (unsigned int) nbytes);
3261 char *before_reloc = input_line_pointer;
3262 reloc = parse_reloc (&input_line_pointer);
3265 as_bad (_("unrecognized relocation suffix"));
3266 ignore_rest_of_line ();
3269 else if (reloc == BFD_RELOC_UNUSED)
3270 emit_expr (&exp, (unsigned int) nbytes);
3273 reloc_howto_type *howto = (reloc_howto_type *)
3274 bfd_reloc_type_lookup (stdoutput,
3275 (bfd_reloc_code_real_type) reloc);
3276 int size = bfd_get_reloc_size (howto);
3278 if (reloc == BFD_RELOC_ARM_PLT32)
3280 as_bad (_("(plt) is only valid on branch targets"));
3281 reloc = BFD_RELOC_UNUSED;
3286 as_bad (_("%s relocations do not fit in %d bytes"),
3287 howto->name, nbytes);
3290 /* We've parsed an expression stopping at O_symbol.
3291 But there may be more expression left now that we
3292 have parsed the relocation marker. Parse it again.
3293 XXX Surely there is a cleaner way to do this. */
3294 char *p = input_line_pointer;
3296 char *save_buf = (char *) alloca (input_line_pointer - base);
3297 memcpy (save_buf, base, input_line_pointer - base);
3298 memmove (base + (input_line_pointer - before_reloc),
3299 base, before_reloc - base);
3301 input_line_pointer = base + (input_line_pointer-before_reloc);
3303 memcpy (base, save_buf, p - base);
3305 offset = nbytes - size;
3306 p = frag_more ((int) nbytes);
3307 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3308 size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3313 while (*input_line_pointer++ == ',');
3315 /* Put terminator back into stream. */
3316 input_line_pointer --;
3317 demand_empty_rest_of_line ();
3320 /* Emit an expression containing a 32-bit thumb instruction.
3321 Implementation based on put_thumb32_insn. */
3324 emit_thumb32_expr (expressionS * exp)
3326 expressionS exp_high = *exp;
3328 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3329 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3330 exp->X_add_number &= 0xffff;
3331 emit_expr (exp, (unsigned int) THUMB_SIZE);
3334 /* Guess the instruction size based on the opcode. */
3337 thumb_insn_size (int opcode)
3339 if ((unsigned int) opcode < 0xe800u)
3341 else if ((unsigned int) opcode >= 0xe8000000u)
3348 emit_insn (expressionS *exp, int nbytes)
3352 if (exp->X_op == O_constant)
3357 size = thumb_insn_size (exp->X_add_number);
3361 if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3363 as_bad (_(".inst.n operand too big. "\
3364 "Use .inst.w instead"));
3369 if (now_it.state == AUTOMATIC_IT_BLOCK)
3370 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3372 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3374 if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3375 emit_thumb32_expr (exp);
3377 emit_expr (exp, (unsigned int) size);
3379 it_fsm_post_encode ();
3383 as_bad (_("cannot determine Thumb instruction size. " \
3384 "Use .inst.n/.inst.w instead"));
3387 as_bad (_("constant expression required"));
3392 /* Like s_arm_elf_cons but do not use md_cons_align and
3393 set the mapping state to MAP_ARM/MAP_THUMB. */
3396 s_arm_elf_inst (int nbytes)
3398 if (is_it_end_of_statement ())
3400 demand_empty_rest_of_line ();
3404 /* Calling mapping_state () here will not change ARM/THUMB,
3405 but will ensure not to be in DATA state. */
3408 mapping_state (MAP_THUMB);
3413 as_bad (_("width suffixes are invalid in ARM mode"));
3414 ignore_rest_of_line ();
3420 mapping_state (MAP_ARM);
3429 if (! emit_insn (& exp, nbytes))
3431 ignore_rest_of_line ();
3435 while (*input_line_pointer++ == ',');
3437 /* Put terminator back into stream. */
3438 input_line_pointer --;
3439 demand_empty_rest_of_line ();
3442 /* Parse a .rel31 directive. */
3445 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3452 if (*input_line_pointer == '1')
3453 highbit = 0x80000000;
3454 else if (*input_line_pointer != '0')
3455 as_bad (_("expected 0 or 1"));
3457 input_line_pointer++;
3458 if (*input_line_pointer != ',')
3459 as_bad (_("missing comma"));
3460 input_line_pointer++;
3462 #ifdef md_flush_pending_output
3463 md_flush_pending_output ();
3466 #ifdef md_cons_align
3470 mapping_state (MAP_DATA);
3475 md_number_to_chars (p, highbit, 4);
3476 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3477 BFD_RELOC_ARM_PREL31);
3479 demand_empty_rest_of_line ();
3482 /* Directives: AEABI stack-unwind tables. */
3484 /* Parse an unwind_fnstart directive. Simply records the current location. */
3487 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3489 demand_empty_rest_of_line ();
3490 if (unwind.proc_start)
3492 as_bad (_("duplicate .fnstart directive"));
3496 /* Mark the start of the function. */
3497 unwind.proc_start = expr_build_dot ();
3499 /* Reset the rest of the unwind info. */
3500 unwind.opcode_count = 0;
3501 unwind.table_entry = NULL;
3502 unwind.personality_routine = NULL;
3503 unwind.personality_index = -1;
3504 unwind.frame_size = 0;
3505 unwind.fp_offset = 0;
3506 unwind.fp_reg = REG_SP;
3508 unwind.sp_restored = 0;
3512 /* Parse a handlerdata directive. Creates the exception handling table entry
3513 for the function. */
3516 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3518 demand_empty_rest_of_line ();
3519 if (!unwind.proc_start)
3520 as_bad (MISSING_FNSTART);
3522 if (unwind.table_entry)
3523 as_bad (_("duplicate .handlerdata directive"));
3525 create_unwind_entry (1);
3528 /* Parse an unwind_fnend directive. Generates the index table entry. */
3531 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3536 unsigned int marked_pr_dependency;
3538 demand_empty_rest_of_line ();
3540 if (!unwind.proc_start)
3542 as_bad (_(".fnend directive without .fnstart"));
3546 /* Add eh table entry. */
3547 if (unwind.table_entry == NULL)
3548 val = create_unwind_entry (0);
3552 /* Add index table entry. This is two words. */
3553 start_unwind_section (unwind.saved_seg, 1);
3554 frag_align (2, 0, 0);
3555 record_alignment (now_seg, 2);
3557 ptr = frag_more (8);
3559 where = frag_now_fix () - 8;
3561 /* Self relative offset of the function start. */
3562 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3563 BFD_RELOC_ARM_PREL31);
3565 /* Indicate dependency on EHABI-defined personality routines to the
3566 linker, if it hasn't been done already. */
3567 marked_pr_dependency
3568 = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
3569 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3570 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3572 static const char *const name[] =
3574 "__aeabi_unwind_cpp_pr0",
3575 "__aeabi_unwind_cpp_pr1",
3576 "__aeabi_unwind_cpp_pr2"
3578 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3579 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3580 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3581 |= 1 << unwind.personality_index;
3585 /* Inline exception table entry. */
3586 md_number_to_chars (ptr + 4, val, 4);
3588 /* Self relative offset of the table entry. */
3589 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3590 BFD_RELOC_ARM_PREL31);
3592 /* Restore the original section. */
3593 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3595 unwind.proc_start = NULL;
3599 /* Parse an unwind_cantunwind directive. */
3602 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3604 demand_empty_rest_of_line ();
3605 if (!unwind.proc_start)
3606 as_bad (MISSING_FNSTART);
3608 if (unwind.personality_routine || unwind.personality_index != -1)
3609 as_bad (_("personality routine specified for cantunwind frame"));
3611 unwind.personality_index = -2;
3615 /* Parse a personalityindex directive. */
3618 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3622 if (!unwind.proc_start)
3623 as_bad (MISSING_FNSTART);
3625 if (unwind.personality_routine || unwind.personality_index != -1)
3626 as_bad (_("duplicate .personalityindex directive"));
3630 if (exp.X_op != O_constant
3631 || exp.X_add_number < 0 || exp.X_add_number > 15)
3633 as_bad (_("bad personality routine number"));
3634 ignore_rest_of_line ();
3638 unwind.personality_index = exp.X_add_number;
3640 demand_empty_rest_of_line ();
3644 /* Parse a personality directive. */
3647 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3651 if (!unwind.proc_start)
3652 as_bad (MISSING_FNSTART);
3654 if (unwind.personality_routine || unwind.personality_index != -1)
3655 as_bad (_("duplicate .personality directive"));
3657 name = input_line_pointer;
3658 c = get_symbol_end ();
3659 p = input_line_pointer;
3660 unwind.personality_routine = symbol_find_or_make (name);
3662 demand_empty_rest_of_line ();
3666 /* Parse a directive saving core registers. */
3669 s_arm_unwind_save_core (void)
3675 range = parse_reg_list (&input_line_pointer);
3678 as_bad (_("expected register list"));
3679 ignore_rest_of_line ();
3683 demand_empty_rest_of_line ();
3685 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3686 into .unwind_save {..., sp...}. We aren't bothered about the value of
3687 ip because it is clobbered by calls. */
3688 if (unwind.sp_restored && unwind.fp_reg == 12
3689 && (range & 0x3000) == 0x1000)
3691 unwind.opcode_count--;
3692 unwind.sp_restored = 0;
3693 range = (range | 0x2000) & ~0x1000;
3694 unwind.pending_offset = 0;
3700 /* See if we can use the short opcodes. These pop a block of up to 8
3701 registers starting with r4, plus maybe r14. */
3702 for (n = 0; n < 8; n++)
3704 /* Break at the first non-saved register. */
3705 if ((range & (1 << (n + 4))) == 0)
3708 /* See if there are any other bits set. */
3709 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3711 /* Use the long form. */
3712 op = 0x8000 | ((range >> 4) & 0xfff);
3713 add_unwind_opcode (op, 2);
3717 /* Use the short form. */
3719 op = 0xa8; /* Pop r14. */
3721 op = 0xa0; /* Do not pop r14. */
3723 add_unwind_opcode (op, 1);
3730 op = 0xb100 | (range & 0xf);
3731 add_unwind_opcode (op, 2);
3734 /* Record the number of bytes pushed. */
3735 for (n = 0; n < 16; n++)
3737 if (range & (1 << n))
3738 unwind.frame_size += 4;
3743 /* Parse a directive saving FPA registers. */
3746 s_arm_unwind_save_fpa (int reg)
3752 /* Get Number of registers to transfer. */
3753 if (skip_past_comma (&input_line_pointer) != FAIL)
3756 exp.X_op = O_illegal;
3758 if (exp.X_op != O_constant)
3760 as_bad (_("expected , <constant>"));
3761 ignore_rest_of_line ();
3765 num_regs = exp.X_add_number;
3767 if (num_regs < 1 || num_regs > 4)
3769 as_bad (_("number of registers must be in the range [1:4]"));
3770 ignore_rest_of_line ();
3774 demand_empty_rest_of_line ();
3779 op = 0xb4 | (num_regs - 1);
3780 add_unwind_opcode (op, 1);
3785 op = 0xc800 | (reg << 4) | (num_regs - 1);
3786 add_unwind_opcode (op, 2);
3788 unwind.frame_size += num_regs * 12;
3792 /* Parse a directive saving VFP registers for ARMv6 and above. */
3795 s_arm_unwind_save_vfp_armv6 (void)
3800 int num_vfpv3_regs = 0;
3801 int num_regs_below_16;
3803 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3806 as_bad (_("expected register list"));
3807 ignore_rest_of_line ();
3811 demand_empty_rest_of_line ();
3813 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3814 than FSTMX/FLDMX-style ones). */
3816 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3818 num_vfpv3_regs = count;
3819 else if (start + count > 16)
3820 num_vfpv3_regs = start + count - 16;
3822 if (num_vfpv3_regs > 0)
3824 int start_offset = start > 16 ? start - 16 : 0;
3825 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3826 add_unwind_opcode (op, 2);
3829 /* Generate opcode for registers numbered in the range 0 .. 15. */
3830 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3831 gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
3832 if (num_regs_below_16 > 0)
3834 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3835 add_unwind_opcode (op, 2);
3838 unwind.frame_size += count * 8;
3842 /* Parse a directive saving VFP registers for pre-ARMv6. */
3845 s_arm_unwind_save_vfp (void)
3851 count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D);
3854 as_bad (_("expected register list"));
3855 ignore_rest_of_line ();
3859 demand_empty_rest_of_line ();
3864 op = 0xb8 | (count - 1);
3865 add_unwind_opcode (op, 1);
3870 op = 0xb300 | (reg << 4) | (count - 1);
3871 add_unwind_opcode (op, 2);
3873 unwind.frame_size += count * 8 + 4;
3877 /* Parse a directive saving iWMMXt data registers. */
3880 s_arm_unwind_save_mmxwr (void)
3888 if (*input_line_pointer == '{')
3889 input_line_pointer++;
3893 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3897 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3902 as_tsktsk (_("register list not in ascending order"));
3905 if (*input_line_pointer == '-')
3907 input_line_pointer++;
3908 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3911 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3914 else if (reg >= hi_reg)
3916 as_bad (_("bad register range"));
3919 for (; reg < hi_reg; reg++)
3923 while (skip_past_comma (&input_line_pointer) != FAIL);
3925 if (*input_line_pointer == '}')
3926 input_line_pointer++;
3928 demand_empty_rest_of_line ();
3930 /* Generate any deferred opcodes because we're going to be looking at
3932 flush_pending_unwind ();
3934 for (i = 0; i < 16; i++)
3936 if (mask & (1 << i))
3937 unwind.frame_size += 8;
3940 /* Attempt to combine with a previous opcode. We do this because gcc
3941 likes to output separate unwind directives for a single block of
3943 if (unwind.opcode_count > 0)
3945 i = unwind.opcodes[unwind.opcode_count - 1];
3946 if ((i & 0xf8) == 0xc0)
3949 /* Only merge if the blocks are contiguous. */
3952 if ((mask & 0xfe00) == (1 << 9))
3954 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3955 unwind.opcode_count--;
3958 else if (i == 6 && unwind.opcode_count >= 2)
3960 i = unwind.opcodes[unwind.opcode_count - 2];
3964 op = 0xffff << (reg - 1);
3966 && ((mask & op) == (1u << (reg - 1))))
3968 op = (1 << (reg + i + 1)) - 1;
3969 op &= ~((1 << reg) - 1);
3971 unwind.opcode_count -= 2;
3978 /* We want to generate opcodes in the order the registers have been
3979 saved, ie. descending order. */
3980 for (reg = 15; reg >= -1; reg--)
3982 /* Save registers in blocks. */
3984 || !(mask & (1 << reg)))
3986 /* We found an unsaved reg. Generate opcodes to save the
3993 op = 0xc0 | (hi_reg - 10);
3994 add_unwind_opcode (op, 1);
3999 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4000 add_unwind_opcode (op, 2);
4009 ignore_rest_of_line ();
4013 s_arm_unwind_save_mmxwcg (void)
4020 if (*input_line_pointer == '{')
4021 input_line_pointer++;
4025 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4029 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4035 as_tsktsk (_("register list not in ascending order"));
4038 if (*input_line_pointer == '-')
4040 input_line_pointer++;
4041 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4044 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4047 else if (reg >= hi_reg)
4049 as_bad (_("bad register range"));
4052 for (; reg < hi_reg; reg++)
4056 while (skip_past_comma (&input_line_pointer) != FAIL);
4058 if (*input_line_pointer == '}')
4059 input_line_pointer++;
4061 demand_empty_rest_of_line ();
4063 /* Generate any deferred opcodes because we're going to be looking at
4065 flush_pending_unwind ();
4067 for (reg = 0; reg < 16; reg++)
4069 if (mask & (1 << reg))
4070 unwind.frame_size += 4;
4073 add_unwind_opcode (op, 2);
4076 ignore_rest_of_line ();
4080 /* Parse an unwind_save directive.
4081 If the argument is non-zero, this is a .vsave directive. */
4084 s_arm_unwind_save (int arch_v6)
4087 struct reg_entry *reg;
4088 bfd_boolean had_brace = FALSE;
4090 if (!unwind.proc_start)
4091 as_bad (MISSING_FNSTART);
4093 /* Figure out what sort of save we have. */
4094 peek = input_line_pointer;
4102 reg = arm_reg_parse_multi (&peek);
4106 as_bad (_("register expected"));
4107 ignore_rest_of_line ();
4116 as_bad (_("FPA .unwind_save does not take a register list"));
4117 ignore_rest_of_line ();
4120 input_line_pointer = peek;
4121 s_arm_unwind_save_fpa (reg->number);
4124 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
4127 s_arm_unwind_save_vfp_armv6 ();
4129 s_arm_unwind_save_vfp ();
4131 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
4132 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
4135 as_bad (_(".unwind_save does not support this kind of register"));
4136 ignore_rest_of_line ();
4141 /* Parse an unwind_movsp directive. */
4144 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4150 if (!unwind.proc_start)
4151 as_bad (MISSING_FNSTART);
4153 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4156 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4157 ignore_rest_of_line ();
4161 /* Optional constant. */
4162 if (skip_past_comma (&input_line_pointer) != FAIL)
4164 if (immediate_for_directive (&offset) == FAIL)
4170 demand_empty_rest_of_line ();
4172 if (reg == REG_SP || reg == REG_PC)
4174 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4178 if (unwind.fp_reg != REG_SP)
4179 as_bad (_("unexpected .unwind_movsp directive"));
4181 /* Generate opcode to restore the value. */
4183 add_unwind_opcode (op, 1);
4185 /* Record the information for later. */
4186 unwind.fp_reg = reg;
4187 unwind.fp_offset = unwind.frame_size - offset;
4188 unwind.sp_restored = 1;
4191 /* Parse an unwind_pad directive. */
4194 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4198 if (!unwind.proc_start)
4199 as_bad (MISSING_FNSTART);
4201 if (immediate_for_directive (&offset) == FAIL)
4206 as_bad (_("stack increment must be multiple of 4"));
4207 ignore_rest_of_line ();
4211 /* Don't generate any opcodes, just record the details for later. */
4212 unwind.frame_size += offset;
4213 unwind.pending_offset += offset;
4215 demand_empty_rest_of_line ();
4218 /* Parse an unwind_setfp directive. */
4221 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4227 if (!unwind.proc_start)
4228 as_bad (MISSING_FNSTART);
4230 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4231 if (skip_past_comma (&input_line_pointer) == FAIL)
4234 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4236 if (fp_reg == FAIL || sp_reg == FAIL)
4238 as_bad (_("expected <reg>, <reg>"));
4239 ignore_rest_of_line ();
4243 /* Optional constant. */
4244 if (skip_past_comma (&input_line_pointer) != FAIL)
4246 if (immediate_for_directive (&offset) == FAIL)
4252 demand_empty_rest_of_line ();
4254 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4256 as_bad (_("register must be either sp or set by a previous"
4257 "unwind_movsp directive"));
4261 /* Don't generate any opcodes, just record the information for later. */
4262 unwind.fp_reg = fp_reg;
4264 if (sp_reg == REG_SP)
4265 unwind.fp_offset = unwind.frame_size - offset;
4267 unwind.fp_offset -= offset;
4270 /* Parse an unwind_raw directive. */
4273 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4276 /* This is an arbitrary limit. */
4277 unsigned char op[16];
4280 if (!unwind.proc_start)
4281 as_bad (MISSING_FNSTART);
4284 if (exp.X_op == O_constant
4285 && skip_past_comma (&input_line_pointer) != FAIL)
4287 unwind.frame_size += exp.X_add_number;
4291 exp.X_op = O_illegal;
4293 if (exp.X_op != O_constant)
4295 as_bad (_("expected <offset>, <opcode>"));
4296 ignore_rest_of_line ();
4302 /* Parse the opcode. */
4307 as_bad (_("unwind opcode too long"));
4308 ignore_rest_of_line ();
4310 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4312 as_bad (_("invalid unwind opcode"));
4313 ignore_rest_of_line ();
4316 op[count++] = exp.X_add_number;
4318 /* Parse the next byte. */
4319 if (skip_past_comma (&input_line_pointer) == FAIL)
4325 /* Add the opcode bytes in reverse order. */
4327 add_unwind_opcode (op[count], 1);
4329 demand_empty_rest_of_line ();
4333 /* Parse a .eabi_attribute directive. */
4336 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4338 int tag = s_vendor_attribute (OBJ_ATTR_PROC);
4340 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4341 attributes_set_explicitly[tag] = 1;
4344 /* Emit a tls fix for the symbol. */
4347 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4351 #ifdef md_flush_pending_output
4352 md_flush_pending_output ();
4355 #ifdef md_cons_align
4359 /* Since we're just labelling the code, there's no need to define a
4362 p = obstack_next_free (&frchain_now->frch_obstack);
4363 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4364 thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4365 : BFD_RELOC_ARM_TLS_DESCSEQ);
4367 #endif /* OBJ_ELF */
4369 static void s_arm_arch (int);
4370 static void s_arm_object_arch (int);
4371 static void s_arm_cpu (int);
4372 static void s_arm_fpu (int);
4373 static void s_arm_arch_extension (int);
4378 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4385 if (exp.X_op == O_symbol)
4386 exp.X_op = O_secrel;
4388 emit_expr (&exp, 4);
4390 while (*input_line_pointer++ == ',');
4392 input_line_pointer--;
4393 demand_empty_rest_of_line ();
4397 /* This table describes all the machine specific pseudo-ops the assembler
4398 has to support. The fields are:
4399 pseudo-op name without dot
4400 function to call to execute this pseudo-op
4401 Integer arg to pass to the function. */
4403 const pseudo_typeS md_pseudo_table[] =
4405 /* Never called because '.req' does not start a line. */
4406 { "req", s_req, 0 },
4407 /* Following two are likewise never called. */
4410 { "unreq", s_unreq, 0 },
4411 { "bss", s_bss, 0 },
4412 { "align", s_align, 0 },
4413 { "arm", s_arm, 0 },
4414 { "thumb", s_thumb, 0 },
4415 { "code", s_code, 0 },
4416 { "force_thumb", s_force_thumb, 0 },
4417 { "thumb_func", s_thumb_func, 0 },
4418 { "thumb_set", s_thumb_set, 0 },
4419 { "even", s_even, 0 },
4420 { "ltorg", s_ltorg, 0 },
4421 { "pool", s_ltorg, 0 },
4422 { "syntax", s_syntax, 0 },
4423 { "cpu", s_arm_cpu, 0 },
4424 { "arch", s_arm_arch, 0 },
4425 { "object_arch", s_arm_object_arch, 0 },
4426 { "fpu", s_arm_fpu, 0 },
4427 { "arch_extension", s_arm_arch_extension, 0 },
4429 { "word", s_arm_elf_cons, 4 },
4430 { "long", s_arm_elf_cons, 4 },
4431 { "inst.n", s_arm_elf_inst, 2 },
4432 { "inst.w", s_arm_elf_inst, 4 },
4433 { "inst", s_arm_elf_inst, 0 },
4434 { "rel31", s_arm_rel31, 0 },
4435 { "fnstart", s_arm_unwind_fnstart, 0 },
4436 { "fnend", s_arm_unwind_fnend, 0 },
4437 { "cantunwind", s_arm_unwind_cantunwind, 0 },
4438 { "personality", s_arm_unwind_personality, 0 },
4439 { "personalityindex", s_arm_unwind_personalityindex, 0 },
4440 { "handlerdata", s_arm_unwind_handlerdata, 0 },
4441 { "save", s_arm_unwind_save, 0 },
4442 { "vsave", s_arm_unwind_save, 1 },
4443 { "movsp", s_arm_unwind_movsp, 0 },
4444 { "pad", s_arm_unwind_pad, 0 },
4445 { "setfp", s_arm_unwind_setfp, 0 },
4446 { "unwind_raw", s_arm_unwind_raw, 0 },
4447 { "eabi_attribute", s_arm_eabi_attribute, 0 },
4448 { "tlsdescseq", s_arm_tls_descseq, 0 },
4452 /* These are used for dwarf. */
4456 /* These are used for dwarf2. */
4457 { "file", (void (*) (int)) dwarf2_directive_file, 0 },
4458 { "loc", dwarf2_directive_loc, 0 },
4459 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
4461 { "extend", float_cons, 'x' },
4462 { "ldouble", float_cons, 'x' },
4463 { "packed", float_cons, 'p' },
4465 {"secrel32", pe_directive_secrel, 0},
4470 /* Parser functions used exclusively in instruction operands. */
4472 /* Generic immediate-value read function for use in insn parsing.
4473 STR points to the beginning of the immediate (the leading #);
4474 VAL receives the value; if the value is outside [MIN, MAX]
4475 issue an error. PREFIX_OPT is true if the immediate prefix is
4479 parse_immediate (char **str, int *val, int min, int max,
4480 bfd_boolean prefix_opt)
4483 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4484 if (exp.X_op != O_constant)
4486 inst.error = _("constant expression required");
4490 if (exp.X_add_number < min || exp.X_add_number > max)
4492 inst.error = _("immediate value out of range");
4496 *val = exp.X_add_number;
4500 /* Less-generic immediate-value read function with the possibility of loading a
4501 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4502 instructions. Puts the result directly in inst.operands[i]. */
4505 parse_big_immediate (char **str, int i)
4510 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4512 if (exp.X_op == O_constant)
4514 inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4515 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4516 O_constant. We have to be careful not to break compilation for
4517 32-bit X_add_number, though. */
4518 if ((exp.X_add_number & ~(offsetT)(0xffffffffU)) != 0)
4520 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4521 inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4522 inst.operands[i].regisimm = 1;
4525 else if (exp.X_op == O_big
4526 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32)
4528 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4530 /* Bignums have their least significant bits in
4531 generic_bignum[0]. Make sure we put 32 bits in imm and
4532 32 bits in reg, in a (hopefully) portable way. */
4533 gas_assert (parts != 0);
4535 /* Make sure that the number is not too big.
4536 PR 11972: Bignums can now be sign-extended to the
4537 size of a .octa so check that the out of range bits
4538 are all zero or all one. */
4539 if (LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 64)
4541 LITTLENUM_TYPE m = -1;
4543 if (generic_bignum[parts * 2] != 0
4544 && generic_bignum[parts * 2] != m)
4547 for (j = parts * 2 + 1; j < (unsigned) exp.X_add_number; j++)
4548 if (generic_bignum[j] != generic_bignum[j-1])
4552 inst.operands[i].imm = 0;
4553 for (j = 0; j < parts; j++, idx++)
4554 inst.operands[i].imm |= generic_bignum[idx]
4555 << (LITTLENUM_NUMBER_OF_BITS * j);
4556 inst.operands[i].reg = 0;
4557 for (j = 0; j < parts; j++, idx++)
4558 inst.operands[i].reg |= generic_bignum[idx]
4559 << (LITTLENUM_NUMBER_OF_BITS * j);
4560 inst.operands[i].regisimm = 1;
4570 /* Returns the pseudo-register number of an FPA immediate constant,
4571 or FAIL if there isn't a valid constant here. */
4574 parse_fpa_immediate (char ** str)
4576 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4582 /* First try and match exact strings, this is to guarantee
4583 that some formats will work even for cross assembly. */
4585 for (i = 0; fp_const[i]; i++)
4587 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4591 *str += strlen (fp_const[i]);
4592 if (is_end_of_line[(unsigned char) **str])
4598 /* Just because we didn't get a match doesn't mean that the constant
4599 isn't valid, just that it is in a format that we don't
4600 automatically recognize. Try parsing it with the standard
4601 expression routines. */
4603 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4605 /* Look for a raw floating point number. */
4606 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4607 && is_end_of_line[(unsigned char) *save_in])
4609 for (i = 0; i < NUM_FLOAT_VALS; i++)
4611 for (j = 0; j < MAX_LITTLENUMS; j++)
4613 if (words[j] != fp_values[i][j])
4617 if (j == MAX_LITTLENUMS)
4625 /* Try and parse a more complex expression, this will probably fail
4626 unless the code uses a floating point prefix (eg "0f"). */
4627 save_in = input_line_pointer;
4628 input_line_pointer = *str;
4629 if (expression (&exp) == absolute_section
4630 && exp.X_op == O_big
4631 && exp.X_add_number < 0)
4633 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4635 if (gen_to_words (words, 5, (long) 15) == 0)
4637 for (i = 0; i < NUM_FLOAT_VALS; i++)
4639 for (j = 0; j < MAX_LITTLENUMS; j++)
4641 if (words[j] != fp_values[i][j])
4645 if (j == MAX_LITTLENUMS)
4647 *str = input_line_pointer;
4648 input_line_pointer = save_in;
4655 *str = input_line_pointer;
4656 input_line_pointer = save_in;
4657 inst.error = _("invalid FPA immediate expression");
4661 /* Returns 1 if a number has "quarter-precision" float format
4662 0baBbbbbbc defgh000 00000000 00000000. */
4665 is_quarter_float (unsigned imm)
4667 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4668 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4671 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4672 0baBbbbbbc defgh000 00000000 00000000.
4673 The zero and minus-zero cases need special handling, since they can't be
4674 encoded in the "quarter-precision" float format, but can nonetheless be
4675 loaded as integer constants. */
4678 parse_qfloat_immediate (char **ccp, int *immed)
4682 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4683 int found_fpchar = 0;
4685 skip_past_char (&str, '#');
4687 /* We must not accidentally parse an integer as a floating-point number. Make
4688 sure that the value we parse is not an integer by checking for special
4689 characters '.' or 'e'.
4690 FIXME: This is a horrible hack, but doing better is tricky because type
4691 information isn't in a very usable state at parse time. */
4693 skip_whitespace (fpnum);
4695 if (strncmp (fpnum, "0x", 2) == 0)
4699 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4700 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4710 if ((str = atof_ieee (str, 's', words)) != NULL)
4712 unsigned fpword = 0;
4715 /* Our FP word must be 32 bits (single-precision FP). */
4716 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4718 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4722 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4735 /* Shift operands. */
4738 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4741 struct asm_shift_name
4744 enum shift_kind kind;
4747 /* Third argument to parse_shift. */
4748 enum parse_shift_mode
4750 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4751 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4752 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4753 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4754 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4757 /* Parse a <shift> specifier on an ARM data processing instruction.
4758 This has three forms:
4760 (LSL|LSR|ASL|ASR|ROR) Rs
4761 (LSL|LSR|ASL|ASR|ROR) #imm
4764 Note that ASL is assimilated to LSL in the instruction encoding, and
4765 RRX to ROR #0 (which cannot be written as such). */
4768 parse_shift (char **str, int i, enum parse_shift_mode mode)
4770 const struct asm_shift_name *shift_name;
4771 enum shift_kind shift;
4776 for (p = *str; ISALPHA (*p); p++)
4781 inst.error = _("shift expression expected");
4785 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
4788 if (shift_name == NULL)
4790 inst.error = _("shift expression expected");
4794 shift = shift_name->kind;
4798 case NO_SHIFT_RESTRICT:
4799 case SHIFT_IMMEDIATE: break;
4801 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4802 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4804 inst.error = _("'LSL' or 'ASR' required");
4809 case SHIFT_LSL_IMMEDIATE:
4810 if (shift != SHIFT_LSL)
4812 inst.error = _("'LSL' required");
4817 case SHIFT_ASR_IMMEDIATE:
4818 if (shift != SHIFT_ASR)
4820 inst.error = _("'ASR' required");
4828 if (shift != SHIFT_RRX)
4830 /* Whitespace can appear here if the next thing is a bare digit. */
4831 skip_whitespace (p);
4833 if (mode == NO_SHIFT_RESTRICT
4834 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4836 inst.operands[i].imm = reg;
4837 inst.operands[i].immisreg = 1;
4839 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4842 inst.operands[i].shift_kind = shift;
4843 inst.operands[i].shifted = 1;
4848 /* Parse a <shifter_operand> for an ARM data processing instruction:
4851 #<immediate>, <rotate>
4855 where <shift> is defined by parse_shift above, and <rotate> is a
4856 multiple of 2 between 0 and 30. Validation of immediate operands
4857 is deferred to md_apply_fix. */
4860 parse_shifter_operand (char **str, int i)
4865 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4867 inst.operands[i].reg = value;
4868 inst.operands[i].isreg = 1;
4870 /* parse_shift will override this if appropriate */
4871 inst.reloc.exp.X_op = O_constant;
4872 inst.reloc.exp.X_add_number = 0;
4874 if (skip_past_comma (str) == FAIL)
4877 /* Shift operation on register. */
4878 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4881 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4884 if (skip_past_comma (str) == SUCCESS)
4886 /* #x, y -- ie explicit rotation by Y. */
4887 if (my_get_expression (&exp, str, GE_NO_PREFIX))
4890 if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4892 inst.error = _("constant expression expected");
4896 value = exp.X_add_number;
4897 if (value < 0 || value > 30 || value % 2 != 0)
4899 inst.error = _("invalid rotation");
4902 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4904 inst.error = _("invalid constant");
4908 /* Encode as specified. */
4909 inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
4913 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4914 inst.reloc.pc_rel = 0;
4918 /* Group relocation information. Each entry in the table contains the
4919 textual name of the relocation as may appear in assembler source
4920 and must end with a colon.
4921 Along with this textual name are the relocation codes to be used if
4922 the corresponding instruction is an ALU instruction (ADD or SUB only),
4923 an LDR, an LDRS, or an LDC. */
4925 struct group_reloc_table_entry
4936 /* Varieties of non-ALU group relocation. */
4943 static struct group_reloc_table_entry group_reloc_table[] =
4944 { /* Program counter relative: */
4946 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
4951 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
4952 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
4953 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
4954 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
4956 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
4961 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
4962 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
4963 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
4964 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
4966 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
4967 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
4968 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
4969 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
4970 /* Section base relative */
4972 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
4977 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
4978 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
4979 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
4980 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
4982 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
4987 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
4988 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
4989 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
4990 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
4992 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
4993 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
4994 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
4995 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */
4997 /* Given the address of a pointer pointing to the textual name of a group
4998 relocation as may appear in assembler source, attempt to find its details
4999 in group_reloc_table. The pointer will be updated to the character after
5000 the trailing colon. On failure, FAIL will be returned; SUCCESS
5001 otherwise. On success, *entry will be updated to point at the relevant
5002 group_reloc_table entry. */
5005 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5008 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5010 int length = strlen (group_reloc_table[i].name);
5012 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5013 && (*str)[length] == ':')
5015 *out = &group_reloc_table[i];
5016 *str += (length + 1);
5024 /* Parse a <shifter_operand> for an ARM data processing instruction
5025 (as for parse_shifter_operand) where group relocations are allowed:
5028 #<immediate>, <rotate>
5029 #:<group_reloc>:<expression>
5033 where <group_reloc> is one of the strings defined in group_reloc_table.
5034 The hashes are optional.
5036 Everything else is as for parse_shifter_operand. */
5038 static parse_operand_result
5039 parse_shifter_operand_group_reloc (char **str, int i)
5041 /* Determine if we have the sequence of characters #: or just :
5042 coming next. If we do, then we check for a group relocation.
5043 If we don't, punt the whole lot to parse_shifter_operand. */
5045 if (((*str)[0] == '#' && (*str)[1] == ':')
5046 || (*str)[0] == ':')
5048 struct group_reloc_table_entry *entry;
5050 if ((*str)[0] == '#')
5055 /* Try to parse a group relocation. Anything else is an error. */
5056 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5058 inst.error = _("unknown group relocation");
5059 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5062 /* We now have the group relocation table entry corresponding to
5063 the name in the assembler source. Next, we parse the expression. */
5064 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5065 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5067 /* Record the relocation type (always the ALU variant here). */
5068 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5069 gas_assert (inst.reloc.type != 0);
5071 return PARSE_OPERAND_SUCCESS;
5074 return parse_shifter_operand (str, i) == SUCCESS
5075 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5077 /* Never reached. */
5080 /* Parse a Neon alignment expression. Information is written to
5081 inst.operands[i]. We assume the initial ':' has been skipped.
5083 align .imm = align << 8, .immisalign=1, .preind=0 */
5084 static parse_operand_result
5085 parse_neon_alignment (char **str, int i)
5090 my_get_expression (&exp, &p, GE_NO_PREFIX);
5092 if (exp.X_op != O_constant)
5094 inst.error = _("alignment must be constant");
5095 return PARSE_OPERAND_FAIL;
5098 inst.operands[i].imm = exp.X_add_number << 8;
5099 inst.operands[i].immisalign = 1;
5100 /* Alignments are not pre-indexes. */
5101 inst.operands[i].preind = 0;
5104 return PARSE_OPERAND_SUCCESS;
5107 /* Parse all forms of an ARM address expression. Information is written
5108 to inst.operands[i] and/or inst.reloc.
5110 Preindexed addressing (.preind=1):
5112 [Rn, #offset] .reg=Rn .reloc.exp=offset
5113 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5114 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5115 .shift_kind=shift .reloc.exp=shift_imm
5117 These three may have a trailing ! which causes .writeback to be set also.
5119 Postindexed addressing (.postind=1, .writeback=1):
5121 [Rn], #offset .reg=Rn .reloc.exp=offset
5122 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5123 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5124 .shift_kind=shift .reloc.exp=shift_imm
5126 Unindexed addressing (.preind=0, .postind=0):
5128 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5132 [Rn]{!} shorthand for [Rn,#0]{!}
5133 =immediate .isreg=0 .reloc.exp=immediate
5134 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5136 It is the caller's responsibility to check for addressing modes not
5137 supported by the instruction, and to set inst.reloc.type. */
5139 static parse_operand_result
5140 parse_address_main (char **str, int i, int group_relocations,
5141 group_reloc_type group_type)
5146 if (skip_past_char (&p, '[') == FAIL)
5148 if (skip_past_char (&p, '=') == FAIL)
5150 /* Bare address - translate to PC-relative offset. */
5151 inst.reloc.pc_rel = 1;
5152 inst.operands[i].reg = REG_PC;
5153 inst.operands[i].isreg = 1;
5154 inst.operands[i].preind = 1;
5156 /* Otherwise a load-constant pseudo op, no special treatment needed here. */
5158 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5159 return PARSE_OPERAND_FAIL;
5162 return PARSE_OPERAND_SUCCESS;
5165 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5167 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5168 return PARSE_OPERAND_FAIL;
5170 inst.operands[i].reg = reg;
5171 inst.operands[i].isreg = 1;
5173 if (skip_past_comma (&p) == SUCCESS)
5175 inst.operands[i].preind = 1;
5178 else if (*p == '-') p++, inst.operands[i].negative = 1;
5180 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5182 inst.operands[i].imm = reg;
5183 inst.operands[i].immisreg = 1;
5185 if (skip_past_comma (&p) == SUCCESS)
5186 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5187 return PARSE_OPERAND_FAIL;
5189 else if (skip_past_char (&p, ':') == SUCCESS)
5191 /* FIXME: '@' should be used here, but it's filtered out by generic
5192 code before we get to see it here. This may be subject to
5194 parse_operand_result result = parse_neon_alignment (&p, i);
5196 if (result != PARSE_OPERAND_SUCCESS)
5201 if (inst.operands[i].negative)
5203 inst.operands[i].negative = 0;
5207 if (group_relocations
5208 && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5210 struct group_reloc_table_entry *entry;
5212 /* Skip over the #: or : sequence. */
5218 /* Try to parse a group relocation. Anything else is an
5220 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5222 inst.error = _("unknown group relocation");
5223 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5226 /* We now have the group relocation table entry corresponding to
5227 the name in the assembler source. Next, we parse the
5229 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5230 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5232 /* Record the relocation type. */
5236 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
5240 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
5244 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
5251 if (inst.reloc.type == 0)
5253 inst.error = _("this group relocation is not allowed on this instruction");
5254 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5260 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5261 return PARSE_OPERAND_FAIL;
5262 /* If the offset is 0, find out if it's a +0 or -0. */
5263 if (inst.reloc.exp.X_op == O_constant
5264 && inst.reloc.exp.X_add_number == 0)
5266 skip_whitespace (q);
5270 skip_whitespace (q);
5273 inst.operands[i].negative = 1;
5278 else if (skip_past_char (&p, ':') == SUCCESS)
5280 /* FIXME: '@' should be used here, but it's filtered out by generic code
5281 before we get to see it here. This may be subject to change. */
5282 parse_operand_result result = parse_neon_alignment (&p, i);
5284 if (result != PARSE_OPERAND_SUCCESS)
5288 if (skip_past_char (&p, ']') == FAIL)
5290 inst.error = _("']' expected");
5291 return PARSE_OPERAND_FAIL;
5294 if (skip_past_char (&p, '!') == SUCCESS)
5295 inst.operands[i].writeback = 1;
5297 else if (skip_past_comma (&p) == SUCCESS)
5299 if (skip_past_char (&p, '{') == SUCCESS)
5301 /* [Rn], {expr} - unindexed, with option */
5302 if (parse_immediate (&p, &inst.operands[i].imm,
5303 0, 255, TRUE) == FAIL)
5304 return PARSE_OPERAND_FAIL;
5306 if (skip_past_char (&p, '}') == FAIL)
5308 inst.error = _("'}' expected at end of 'option' field");
5309 return PARSE_OPERAND_FAIL;
5311 if (inst.operands[i].preind)
5313 inst.error = _("cannot combine index with option");
5314 return PARSE_OPERAND_FAIL;
5317 return PARSE_OPERAND_SUCCESS;
5321 inst.operands[i].postind = 1;
5322 inst.operands[i].writeback = 1;
5324 if (inst.operands[i].preind)
5326 inst.error = _("cannot combine pre- and post-indexing");
5327 return PARSE_OPERAND_FAIL;
5331 else if (*p == '-') p++, inst.operands[i].negative = 1;
5333 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5335 /* We might be using the immediate for alignment already. If we
5336 are, OR the register number into the low-order bits. */
5337 if (inst.operands[i].immisalign)
5338 inst.operands[i].imm |= reg;
5340 inst.operands[i].imm = reg;
5341 inst.operands[i].immisreg = 1;
5343 if (skip_past_comma (&p) == SUCCESS)
5344 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5345 return PARSE_OPERAND_FAIL;
5350 if (inst.operands[i].negative)
5352 inst.operands[i].negative = 0;
5355 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5356 return PARSE_OPERAND_FAIL;
5357 /* If the offset is 0, find out if it's a +0 or -0. */
5358 if (inst.reloc.exp.X_op == O_constant
5359 && inst.reloc.exp.X_add_number == 0)
5361 skip_whitespace (q);
5365 skip_whitespace (q);
5368 inst.operands[i].negative = 1;
5374 /* If at this point neither .preind nor .postind is set, we have a
5375 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5376 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5378 inst.operands[i].preind = 1;
5379 inst.reloc.exp.X_op = O_constant;
5380 inst.reloc.exp.X_add_number = 0;
5383 return PARSE_OPERAND_SUCCESS;
5387 parse_address (char **str, int i)
5389 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5393 static parse_operand_result
5394 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5396 return parse_address_main (str, i, 1, type);
5399 /* Parse an operand for a MOVW or MOVT instruction. */
5401 parse_half (char **str)
5406 skip_past_char (&p, '#');
5407 if (strncasecmp (p, ":lower16:", 9) == 0)
5408 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5409 else if (strncasecmp (p, ":upper16:", 9) == 0)
5410 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5412 if (inst.reloc.type != BFD_RELOC_UNUSED)
5415 skip_whitespace (p);
5418 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5421 if (inst.reloc.type == BFD_RELOC_UNUSED)
5423 if (inst.reloc.exp.X_op != O_constant)
5425 inst.error = _("constant expression expected");
5428 if (inst.reloc.exp.X_add_number < 0
5429 || inst.reloc.exp.X_add_number > 0xffff)
5431 inst.error = _("immediate value out of range");
5439 /* Miscellaneous. */
5441 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5442 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5444 parse_psr (char **str, bfd_boolean lhs)
5447 unsigned long psr_field;
5448 const struct asm_psr *psr;
5450 bfd_boolean is_apsr = FALSE;
5451 bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
5453 /* PR gas/12698: If the user has specified -march=all then m_profile will
5454 be TRUE, but we want to ignore it in this case as we are building for any
5455 CPU type, including non-m variants. */
5456 if (selected_cpu.core == arm_arch_any.core)
5459 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5460 feature for ease of use and backwards compatibility. */
5462 if (strncasecmp (p, "SPSR", 4) == 0)
5465 goto unsupported_psr;
5467 psr_field = SPSR_BIT;
5469 else if (strncasecmp (p, "CPSR", 4) == 0)
5472 goto unsupported_psr;
5476 else if (strncasecmp (p, "APSR", 4) == 0)
5478 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5479 and ARMv7-R architecture CPUs. */
5488 while (ISALNUM (*p) || *p == '_');
5490 if (strncasecmp (start, "iapsr", 5) == 0
5491 || strncasecmp (start, "eapsr", 5) == 0
5492 || strncasecmp (start, "xpsr", 4) == 0
5493 || strncasecmp (start, "psr", 3) == 0)
5494 p = start + strcspn (start, "rR") + 1;
5496 psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
5502 /* If APSR is being written, a bitfield may be specified. Note that
5503 APSR itself is handled above. */
5504 if (psr->field <= 3)
5506 psr_field = psr->field;
5512 /* M-profile MSR instructions have the mask field set to "10", except
5513 *PSR variants which modify APSR, which may use a different mask (and
5514 have been handled already). Do that by setting the PSR_f field
5516 return psr->field | (lhs ? PSR_f : 0);
5519 goto unsupported_psr;
5525 /* A suffix follows. */
5531 while (ISALNUM (*p) || *p == '_');
5535 /* APSR uses a notation for bits, rather than fields. */
5536 unsigned int nzcvq_bits = 0;
5537 unsigned int g_bit = 0;
5540 for (bit = start; bit != p; bit++)
5542 switch (TOLOWER (*bit))
5545 nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
5549 nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
5553 nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
5557 nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
5561 nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
5565 g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
5569 inst.error = _("unexpected bit specified after APSR");
5574 if (nzcvq_bits == 0x1f)
5579 if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
5581 inst.error = _("selected processor does not "
5582 "support DSP extension");
5589 if ((nzcvq_bits & 0x20) != 0
5590 || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
5591 || (g_bit & 0x2) != 0)
5593 inst.error = _("bad bitmask specified after APSR");
5599 psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
5604 psr_field |= psr->field;
5610 goto error; /* Garbage after "[CS]PSR". */
5612 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5613 is deprecated, but allow it anyway. */
5617 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5620 else if (!m_profile)
5621 /* These bits are never right for M-profile devices: don't set them
5622 (only code paths which read/write APSR reach here). */
5623 psr_field |= (PSR_c | PSR_f);
5629 inst.error = _("selected processor does not support requested special "
5630 "purpose register");
5634 inst.error = _("flag for {c}psr instruction expected");
5638 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5639 value suitable for splatting into the AIF field of the instruction. */
5642 parse_cps_flags (char **str)
5651 case '\0': case ',':
5654 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5655 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5656 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5659 inst.error = _("unrecognized CPS flag");
5664 if (saw_a_flag == 0)
5666 inst.error = _("missing CPS flags");
5674 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5675 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5678 parse_endian_specifier (char **str)
5683 if (strncasecmp (s, "BE", 2))
5685 else if (strncasecmp (s, "LE", 2))
5689 inst.error = _("valid endian specifiers are be or le");
5693 if (ISALNUM (s[2]) || s[2] == '_')
5695 inst.error = _("valid endian specifiers are be or le");
5700 return little_endian;
5703 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5704 value suitable for poking into the rotate field of an sxt or sxta
5705 instruction, or FAIL on error. */
5708 parse_ror (char **str)
5713 if (strncasecmp (s, "ROR", 3) == 0)
5717 inst.error = _("missing rotation field after comma");
5721 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5726 case 0: *str = s; return 0x0;
5727 case 8: *str = s; return 0x1;
5728 case 16: *str = s; return 0x2;
5729 case 24: *str = s; return 0x3;
5732 inst.error = _("rotation can only be 0, 8, 16, or 24");
5737 /* Parse a conditional code (from conds[] below). The value returned is in the
5738 range 0 .. 14, or FAIL. */
5740 parse_cond (char **str)
5743 const struct asm_cond *c;
5745 /* Condition codes are always 2 characters, so matching up to
5746 3 characters is sufficient. */
5751 while (ISALPHA (*q) && n < 3)
5753 cond[n] = TOLOWER (*q);
5758 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
5761 inst.error = _("condition required");
5769 /* Parse an option for a barrier instruction. Returns the encoding for the
5772 parse_barrier (char **str)
5775 const struct asm_barrier_opt *o;
5778 while (ISALPHA (*q))
5781 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
5790 /* Parse the operands of a table branch instruction. Similar to a memory
5793 parse_tb (char **str)
5798 if (skip_past_char (&p, '[') == FAIL)
5800 inst.error = _("'[' expected");
5804 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5806 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5809 inst.operands[0].reg = reg;
5811 if (skip_past_comma (&p) == FAIL)
5813 inst.error = _("',' expected");
5817 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5819 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5822 inst.operands[0].imm = reg;
5824 if (skip_past_comma (&p) == SUCCESS)
5826 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5828 if (inst.reloc.exp.X_add_number != 1)
5830 inst.error = _("invalid shift");
5833 inst.operands[0].shifted = 1;
5836 if (skip_past_char (&p, ']') == FAIL)
5838 inst.error = _("']' expected");
5845 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5846 information on the types the operands can take and how they are encoded.
5847 Up to four operands may be read; this function handles setting the
5848 ".present" field for each read operand itself.
5849 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5850 else returns FAIL. */
5853 parse_neon_mov (char **str, int *which_operand)
5855 int i = *which_operand, val;
5856 enum arm_reg_type rtype;
5858 struct neon_type_el optype;
5860 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5862 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5863 inst.operands[i].reg = val;
5864 inst.operands[i].isscalar = 1;
5865 inst.operands[i].vectype = optype;
5866 inst.operands[i++].present = 1;
5868 if (skip_past_comma (&ptr) == FAIL)
5871 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5874 inst.operands[i].reg = val;
5875 inst.operands[i].isreg = 1;
5876 inst.operands[i].present = 1;
5878 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5881 /* Cases 0, 1, 2, 3, 5 (D only). */
5882 if (skip_past_comma (&ptr) == FAIL)
5885 inst.operands[i].reg = val;
5886 inst.operands[i].isreg = 1;
5887 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5888 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5889 inst.operands[i].isvec = 1;
5890 inst.operands[i].vectype = optype;
5891 inst.operands[i++].present = 1;
5893 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5895 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5896 Case 13: VMOV <Sd>, <Rm> */
5897 inst.operands[i].reg = val;
5898 inst.operands[i].isreg = 1;
5899 inst.operands[i].present = 1;
5901 if (rtype == REG_TYPE_NQ)
5903 first_error (_("can't use Neon quad register here"));
5906 else if (rtype != REG_TYPE_VFS)
5909 if (skip_past_comma (&ptr) == FAIL)
5911 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5913 inst.operands[i].reg = val;
5914 inst.operands[i].isreg = 1;
5915 inst.operands[i].present = 1;
5918 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5921 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5922 Case 1: VMOV<c><q> <Dd>, <Dm>
5923 Case 8: VMOV.F32 <Sd>, <Sm>
5924 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5926 inst.operands[i].reg = val;
5927 inst.operands[i].isreg = 1;
5928 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5929 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5930 inst.operands[i].isvec = 1;
5931 inst.operands[i].vectype = optype;
5932 inst.operands[i].present = 1;
5934 if (skip_past_comma (&ptr) == SUCCESS)
5939 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5942 inst.operands[i].reg = val;
5943 inst.operands[i].isreg = 1;
5944 inst.operands[i++].present = 1;
5946 if (skip_past_comma (&ptr) == FAIL)
5949 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5952 inst.operands[i].reg = val;
5953 inst.operands[i].isreg = 1;
5954 inst.operands[i].present = 1;
5957 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5958 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5959 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5960 Case 10: VMOV.F32 <Sd>, #<imm>
5961 Case 11: VMOV.F64 <Dd>, #<imm> */
5962 inst.operands[i].immisfloat = 1;
5963 else if (parse_big_immediate (&ptr, i) == SUCCESS)
5964 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5965 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5969 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5973 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5976 inst.operands[i].reg = val;
5977 inst.operands[i].isreg = 1;
5978 inst.operands[i++].present = 1;
5980 if (skip_past_comma (&ptr) == FAIL)
5983 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5985 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5986 inst.operands[i].reg = val;
5987 inst.operands[i].isscalar = 1;
5988 inst.operands[i].present = 1;
5989 inst.operands[i].vectype = optype;
5991 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5993 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5994 inst.operands[i].reg = val;
5995 inst.operands[i].isreg = 1;
5996 inst.operands[i++].present = 1;
5998 if (skip_past_comma (&ptr) == FAIL)
6001 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
6004 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
6008 inst.operands[i].reg = val;
6009 inst.operands[i].isreg = 1;
6010 inst.operands[i].isvec = 1;
6011 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6012 inst.operands[i].vectype = optype;
6013 inst.operands[i].present = 1;
6015 if (rtype == REG_TYPE_VFS)
6019 if (skip_past_comma (&ptr) == FAIL)
6021 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6024 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6027 inst.operands[i].reg = val;
6028 inst.operands[i].isreg = 1;
6029 inst.operands[i].isvec = 1;
6030 inst.operands[i].issingle = 1;
6031 inst.operands[i].vectype = optype;
6032 inst.operands[i].present = 1;
6035 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
6039 inst.operands[i].reg = val;
6040 inst.operands[i].isreg = 1;
6041 inst.operands[i].isvec = 1;
6042 inst.operands[i].issingle = 1;
6043 inst.operands[i].vectype = optype;
6044 inst.operands[i].present = 1;
6049 first_error (_("parse error"));
6053 /* Successfully parsed the operands. Update args. */
6059 first_error (_("expected comma"));
6063 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
6067 /* Use this macro when the operand constraints are different
6068 for ARM and THUMB (e.g. ldrd). */
6069 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6070 ((arm_operand) | ((thumb_operand) << 16))
6072 /* Matcher codes for parse_operands. */
6073 enum operand_parse_code
6075 OP_stop, /* end of line */
6077 OP_RR, /* ARM register */
6078 OP_RRnpc, /* ARM register, not r15 */
6079 OP_RRnpcsp, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6080 OP_RRnpcb, /* ARM register, not r15, in square brackets */
6081 OP_RRnpctw, /* ARM register, not r15 in Thumb-state or with writeback,
6082 optional trailing ! */
6083 OP_RRw, /* ARM register, not r15, optional trailing ! */
6084 OP_RCP, /* Coprocessor number */
6085 OP_RCN, /* Coprocessor register */
6086 OP_RF, /* FPA register */
6087 OP_RVS, /* VFP single precision register */
6088 OP_RVD, /* VFP double precision register (0..15) */
6089 OP_RND, /* Neon double precision register (0..31) */
6090 OP_RNQ, /* Neon quad precision register */
6091 OP_RVSD, /* VFP single or double precision register */
6092 OP_RNDQ, /* Neon double or quad precision register */
6093 OP_RNSDQ, /* Neon single, double or quad precision register */
6094 OP_RNSC, /* Neon scalar D[X] */
6095 OP_RVC, /* VFP control register */
6096 OP_RMF, /* Maverick F register */
6097 OP_RMD, /* Maverick D register */
6098 OP_RMFX, /* Maverick FX register */
6099 OP_RMDX, /* Maverick DX register */
6100 OP_RMAX, /* Maverick AX register */
6101 OP_RMDS, /* Maverick DSPSC register */
6102 OP_RIWR, /* iWMMXt wR register */
6103 OP_RIWC, /* iWMMXt wC register */
6104 OP_RIWG, /* iWMMXt wCG register */
6105 OP_RXA, /* XScale accumulator register */
6107 OP_REGLST, /* ARM register list */
6108 OP_VRSLST, /* VFP single-precision register list */
6109 OP_VRDLST, /* VFP double-precision register list */
6110 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
6111 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
6112 OP_NSTRLST, /* Neon element/structure list */
6114 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
6115 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
6116 OP_RR_RNSC, /* ARM reg or Neon scalar. */
6117 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
6118 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
6119 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
6120 OP_VMOV, /* Neon VMOV operands. */
6121 OP_RNDQ_Ibig, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6122 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
6123 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6125 OP_I0, /* immediate zero */
6126 OP_I7, /* immediate value 0 .. 7 */
6127 OP_I15, /* 0 .. 15 */
6128 OP_I16, /* 1 .. 16 */
6129 OP_I16z, /* 0 .. 16 */
6130 OP_I31, /* 0 .. 31 */
6131 OP_I31w, /* 0 .. 31, optional trailing ! */
6132 OP_I32, /* 1 .. 32 */
6133 OP_I32z, /* 0 .. 32 */
6134 OP_I63, /* 0 .. 63 */
6135 OP_I63s, /* -64 .. 63 */
6136 OP_I64, /* 1 .. 64 */
6137 OP_I64z, /* 0 .. 64 */
6138 OP_I255, /* 0 .. 255 */
6140 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
6141 OP_I7b, /* 0 .. 7 */
6142 OP_I15b, /* 0 .. 15 */
6143 OP_I31b, /* 0 .. 31 */
6145 OP_SH, /* shifter operand */
6146 OP_SHG, /* shifter operand with possible group relocation */
6147 OP_ADDR, /* Memory address expression (any mode) */
6148 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
6149 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6150 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
6151 OP_EXP, /* arbitrary expression */
6152 OP_EXPi, /* same, with optional immediate prefix */
6153 OP_EXPr, /* same, with optional relocation suffix */
6154 OP_HALF, /* 0 .. 65535 or low/high reloc. */
6156 OP_CPSF, /* CPS flags */
6157 OP_ENDI, /* Endianness specifier */
6158 OP_wPSR, /* CPSR/SPSR/APSR mask for msr (writing). */
6159 OP_rPSR, /* CPSR/SPSR/APSR mask for msr (reading). */
6160 OP_COND, /* conditional code */
6161 OP_TB, /* Table branch. */
6163 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
6165 OP_RRnpc_I0, /* ARM register or literal 0 */
6166 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
6167 OP_RR_EXi, /* ARM register or expression with imm prefix */
6168 OP_RF_IF, /* FPA register or immediate */
6169 OP_RIWR_RIWC, /* iWMMXt R or C reg */
6170 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6172 /* Optional operands. */
6173 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
6174 OP_oI31b, /* 0 .. 31 */
6175 OP_oI32b, /* 1 .. 32 */
6176 OP_oI32z, /* 0 .. 32 */
6177 OP_oIffffb, /* 0 .. 65535 */
6178 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
6180 OP_oRR, /* ARM register */
6181 OP_oRRnpc, /* ARM register, not the PC */
6182 OP_oRRnpcsp, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6183 OP_oRRw, /* ARM register, not r15, optional trailing ! */
6184 OP_oRND, /* Optional Neon double precision register */
6185 OP_oRNQ, /* Optional Neon quad precision register */
6186 OP_oRNDQ, /* Optional Neon double or quad precision register */
6187 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
6188 OP_oSHll, /* LSL immediate */
6189 OP_oSHar, /* ASR immediate */
6190 OP_oSHllar, /* LSL or ASR immediate */
6191 OP_oROR, /* ROR 0/8/16/24 */
6192 OP_oBARRIER_I15, /* Option argument for a barrier instruction. */
6194 /* Some pre-defined mixed (ARM/THUMB) operands. */
6195 OP_RR_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6196 OP_RRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6197 OP_oRRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6199 OP_FIRST_OPTIONAL = OP_oI7b
6202 /* Generic instruction operand parser. This does no encoding and no
6203 semantic validation; it merely squirrels values away in the inst
6204 structure. Returns SUCCESS or FAIL depending on whether the
6205 specified grammar matched. */
6207 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6209 unsigned const int *upat = pattern;
6210 char *backtrack_pos = 0;
6211 const char *backtrack_error = 0;
6212 int i, val = 0, backtrack_index = 0;
6213 enum arm_reg_type rtype;
6214 parse_operand_result result;
6215 unsigned int op_parse_code;
6217 #define po_char_or_fail(chr) \
6220 if (skip_past_char (&str, chr) == FAIL) \
6225 #define po_reg_or_fail(regtype) \
6228 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6229 & inst.operands[i].vectype); \
6232 first_error (_(reg_expected_msgs[regtype])); \
6235 inst.operands[i].reg = val; \
6236 inst.operands[i].isreg = 1; \
6237 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6238 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6239 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6240 || rtype == REG_TYPE_VFD \
6241 || rtype == REG_TYPE_NQ); \
6245 #define po_reg_or_goto(regtype, label) \
6248 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6249 & inst.operands[i].vectype); \
6253 inst.operands[i].reg = val; \
6254 inst.operands[i].isreg = 1; \
6255 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6256 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6257 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6258 || rtype == REG_TYPE_VFD \
6259 || rtype == REG_TYPE_NQ); \
6263 #define po_imm_or_fail(min, max, popt) \
6266 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6268 inst.operands[i].imm = val; \
6272 #define po_scalar_or_goto(elsz, label) \
6275 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6278 inst.operands[i].reg = val; \
6279 inst.operands[i].isscalar = 1; \
6283 #define po_misc_or_fail(expr) \
6291 #define po_misc_or_fail_no_backtrack(expr) \
6295 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6296 backtrack_pos = 0; \
6297 if (result != PARSE_OPERAND_SUCCESS) \
6302 #define po_barrier_or_imm(str) \
6305 val = parse_barrier (&str); \
6308 if (ISALPHA (*str)) \
6315 if ((inst.instruction & 0xf0) == 0x60 \
6318 /* ISB can only take SY as an option. */ \
6319 inst.error = _("invalid barrier type"); \
6326 skip_whitespace (str);
6328 for (i = 0; upat[i] != OP_stop; i++)
6330 op_parse_code = upat[i];
6331 if (op_parse_code >= 1<<16)
6332 op_parse_code = thumb ? (op_parse_code >> 16)
6333 : (op_parse_code & ((1<<16)-1));
6335 if (op_parse_code >= OP_FIRST_OPTIONAL)
6337 /* Remember where we are in case we need to backtrack. */
6338 gas_assert (!backtrack_pos);
6339 backtrack_pos = str;
6340 backtrack_error = inst.error;
6341 backtrack_index = i;
6344 if (i > 0 && (i > 1 || inst.operands[0].present))
6345 po_char_or_fail (',');
6347 switch (op_parse_code)
6355 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6356 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6357 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6358 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6359 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6360 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6362 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6364 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6366 /* Also accept generic coprocessor regs for unknown registers. */
6368 po_reg_or_fail (REG_TYPE_CN);
6370 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6371 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6372 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6373 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6374 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6375 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6376 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6377 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6378 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6379 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6381 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6383 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6384 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6386 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6388 /* Neon scalar. Using an element size of 8 means that some invalid
6389 scalars are accepted here, so deal with those in later code. */
6390 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6394 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6397 po_imm_or_fail (0, 0, TRUE);
6402 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6407 po_scalar_or_goto (8, try_rr);
6410 po_reg_or_fail (REG_TYPE_RN);
6416 po_scalar_or_goto (8, try_nsdq);
6419 po_reg_or_fail (REG_TYPE_NSDQ);
6425 po_scalar_or_goto (8, try_ndq);
6428 po_reg_or_fail (REG_TYPE_NDQ);
6434 po_scalar_or_goto (8, try_vfd);
6437 po_reg_or_fail (REG_TYPE_VFD);
6442 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6443 not careful then bad things might happen. */
6444 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6449 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6452 /* There's a possibility of getting a 64-bit immediate here, so
6453 we need special handling. */
6454 if (parse_big_immediate (&str, i) == FAIL)
6456 inst.error = _("immediate value is out of range");
6464 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6467 po_imm_or_fail (0, 63, TRUE);
6472 po_char_or_fail ('[');
6473 po_reg_or_fail (REG_TYPE_RN);
6474 po_char_or_fail (']');
6480 po_reg_or_fail (REG_TYPE_RN);
6481 if (skip_past_char (&str, '!') == SUCCESS)
6482 inst.operands[i].writeback = 1;
6486 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6487 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6488 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6489 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6490 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6491 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6492 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6493 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6494 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6495 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6496 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6497 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6499 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6501 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6502 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6504 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6505 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6506 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6507 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6509 /* Immediate variants */
6511 po_char_or_fail ('{');
6512 po_imm_or_fail (0, 255, TRUE);
6513 po_char_or_fail ('}');
6517 /* The expression parser chokes on a trailing !, so we have
6518 to find it first and zap it. */
6521 while (*s && *s != ',')
6526 inst.operands[i].writeback = 1;
6528 po_imm_or_fail (0, 31, TRUE);
6536 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6541 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6546 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6548 if (inst.reloc.exp.X_op == O_symbol)
6550 val = parse_reloc (&str);
6553 inst.error = _("unrecognized relocation suffix");
6556 else if (val != BFD_RELOC_UNUSED)
6558 inst.operands[i].imm = val;
6559 inst.operands[i].hasreloc = 1;
6564 /* Operand for MOVW or MOVT. */
6566 po_misc_or_fail (parse_half (&str));
6569 /* Register or expression. */
6570 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6571 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6573 /* Register or immediate. */
6574 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6575 I0: po_imm_or_fail (0, 0, FALSE); break;
6577 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6579 if (!is_immediate_prefix (*str))
6582 val = parse_fpa_immediate (&str);
6585 /* FPA immediates are encoded as registers 8-15.
6586 parse_fpa_immediate has already applied the offset. */
6587 inst.operands[i].reg = val;
6588 inst.operands[i].isreg = 1;
6591 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6592 I32z: po_imm_or_fail (0, 32, FALSE); break;
6594 /* Two kinds of register. */
6597 struct reg_entry *rege = arm_reg_parse_multi (&str);
6599 || (rege->type != REG_TYPE_MMXWR
6600 && rege->type != REG_TYPE_MMXWC
6601 && rege->type != REG_TYPE_MMXWCG))
6603 inst.error = _("iWMMXt data or control register expected");
6606 inst.operands[i].reg = rege->number;
6607 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6613 struct reg_entry *rege = arm_reg_parse_multi (&str);
6615 || (rege->type != REG_TYPE_MMXWC
6616 && rege->type != REG_TYPE_MMXWCG))
6618 inst.error = _("iWMMXt control register expected");
6621 inst.operands[i].reg = rege->number;
6622 inst.operands[i].isreg = 1;
6627 case OP_CPSF: val = parse_cps_flags (&str); break;
6628 case OP_ENDI: val = parse_endian_specifier (&str); break;
6629 case OP_oROR: val = parse_ror (&str); break;
6630 case OP_COND: val = parse_cond (&str); break;
6631 case OP_oBARRIER_I15:
6632 po_barrier_or_imm (str); break;
6634 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6640 po_reg_or_goto (REG_TYPE_RNB, try_psr);
6641 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6643 inst.error = _("Banked registers are not available with this "
6649 val = parse_psr (&str, op_parse_code == OP_wPSR);
6653 po_reg_or_goto (REG_TYPE_RN, try_apsr);
6656 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
6658 if (strncasecmp (str, "APSR_", 5) == 0)
6665 case 'c': found = (found & 1) ? 16 : found | 1; break;
6666 case 'n': found = (found & 2) ? 16 : found | 2; break;
6667 case 'z': found = (found & 4) ? 16 : found | 4; break;
6668 case 'v': found = (found & 8) ? 16 : found | 8; break;
6669 default: found = 16;
6673 inst.operands[i].isvec = 1;
6674 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
6675 inst.operands[i].reg = REG_PC;
6682 po_misc_or_fail (parse_tb (&str));
6685 /* Register lists. */
6687 val = parse_reg_list (&str);
6690 inst.operands[1].writeback = 1;
6696 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
6700 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
6704 /* Allow Q registers too. */
6705 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6710 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6712 inst.operands[i].issingle = 1;
6717 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
6722 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
6723 &inst.operands[i].vectype);
6726 /* Addressing modes */
6728 po_misc_or_fail (parse_address (&str, i));
6732 po_misc_or_fail_no_backtrack (
6733 parse_address_group_reloc (&str, i, GROUP_LDR));
6737 po_misc_or_fail_no_backtrack (
6738 parse_address_group_reloc (&str, i, GROUP_LDRS));
6742 po_misc_or_fail_no_backtrack (
6743 parse_address_group_reloc (&str, i, GROUP_LDC));
6747 po_misc_or_fail (parse_shifter_operand (&str, i));
6751 po_misc_or_fail_no_backtrack (
6752 parse_shifter_operand_group_reloc (&str, i));
6756 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
6760 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
6764 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
6768 as_fatal (_("unhandled operand code %d"), op_parse_code);
6771 /* Various value-based sanity checks and shared operations. We
6772 do not signal immediate failures for the register constraints;
6773 this allows a syntax error to take precedence. */
6774 switch (op_parse_code)
6782 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
6783 inst.error = BAD_PC;
6788 if (inst.operands[i].isreg)
6790 if (inst.operands[i].reg == REG_PC)
6791 inst.error = BAD_PC;
6792 else if (inst.operands[i].reg == REG_SP)
6793 inst.error = BAD_SP;
6798 if (inst.operands[i].isreg
6799 && inst.operands[i].reg == REG_PC
6800 && (inst.operands[i].writeback || thumb))
6801 inst.error = BAD_PC;
6810 case OP_oBARRIER_I15:
6819 inst.operands[i].imm = val;
6826 /* If we get here, this operand was successfully parsed. */
6827 inst.operands[i].present = 1;
6831 inst.error = BAD_ARGS;
6836 /* The parse routine should already have set inst.error, but set a
6837 default here just in case. */
6839 inst.error = _("syntax error");
6843 /* Do not backtrack over a trailing optional argument that
6844 absorbed some text. We will only fail again, with the
6845 'garbage following instruction' error message, which is
6846 probably less helpful than the current one. */
6847 if (backtrack_index == i && backtrack_pos != str
6848 && upat[i+1] == OP_stop)
6851 inst.error = _("syntax error");
6855 /* Try again, skipping the optional argument at backtrack_pos. */
6856 str = backtrack_pos;
6857 inst.error = backtrack_error;
6858 inst.operands[backtrack_index].present = 0;
6859 i = backtrack_index;
6863 /* Check that we have parsed all the arguments. */
6864 if (*str != '\0' && !inst.error)
6865 inst.error = _("garbage following instruction");
6867 return inst.error ? FAIL : SUCCESS;
6870 #undef po_char_or_fail
6871 #undef po_reg_or_fail
6872 #undef po_reg_or_goto
6873 #undef po_imm_or_fail
6874 #undef po_scalar_or_fail
6875 #undef po_barrier_or_imm
6877 /* Shorthand macro for instruction encoding functions issuing errors. */
6878 #define constraint(expr, err) \
6889 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
6890 instructions are unpredictable if these registers are used. This
6891 is the BadReg predicate in ARM's Thumb-2 documentation. */
6892 #define reject_bad_reg(reg) \
6894 if (reg == REG_SP || reg == REG_PC) \
6896 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
6901 /* If REG is R13 (the stack pointer), warn that its use is
6903 #define warn_deprecated_sp(reg) \
6905 if (warn_on_deprecated && reg == REG_SP) \
6906 as_warn (_("use of r13 is deprecated")); \
6909 /* Functions for operand encoding. ARM, then Thumb. */
6911 #define rotate_left(v, n) (v << n | v >> (32 - n))
6913 /* If VAL can be encoded in the immediate field of an ARM instruction,
6914 return the encoded form. Otherwise, return FAIL. */
6917 encode_arm_immediate (unsigned int val)
6921 for (i = 0; i < 32; i += 2)
6922 if ((a = rotate_left (val, i)) <= 0xff)
6923 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6928 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6929 return the encoded form. Otherwise, return FAIL. */
6931 encode_thumb32_immediate (unsigned int val)
6938 for (i = 1; i <= 24; i++)
6941 if ((val & ~(0xff << i)) == 0)
6942 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6946 if (val == ((a << 16) | a))
6948 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6952 if (val == ((a << 16) | a))
6953 return 0x200 | (a >> 8);
6957 /* Encode a VFP SP or DP register number into inst.instruction. */
6960 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6962 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6965 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
6968 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6971 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6976 first_error (_("D register out of range for selected VFP version"));
6984 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6988 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6992 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6996 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7000 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7004 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7012 /* Encode a <shift> in an ARM-format instruction. The immediate,
7013 if any, is handled by md_apply_fix. */
7015 encode_arm_shift (int i)
7017 if (inst.operands[i].shift_kind == SHIFT_RRX)
7018 inst.instruction |= SHIFT_ROR << 5;
7021 inst.instruction |= inst.operands[i].shift_kind << 5;
7022 if (inst.operands[i].immisreg)
7024 inst.instruction |= SHIFT_BY_REG;
7025 inst.instruction |= inst.operands[i].imm << 8;
7028 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7033 encode_arm_shifter_operand (int i)
7035 if (inst.operands[i].isreg)
7037 inst.instruction |= inst.operands[i].reg;
7038 encode_arm_shift (i);
7042 inst.instruction |= INST_IMMEDIATE;
7043 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7044 inst.instruction |= inst.operands[i].imm;
7048 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7050 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7053 Generate an error if the operand is not a register. */
7054 constraint (!inst.operands[i].isreg,
7055 _("Instruction does not support =N addresses"));
7057 inst.instruction |= inst.operands[i].reg << 16;
7059 if (inst.operands[i].preind)
7063 inst.error = _("instruction does not accept preindexed addressing");
7066 inst.instruction |= PRE_INDEX;
7067 if (inst.operands[i].writeback)
7068 inst.instruction |= WRITE_BACK;
7071 else if (inst.operands[i].postind)
7073 gas_assert (inst.operands[i].writeback);
7075 inst.instruction |= WRITE_BACK;
7077 else /* unindexed - only for coprocessor */
7079 inst.error = _("instruction does not accept unindexed addressing");
7083 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7084 && (((inst.instruction & 0x000f0000) >> 16)
7085 == ((inst.instruction & 0x0000f000) >> 12)))
7086 as_warn ((inst.instruction & LOAD_BIT)
7087 ? _("destination register same as write-back base")
7088 : _("source register same as write-back base"));
7091 /* inst.operands[i] was set up by parse_address. Encode it into an
7092 ARM-format mode 2 load or store instruction. If is_t is true,
7093 reject forms that cannot be used with a T instruction (i.e. not
7096 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7098 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7100 encode_arm_addr_mode_common (i, is_t);
7102 if (inst.operands[i].immisreg)
7104 constraint ((inst.operands[i].imm == REG_PC
7105 || (is_pc && inst.operands[i].writeback)),
7107 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
7108 inst.instruction |= inst.operands[i].imm;
7109 if (!inst.operands[i].negative)
7110 inst.instruction |= INDEX_UP;
7111 if (inst.operands[i].shifted)
7113 if (inst.operands[i].shift_kind == SHIFT_RRX)
7114 inst.instruction |= SHIFT_ROR << 5;
7117 inst.instruction |= inst.operands[i].shift_kind << 5;
7118 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7122 else /* immediate offset in inst.reloc */
7124 if (is_pc && !inst.reloc.pc_rel)
7126 const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7128 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7129 cannot use PC in addressing.
7130 PC cannot be used in writeback addressing, either. */
7131 constraint ((is_t || inst.operands[i].writeback),
7134 /* Use of PC in str is deprecated for ARMv7. */
7135 if (warn_on_deprecated
7137 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7138 as_warn (_("use of PC in this instruction is deprecated"));
7141 if (inst.reloc.type == BFD_RELOC_UNUSED)
7143 /* Prefer + for zero encoded value. */
7144 if (!inst.operands[i].negative)
7145 inst.instruction |= INDEX_UP;
7146 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
7151 /* inst.operands[i] was set up by parse_address. Encode it into an
7152 ARM-format mode 3 load or store instruction. Reject forms that
7153 cannot be used with such instructions. If is_t is true, reject
7154 forms that cannot be used with a T instruction (i.e. not
7157 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7159 if (inst.operands[i].immisreg && inst.operands[i].shifted)
7161 inst.error = _("instruction does not accept scaled register index");
7165 encode_arm_addr_mode_common (i, is_t);
7167 if (inst.operands[i].immisreg)
7169 constraint ((inst.operands[i].imm == REG_PC
7170 || inst.operands[i].reg == REG_PC),
7172 inst.instruction |= inst.operands[i].imm;
7173 if (!inst.operands[i].negative)
7174 inst.instruction |= INDEX_UP;
7176 else /* immediate offset in inst.reloc */
7178 constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7179 && inst.operands[i].writeback),
7181 inst.instruction |= HWOFFSET_IMM;
7182 if (inst.reloc.type == BFD_RELOC_UNUSED)
7184 /* Prefer + for zero encoded value. */
7185 if (!inst.operands[i].negative)
7186 inst.instruction |= INDEX_UP;
7188 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
7193 /* inst.operands[i] was set up by parse_address. Encode it into an
7194 ARM-format instruction. Reject all forms which cannot be encoded
7195 into a coprocessor load/store instruction. If wb_ok is false,
7196 reject use of writeback; if unind_ok is false, reject use of
7197 unindexed addressing. If reloc_override is not 0, use it instead
7198 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
7199 (in which case it is preserved). */
7202 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
7204 inst.instruction |= inst.operands[i].reg << 16;
7206 gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
7208 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
7210 gas_assert (!inst.operands[i].writeback);
7213 inst.error = _("instruction does not support unindexed addressing");
7216 inst.instruction |= inst.operands[i].imm;
7217 inst.instruction |= INDEX_UP;
7221 if (inst.operands[i].preind)
7222 inst.instruction |= PRE_INDEX;
7224 if (inst.operands[i].writeback)
7226 if (inst.operands[i].reg == REG_PC)
7228 inst.error = _("pc may not be used with write-back");
7233 inst.error = _("instruction does not support writeback");
7236 inst.instruction |= WRITE_BACK;
7240 inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
7241 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
7242 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
7243 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
7246 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
7248 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
7251 /* Prefer + for zero encoded value. */
7252 if (!inst.operands[i].negative)
7253 inst.instruction |= INDEX_UP;
7258 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7259 Determine whether it can be performed with a move instruction; if
7260 it can, convert inst.instruction to that move instruction and
7261 return TRUE; if it can't, convert inst.instruction to a literal-pool
7262 load and return FALSE. If this is not a valid thing to do in the
7263 current context, set inst.error and return TRUE.
7265 inst.operands[i] describes the destination register. */
7268 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
7273 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7277 if ((inst.instruction & tbit) == 0)
7279 inst.error = _("invalid pseudo operation");
7282 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
7284 inst.error = _("constant expression expected");
7287 if (inst.reloc.exp.X_op == O_constant)
7291 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
7293 /* This can be done with a mov(1) instruction. */
7294 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
7295 inst.instruction |= inst.reloc.exp.X_add_number;
7301 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
7304 /* This can be done with a mov instruction. */
7305 inst.instruction &= LITERAL_MASK;
7306 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
7307 inst.instruction |= value & 0xfff;
7311 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
7314 /* This can be done with a mvn instruction. */
7315 inst.instruction &= LITERAL_MASK;
7316 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
7317 inst.instruction |= value & 0xfff;
7323 if (add_to_lit_pool () == FAIL)
7325 inst.error = _("literal pool insertion failed");
7328 inst.operands[1].reg = REG_PC;
7329 inst.operands[1].isreg = 1;
7330 inst.operands[1].preind = 1;
7331 inst.reloc.pc_rel = 1;
7332 inst.reloc.type = (thumb_p
7333 ? BFD_RELOC_ARM_THUMB_OFFSET
7335 ? BFD_RELOC_ARM_HWLITERAL
7336 : BFD_RELOC_ARM_LITERAL));
7340 /* Functions for instruction encoding, sorted by sub-architecture.
7341 First some generics; their names are taken from the conventional
7342 bit positions for register arguments in ARM format instructions. */
7352 inst.instruction |= inst.operands[0].reg << 12;
7358 inst.instruction |= inst.operands[0].reg << 12;
7359 inst.instruction |= inst.operands[1].reg;
7365 inst.instruction |= inst.operands[0].reg << 12;
7366 inst.instruction |= inst.operands[1].reg << 16;
7372 inst.instruction |= inst.operands[0].reg << 16;
7373 inst.instruction |= inst.operands[1].reg << 12;
7377 check_obsolete (const arm_feature_set *feature, const char *msg)
7379 if (ARM_CPU_IS_ANY (cpu_variant))
7381 as_warn ("%s", msg);
7384 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
7396 unsigned Rn = inst.operands[2].reg;
7397 /* Enforce restrictions on SWP instruction. */
7398 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
7400 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
7401 _("Rn must not overlap other operands"));
7403 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
7405 if (!check_obsolete (&arm_ext_v8,
7406 _("swp{b} use is obsoleted for ARMv8 and later"))
7407 && warn_on_deprecated
7408 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
7409 as_warn (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
7412 inst.instruction |= inst.operands[0].reg << 12;
7413 inst.instruction |= inst.operands[1].reg;
7414 inst.instruction |= Rn << 16;
7420 inst.instruction |= inst.operands[0].reg << 12;
7421 inst.instruction |= inst.operands[1].reg << 16;
7422 inst.instruction |= inst.operands[2].reg;
7428 constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
7429 constraint (((inst.reloc.exp.X_op != O_constant
7430 && inst.reloc.exp.X_op != O_illegal)
7431 || inst.reloc.exp.X_add_number != 0),
7433 inst.instruction |= inst.operands[0].reg;
7434 inst.instruction |= inst.operands[1].reg << 12;
7435 inst.instruction |= inst.operands[2].reg << 16;
7441 inst.instruction |= inst.operands[0].imm;
7447 inst.instruction |= inst.operands[0].reg << 12;
7448 encode_arm_cp_address (1, TRUE, TRUE, 0);
7451 /* ARM instructions, in alphabetical order by function name (except
7452 that wrapper functions appear immediately after the function they
7455 /* This is a pseudo-op of the form "adr rd, label" to be converted
7456 into a relative address of the form "add rd, pc, #label-.-8". */
7461 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
7463 /* Frag hacking will turn this into a sub instruction if the offset turns
7464 out to be negative. */
7465 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7466 inst.reloc.pc_rel = 1;
7467 inst.reloc.exp.X_add_number -= 8;
7470 /* This is a pseudo-op of the form "adrl rd, label" to be converted
7471 into a relative address of the form:
7472 add rd, pc, #low(label-.-8)"
7473 add rd, rd, #high(label-.-8)" */
7478 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
7480 /* Frag hacking will turn this into a sub instruction if the offset turns
7481 out to be negative. */
7482 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
7483 inst.reloc.pc_rel = 1;
7484 inst.size = INSN_SIZE * 2;
7485 inst.reloc.exp.X_add_number -= 8;
7491 if (!inst.operands[1].present)
7492 inst.operands[1].reg = inst.operands[0].reg;
7493 inst.instruction |= inst.operands[0].reg << 12;
7494 inst.instruction |= inst.operands[1].reg << 16;
7495 encode_arm_shifter_operand (2);
7501 if (inst.operands[0].present)
7503 constraint ((inst.instruction & 0xf0) != 0x40
7504 && inst.operands[0].imm > 0xf
7505 && inst.operands[0].imm < 0x0,
7506 _("bad barrier type"));
7507 inst.instruction |= inst.operands[0].imm;
7510 inst.instruction |= 0xf;
7516 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7517 constraint (msb > 32, _("bit-field extends past end of register"));
7518 /* The instruction encoding stores the LSB and MSB,
7519 not the LSB and width. */
7520 inst.instruction |= inst.operands[0].reg << 12;
7521 inst.instruction |= inst.operands[1].imm << 7;
7522 inst.instruction |= (msb - 1) << 16;
7530 /* #0 in second position is alternative syntax for bfc, which is
7531 the same instruction but with REG_PC in the Rm field. */
7532 if (!inst.operands[1].isreg)
7533 inst.operands[1].reg = REG_PC;
7535 msb = inst.operands[2].imm + inst.operands[3].imm;
7536 constraint (msb > 32, _("bit-field extends past end of register"));
7537 /* The instruction encoding stores the LSB and MSB,
7538 not the LSB and width. */
7539 inst.instruction |= inst.operands[0].reg << 12;
7540 inst.instruction |= inst.operands[1].reg;
7541 inst.instruction |= inst.operands[2].imm << 7;
7542 inst.instruction |= (msb - 1) << 16;
7548 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
7549 _("bit-field extends past end of register"));
7550 inst.instruction |= inst.operands[0].reg << 12;
7551 inst.instruction |= inst.operands[1].reg;
7552 inst.instruction |= inst.operands[2].imm << 7;
7553 inst.instruction |= (inst.operands[3].imm - 1) << 16;
7556 /* ARM V5 breakpoint instruction (argument parse)
7557 BKPT <16 bit unsigned immediate>
7558 Instruction is not conditional.
7559 The bit pattern given in insns[] has the COND_ALWAYS condition,
7560 and it is an error if the caller tried to override that. */
7565 /* Top 12 of 16 bits to bits 19:8. */
7566 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
7568 /* Bottom 4 of 16 bits to bits 3:0. */
7569 inst.instruction |= inst.operands[0].imm & 0xf;
7573 encode_branch (int default_reloc)
7575 if (inst.operands[0].hasreloc)
7577 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
7578 && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
7579 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
7580 inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
7581 ? BFD_RELOC_ARM_PLT32
7582 : thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
7585 inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
7586 inst.reloc.pc_rel = 1;
7593 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7594 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7597 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7604 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7606 if (inst.cond == COND_ALWAYS)
7607 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
7609 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7613 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7616 /* ARM V5 branch-link-exchange instruction (argument parse)
7617 BLX <target_addr> ie BLX(1)
7618 BLX{<condition>} <Rm> ie BLX(2)
7619 Unfortunately, there are two different opcodes for this mnemonic.
7620 So, the insns[].value is not used, and the code here zaps values
7621 into inst.instruction.
7622 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
7627 if (inst.operands[0].isreg)
7629 /* Arg is a register; the opcode provided by insns[] is correct.
7630 It is not illegal to do "blx pc", just useless. */
7631 if (inst.operands[0].reg == REG_PC)
7632 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
7634 inst.instruction |= inst.operands[0].reg;
7638 /* Arg is an address; this instruction cannot be executed
7639 conditionally, and the opcode must be adjusted.
7640 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
7641 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
7642 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7643 inst.instruction = 0xfa000000;
7644 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
7651 bfd_boolean want_reloc;
7653 if (inst.operands[0].reg == REG_PC)
7654 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
7656 inst.instruction |= inst.operands[0].reg;
7657 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
7658 it is for ARMv4t or earlier. */
7659 want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
7660 if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
7664 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
7669 inst.reloc.type = BFD_RELOC_ARM_V4BX;
7673 /* ARM v5TEJ. Jump to Jazelle code. */
7678 if (inst.operands[0].reg == REG_PC)
7679 as_tsktsk (_("use of r15 in bxj is not really useful"));
7681 inst.instruction |= inst.operands[0].reg;
7684 /* Co-processor data operation:
7685 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
7686 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
7690 inst.instruction |= inst.operands[0].reg << 8;
7691 inst.instruction |= inst.operands[1].imm << 20;
7692 inst.instruction |= inst.operands[2].reg << 12;
7693 inst.instruction |= inst.operands[3].reg << 16;
7694 inst.instruction |= inst.operands[4].reg;
7695 inst.instruction |= inst.operands[5].imm << 5;
7701 inst.instruction |= inst.operands[0].reg << 16;
7702 encode_arm_shifter_operand (1);
7705 /* Transfer between coprocessor and ARM registers.
7706 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
7711 No special properties. */
7713 struct deprecated_coproc_regs_s
7720 arm_feature_set deprecated;
7721 arm_feature_set obsoleted;
7722 const char *dep_msg;
7723 const char *obs_msg;
7726 #define DEPR_ACCESS_V8 \
7727 N_("This coprocessor register access is deprecated in ARMv8")
7729 /* Table of all deprecated coprocessor registers. */
7730 static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
7732 {15, 0, 7, 10, 5, /* CP15DMB. */
7733 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7734 DEPR_ACCESS_V8, NULL},
7735 {15, 0, 7, 10, 4, /* CP15DSB. */
7736 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7737 DEPR_ACCESS_V8, NULL},
7738 {15, 0, 7, 5, 4, /* CP15ISB. */
7739 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7740 DEPR_ACCESS_V8, NULL},
7741 {14, 6, 1, 0, 0, /* TEEHBR. */
7742 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7743 DEPR_ACCESS_V8, NULL},
7744 {14, 6, 0, 0, 0, /* TEECR. */
7745 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
7746 DEPR_ACCESS_V8, NULL},
7749 #undef DEPR_ACCESS_V8
7751 static const size_t deprecated_coproc_reg_count =
7752 sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
7760 Rd = inst.operands[2].reg;
7763 if (inst.instruction == 0xee000010
7764 || inst.instruction == 0xfe000010)
7766 reject_bad_reg (Rd);
7769 constraint (Rd == REG_SP, BAD_SP);
7774 if (inst.instruction == 0xe000010)
7775 constraint (Rd == REG_PC, BAD_PC);
7778 for (i = 0; i < deprecated_coproc_reg_count; ++i)
7780 const struct deprecated_coproc_regs_s *r =
7781 deprecated_coproc_regs + i;
7783 if (inst.operands[0].reg == r->cp
7784 && inst.operands[1].imm == r->opc1
7785 && inst.operands[3].reg == r->crn
7786 && inst.operands[4].reg == r->crm
7787 && inst.operands[5].imm == r->opc2)
7789 if (!check_obsolete (&r->obsoleted, r->obs_msg)
7790 && warn_on_deprecated
7791 && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
7792 as_warn ("%s", r->dep_msg);
7796 inst.instruction |= inst.operands[0].reg << 8;
7797 inst.instruction |= inst.operands[1].imm << 21;
7798 inst.instruction |= Rd << 12;
7799 inst.instruction |= inst.operands[3].reg << 16;
7800 inst.instruction |= inst.operands[4].reg;
7801 inst.instruction |= inst.operands[5].imm << 5;
7804 /* Transfer between coprocessor register and pair of ARM registers.
7805 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
7810 Two XScale instructions are special cases of these:
7812 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
7813 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
7815 Result unpredictable if Rd or Rn is R15. */
7822 Rd = inst.operands[2].reg;
7823 Rn = inst.operands[3].reg;
7827 reject_bad_reg (Rd);
7828 reject_bad_reg (Rn);
7832 constraint (Rd == REG_PC, BAD_PC);
7833 constraint (Rn == REG_PC, BAD_PC);
7836 inst.instruction |= inst.operands[0].reg << 8;
7837 inst.instruction |= inst.operands[1].imm << 4;
7838 inst.instruction |= Rd << 12;
7839 inst.instruction |= Rn << 16;
7840 inst.instruction |= inst.operands[4].reg;
7846 inst.instruction |= inst.operands[0].imm << 6;
7847 if (inst.operands[1].present)
7849 inst.instruction |= CPSI_MMOD;
7850 inst.instruction |= inst.operands[1].imm;
7857 inst.instruction |= inst.operands[0].imm;
7863 unsigned Rd, Rn, Rm;
7865 Rd = inst.operands[0].reg;
7866 Rn = (inst.operands[1].present
7867 ? inst.operands[1].reg : Rd);
7868 Rm = inst.operands[2].reg;
7870 constraint ((Rd == REG_PC), BAD_PC);
7871 constraint ((Rn == REG_PC), BAD_PC);
7872 constraint ((Rm == REG_PC), BAD_PC);
7874 inst.instruction |= Rd << 16;
7875 inst.instruction |= Rn << 0;
7876 inst.instruction |= Rm << 8;
7882 /* There is no IT instruction in ARM mode. We
7883 process it to do the validation as if in
7884 thumb mode, just in case the code gets
7885 assembled for thumb using the unified syntax. */
7890 set_it_insn_type (IT_INSN);
7891 now_it.mask = (inst.instruction & 0xf) | 0x10;
7892 now_it.cc = inst.operands[0].imm;
7896 /* If there is only one register in the register list,
7897 then return its register number. Otherwise return -1. */
7899 only_one_reg_in_list (int range)
7901 int i = ffs (range) - 1;
7902 return (i > 15 || range != (1 << i)) ? -1 : i;
7906 encode_ldmstm(int from_push_pop_mnem)
7908 int base_reg = inst.operands[0].reg;
7909 int range = inst.operands[1].imm;
7912 inst.instruction |= base_reg << 16;
7913 inst.instruction |= range;
7915 if (inst.operands[1].writeback)
7916 inst.instruction |= LDM_TYPE_2_OR_3;
7918 if (inst.operands[0].writeback)
7920 inst.instruction |= WRITE_BACK;
7921 /* Check for unpredictable uses of writeback. */
7922 if (inst.instruction & LOAD_BIT)
7924 /* Not allowed in LDM type 2. */
7925 if ((inst.instruction & LDM_TYPE_2_OR_3)
7926 && ((range & (1 << REG_PC)) == 0))
7927 as_warn (_("writeback of base register is UNPREDICTABLE"));
7928 /* Only allowed if base reg not in list for other types. */
7929 else if (range & (1 << base_reg))
7930 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
7934 /* Not allowed for type 2. */
7935 if (inst.instruction & LDM_TYPE_2_OR_3)
7936 as_warn (_("writeback of base register is UNPREDICTABLE"));
7937 /* Only allowed if base reg not in list, or first in list. */
7938 else if ((range & (1 << base_reg))
7939 && (range & ((1 << base_reg) - 1)))
7940 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
7944 /* If PUSH/POP has only one register, then use the A2 encoding. */
7945 one_reg = only_one_reg_in_list (range);
7946 if (from_push_pop_mnem && one_reg >= 0)
7948 int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;
7950 inst.instruction &= A_COND_MASK;
7951 inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
7952 inst.instruction |= one_reg << 12;
7959 encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
7962 /* ARMv5TE load-consecutive (argument parse)
7971 constraint (inst.operands[0].reg % 2 != 0,
7972 _("first transfer register must be even"));
7973 constraint (inst.operands[1].present
7974 && inst.operands[1].reg != inst.operands[0].reg + 1,
7975 _("can only transfer two consecutive registers"));
7976 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7977 constraint (!inst.operands[2].isreg, _("'[' expected"));
7979 if (!inst.operands[1].present)
7980 inst.operands[1].reg = inst.operands[0].reg + 1;
7982 /* encode_arm_addr_mode_3 will diagnose overlap between the base
7983 register and the first register written; we have to diagnose
7984 overlap between the base and the second register written here. */
7986 if (inst.operands[2].reg == inst.operands[1].reg
7987 && (inst.operands[2].writeback || inst.operands[2].postind))
7988 as_warn (_("base register written back, and overlaps "
7989 "second transfer register"));
7991 if (!(inst.instruction & V4_STR_BIT))
7993 /* For an index-register load, the index register must not overlap the
7994 destination (even if not write-back). */
7995 if (inst.operands[2].immisreg
7996 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
7997 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
7998 as_warn (_("index register overlaps transfer register"));
8000 inst.instruction |= inst.operands[0].reg << 12;
8001 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
8007 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8008 || inst.operands[1].postind || inst.operands[1].writeback
8009 || inst.operands[1].immisreg || inst.operands[1].shifted
8010 || inst.operands[1].negative
8011 /* This can arise if the programmer has written
8013 or if they have mistakenly used a register name as the last
8016 It is very difficult to distinguish between these two cases
8017 because "rX" might actually be a label. ie the register
8018 name has been occluded by a symbol of the same name. So we
8019 just generate a general 'bad addressing mode' type error
8020 message and leave it up to the programmer to discover the
8021 true cause and fix their mistake. */
8022 || (inst.operands[1].reg == REG_PC),
8025 constraint (inst.reloc.exp.X_op != O_constant
8026 || inst.reloc.exp.X_add_number != 0,
8027 _("offset must be zero in ARM encoding"));
8029 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
8031 inst.instruction |= inst.operands[0].reg << 12;
8032 inst.instruction |= inst.operands[1].reg << 16;
8033 inst.reloc.type = BFD_RELOC_UNUSED;
8039 constraint (inst.operands[0].reg % 2 != 0,
8040 _("even register required"));
8041 constraint (inst.operands[1].present
8042 && inst.operands[1].reg != inst.operands[0].reg + 1,
8043 _("can only load two consecutive registers"));
8044 /* If op 1 were present and equal to PC, this function wouldn't
8045 have been called in the first place. */
8046 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8048 inst.instruction |= inst.operands[0].reg << 12;
8049 inst.instruction |= inst.operands[2].reg << 16;
8052 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8053 which is not a multiple of four is UNPREDICTABLE. */
8055 check_ldr_r15_aligned (void)
8057 constraint (!(inst.operands[1].immisreg)
8058 && (inst.operands[0].reg == REG_PC
8059 && inst.operands[1].reg == REG_PC
8060 && (inst.reloc.exp.X_add_number & 0x3)),
8061 _("ldr to register 15 must be 4-byte alligned"));
8067 inst.instruction |= inst.operands[0].reg << 12;
8068 if (!inst.operands[1].isreg)
8069 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
8071 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
8072 check_ldr_r15_aligned ();
8078 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8080 if (inst.operands[1].preind)
8082 constraint (inst.reloc.exp.X_op != O_constant
8083 || inst.reloc.exp.X_add_number != 0,
8084 _("this instruction requires a post-indexed address"));
8086 inst.operands[1].preind = 0;
8087 inst.operands[1].postind = 1;
8088 inst.operands[1].writeback = 1;
8090 inst.instruction |= inst.operands[0].reg << 12;
8091 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
8094 /* Halfword and signed-byte load/store operations. */
8099 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8100 inst.instruction |= inst.operands[0].reg << 12;
8101 if (!inst.operands[1].isreg)
8102 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
8104 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
8110 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8112 if (inst.operands[1].preind)
8114 constraint (inst.reloc.exp.X_op != O_constant
8115 || inst.reloc.exp.X_add_number != 0,
8116 _("this instruction requires a post-indexed address"));
8118 inst.operands[1].preind = 0;
8119 inst.operands[1].postind = 1;
8120 inst.operands[1].writeback = 1;
8122 inst.instruction |= inst.operands[0].reg << 12;
8123 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
8126 /* Co-processor register load/store.
8127 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8131 inst.instruction |= inst.operands[0].reg << 8;
8132 inst.instruction |= inst.operands[1].reg << 12;
8133 encode_arm_cp_address (2, TRUE, TRUE, 0);
8139 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8140 if (inst.operands[0].reg == inst.operands[1].reg
8141 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
8142 && !(inst.instruction & 0x00400000))
8143 as_tsktsk (_("Rd and Rm should be different in mla"));
8145 inst.instruction |= inst.operands[0].reg << 16;
8146 inst.instruction |= inst.operands[1].reg;
8147 inst.instruction |= inst.operands[2].reg << 8;
8148 inst.instruction |= inst.operands[3].reg << 12;
8154 inst.instruction |= inst.operands[0].reg << 12;
8155 encode_arm_shifter_operand (1);
8158 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8165 top = (inst.instruction & 0x00400000) != 0;
8166 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8167 _(":lower16: not allowed this instruction"));
8168 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8169 _(":upper16: not allowed instruction"));
8170 inst.instruction |= inst.operands[0].reg << 12;
8171 if (inst.reloc.type == BFD_RELOC_UNUSED)
8173 imm = inst.reloc.exp.X_add_number;
8174 /* The value is in two pieces: 0:11, 16:19. */
8175 inst.instruction |= (imm & 0x00000fff);
8176 inst.instruction |= (imm & 0x0000f000) << 4;
8180 static void do_vfp_nsyn_opcode (const char *);
8183 do_vfp_nsyn_mrs (void)
8185 if (inst.operands[0].isvec)
8187 if (inst.operands[1].reg != 1)
8188 first_error (_("operand 1 must be FPSCR"));
8189 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
8190 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
8191 do_vfp_nsyn_opcode ("fmstat");
8193 else if (inst.operands[1].isvec)
8194 do_vfp_nsyn_opcode ("fmrx");
8202 do_vfp_nsyn_msr (void)
8204 if (inst.operands[0].isvec)
8205 do_vfp_nsyn_opcode ("fmxr");
8215 unsigned Rt = inst.operands[0].reg;
8217 if (thumb_mode && inst.operands[0].reg == REG_SP)
8219 inst.error = BAD_SP;
8223 /* APSR_ sets isvec. All other refs to PC are illegal. */
8224 if (!inst.operands[0].isvec && inst.operands[0].reg == REG_PC)
8226 inst.error = BAD_PC;
8230 switch (inst.operands[1].reg)
8237 inst.instruction |= (inst.operands[1].reg << 16);
8240 first_error (_("operand 1 must be a VFP extension System Register"));
8243 inst.instruction |= (Rt << 12);
8249 unsigned Rt = inst.operands[1].reg;
8252 reject_bad_reg (Rt);
8253 else if (Rt == REG_PC)
8255 inst.error = BAD_PC;
8259 switch (inst.operands[0].reg)
8264 inst.instruction |= (inst.operands[0].reg << 16);
8267 first_error (_("operand 0 must be FPSID or FPSCR pr FPEXC"));
8270 inst.instruction |= (Rt << 12);
8278 if (do_vfp_nsyn_mrs () == SUCCESS)
8281 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8282 inst.instruction |= inst.operands[0].reg << 12;
8284 if (inst.operands[1].isreg)
8286 br = inst.operands[1].reg;
8287 if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
8288 as_bad (_("bad register for mrs"));
8292 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8293 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
8295 _("'APSR', 'CPSR' or 'SPSR' expected"));
8296 br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
8299 inst.instruction |= br;
8302 /* Two possible forms:
8303 "{C|S}PSR_<field>, Rm",
8304 "{C|S}PSR_f, #expression". */
8309 if (do_vfp_nsyn_msr () == SUCCESS)
8312 inst.instruction |= inst.operands[0].imm;
8313 if (inst.operands[1].isreg)
8314 inst.instruction |= inst.operands[1].reg;
8317 inst.instruction |= INST_IMMEDIATE;
8318 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8319 inst.reloc.pc_rel = 0;
8326 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
8328 if (!inst.operands[2].present)
8329 inst.operands[2].reg = inst.operands[0].reg;
8330 inst.instruction |= inst.operands[0].reg << 16;
8331 inst.instruction |= inst.operands[1].reg;
8332 inst.instruction |= inst.operands[2].reg << 8;
8334 if (inst.operands[0].reg == inst.operands[1].reg
8335 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8336 as_tsktsk (_("Rd and Rm should be different in mul"));
8339 /* Long Multiply Parser
8340 UMULL RdLo, RdHi, Rm, Rs
8341 SMULL RdLo, RdHi, Rm, Rs
8342 UMLAL RdLo, RdHi, Rm, Rs
8343 SMLAL RdLo, RdHi, Rm, Rs. */
8348 inst.instruction |= inst.operands[0].reg << 12;
8349 inst.instruction |= inst.operands[1].reg << 16;
8350 inst.instruction |= inst.operands[2].reg;
8351 inst.instruction |= inst.operands[3].reg << 8;
8353 /* rdhi and rdlo must be different. */
8354 if (inst.operands[0].reg == inst.operands[1].reg)
8355 as_tsktsk (_("rdhi and rdlo must be different"));
8357 /* rdhi, rdlo and rm must all be different before armv6. */
8358 if ((inst.operands[0].reg == inst.operands[2].reg
8359 || inst.operands[1].reg == inst.operands[2].reg)
8360 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8361 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
8367 if (inst.operands[0].present
8368 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
8370 /* Architectural NOP hints are CPSR sets with no bits selected. */
8371 inst.instruction &= 0xf0000000;
8372 inst.instruction |= 0x0320f000;
8373 if (inst.operands[0].present)
8374 inst.instruction |= inst.operands[0].imm;
8378 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8379 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8380 Condition defaults to COND_ALWAYS.
8381 Error if Rd, Rn or Rm are R15. */
8386 inst.instruction |= inst.operands[0].reg << 12;
8387 inst.instruction |= inst.operands[1].reg << 16;
8388 inst.instruction |= inst.operands[2].reg;
8389 if (inst.operands[3].present)
8390 encode_arm_shift (3);
8393 /* ARM V6 PKHTB (Argument Parse). */
8398 if (!inst.operands[3].present)
8400 /* If the shift specifier is omitted, turn the instruction
8401 into pkhbt rd, rm, rn. */
8402 inst.instruction &= 0xfff00010;
8403 inst.instruction |= inst.operands[0].reg << 12;
8404 inst.instruction |= inst.operands[1].reg;
8405 inst.instruction |= inst.operands[2].reg << 16;
8409 inst.instruction |= inst.operands[0].reg << 12;
8410 inst.instruction |= inst.operands[1].reg << 16;
8411 inst.instruction |= inst.operands[2].reg;
8412 encode_arm_shift (3);
8416 /* ARMv5TE: Preload-Cache
8417 MP Extensions: Preload for write
8421 Syntactically, like LDR with B=1, W=0, L=1. */
8426 constraint (!inst.operands[0].isreg,
8427 _("'[' expected after PLD mnemonic"));
8428 constraint (inst.operands[0].postind,
8429 _("post-indexed expression used in preload instruction"));
8430 constraint (inst.operands[0].writeback,
8431 _("writeback used in preload instruction"));
8432 constraint (!inst.operands[0].preind,
8433 _("unindexed addressing used in preload instruction"));
8434 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8437 /* ARMv7: PLI <addr_mode> */
8441 constraint (!inst.operands[0].isreg,
8442 _("'[' expected after PLI mnemonic"));
8443 constraint (inst.operands[0].postind,
8444 _("post-indexed expression used in preload instruction"));
8445 constraint (inst.operands[0].writeback,
8446 _("writeback used in preload instruction"));
8447 constraint (!inst.operands[0].preind,
8448 _("unindexed addressing used in preload instruction"));
8449 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8450 inst.instruction &= ~PRE_INDEX;
8456 inst.operands[1] = inst.operands[0];
8457 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
8458 inst.operands[0].isreg = 1;
8459 inst.operands[0].writeback = 1;
8460 inst.operands[0].reg = REG_SP;
8461 encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
8464 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
8465 word at the specified address and the following word
8467 Unconditionally executed.
8468 Error if Rn is R15. */
8473 inst.instruction |= inst.operands[0].reg << 16;
8474 if (inst.operands[0].writeback)
8475 inst.instruction |= WRITE_BACK;
8478 /* ARM V6 ssat (argument parse). */
8483 inst.instruction |= inst.operands[0].reg << 12;
8484 inst.instruction |= (inst.operands[1].imm - 1) << 16;
8485 inst.instruction |= inst.operands[2].reg;
8487 if (inst.operands[3].present)
8488 encode_arm_shift (3);
8491 /* ARM V6 usat (argument parse). */
8496 inst.instruction |= inst.operands[0].reg << 12;
8497 inst.instruction |= inst.operands[1].imm << 16;
8498 inst.instruction |= inst.operands[2].reg;
8500 if (inst.operands[3].present)
8501 encode_arm_shift (3);
8504 /* ARM V6 ssat16 (argument parse). */
8509 inst.instruction |= inst.operands[0].reg << 12;
8510 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
8511 inst.instruction |= inst.operands[2].reg;
8517 inst.instruction |= inst.operands[0].reg << 12;
8518 inst.instruction |= inst.operands[1].imm << 16;
8519 inst.instruction |= inst.operands[2].reg;
8522 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
8523 preserving the other bits.
8525 setend <endian_specifier>, where <endian_specifier> is either
8531 if (warn_on_deprecated
8532 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
8533 as_warn (_("setend use is deprecated for ARMv8"));
8535 if (inst.operands[0].imm)
8536 inst.instruction |= 0x200;
8542 unsigned int Rm = (inst.operands[1].present
8543 ? inst.operands[1].reg
8544 : inst.operands[0].reg);
8546 inst.instruction |= inst.operands[0].reg << 12;
8547 inst.instruction |= Rm;
8548 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
8550 inst.instruction |= inst.operands[2].reg << 8;
8551 inst.instruction |= SHIFT_BY_REG;
8552 /* PR 12854: Error on extraneous shifts. */
8553 constraint (inst.operands[2].shifted,
8554 _("extraneous shift as part of operand to shift insn"));
8557 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
8563 inst.reloc.type = BFD_RELOC_ARM_SMC;
8564 inst.reloc.pc_rel = 0;
8570 inst.reloc.type = BFD_RELOC_ARM_HVC;
8571 inst.reloc.pc_rel = 0;
8577 inst.reloc.type = BFD_RELOC_ARM_SWI;
8578 inst.reloc.pc_rel = 0;
8581 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
8582 SMLAxy{cond} Rd,Rm,Rs,Rn
8583 SMLAWy{cond} Rd,Rm,Rs,Rn
8584 Error if any register is R15. */
8589 inst.instruction |= inst.operands[0].reg << 16;
8590 inst.instruction |= inst.operands[1].reg;
8591 inst.instruction |= inst.operands[2].reg << 8;
8592 inst.instruction |= inst.operands[3].reg << 12;
8595 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
8596 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
8597 Error if any register is R15.
8598 Warning if Rdlo == Rdhi. */
8603 inst.instruction |= inst.operands[0].reg << 12;
8604 inst.instruction |= inst.operands[1].reg << 16;
8605 inst.instruction |= inst.operands[2].reg;
8606 inst.instruction |= inst.operands[3].reg << 8;
8608 if (inst.operands[0].reg == inst.operands[1].reg)
8609 as_tsktsk (_("rdhi and rdlo must be different"));
8612 /* ARM V5E (El Segundo) signed-multiply (argument parse)
8613 SMULxy{cond} Rd,Rm,Rs
8614 Error if any register is R15. */
8619 inst.instruction |= inst.operands[0].reg << 16;
8620 inst.instruction |= inst.operands[1].reg;
8621 inst.instruction |= inst.operands[2].reg << 8;
8624 /* ARM V6 srs (argument parse). The variable fields in the encoding are
8625 the same for both ARM and Thumb-2. */
8632 if (inst.operands[0].present)
8634 reg = inst.operands[0].reg;
8635 constraint (reg != REG_SP, _("SRS base register must be r13"));
8640 inst.instruction |= reg << 16;
8641 inst.instruction |= inst.operands[1].imm;
8642 if (inst.operands[0].writeback || inst.operands[1].writeback)
8643 inst.instruction |= WRITE_BACK;
8646 /* ARM V6 strex (argument parse). */
8651 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8652 || inst.operands[2].postind || inst.operands[2].writeback
8653 || inst.operands[2].immisreg || inst.operands[2].shifted
8654 || inst.operands[2].negative
8655 /* See comment in do_ldrex(). */
8656 || (inst.operands[2].reg == REG_PC),
8659 constraint (inst.operands[0].reg == inst.operands[1].reg
8660 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8662 constraint (inst.reloc.exp.X_op != O_constant
8663 || inst.reloc.exp.X_add_number != 0,
8664 _("offset must be zero in ARM encoding"));
8666 inst.instruction |= inst.operands[0].reg << 12;
8667 inst.instruction |= inst.operands[1].reg;
8668 inst.instruction |= inst.operands[2].reg << 16;
8669 inst.reloc.type = BFD_RELOC_UNUSED;
8675 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8676 || inst.operands[2].postind || inst.operands[2].writeback
8677 || inst.operands[2].immisreg || inst.operands[2].shifted
8678 || inst.operands[2].negative,
8681 constraint (inst.operands[0].reg == inst.operands[1].reg
8682 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8690 constraint (inst.operands[1].reg % 2 != 0,
8691 _("even register required"));
8692 constraint (inst.operands[2].present
8693 && inst.operands[2].reg != inst.operands[1].reg + 1,
8694 _("can only store two consecutive registers"));
8695 /* If op 2 were present and equal to PC, this function wouldn't
8696 have been called in the first place. */
8697 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
8699 constraint (inst.operands[0].reg == inst.operands[1].reg
8700 || inst.operands[0].reg == inst.operands[1].reg + 1
8701 || inst.operands[0].reg == inst.operands[3].reg,
8704 inst.instruction |= inst.operands[0].reg << 12;
8705 inst.instruction |= inst.operands[1].reg;
8706 inst.instruction |= inst.operands[3].reg << 16;
8709 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8710 extends it to 32-bits, and adds the result to a value in another
8711 register. You can specify a rotation by 0, 8, 16, or 24 bits
8712 before extracting the 16-bit value.
8713 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8714 Condition defaults to COND_ALWAYS.
8715 Error if any register uses R15. */
8720 inst.instruction |= inst.operands[0].reg << 12;
8721 inst.instruction |= inst.operands[1].reg << 16;
8722 inst.instruction |= inst.operands[2].reg;
8723 inst.instruction |= inst.operands[3].imm << 10;
8728 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8729 Condition defaults to COND_ALWAYS.
8730 Error if any register uses R15. */
8735 inst.instruction |= inst.operands[0].reg << 12;
8736 inst.instruction |= inst.operands[1].reg;
8737 inst.instruction |= inst.operands[2].imm << 10;
8740 /* VFP instructions. In a logical order: SP variant first, monad
8741 before dyad, arithmetic then move then load/store. */
8744 do_vfp_sp_monadic (void)
8746 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8747 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8751 do_vfp_sp_dyadic (void)
8753 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8754 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8755 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8759 do_vfp_sp_compare_z (void)
8761 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8765 do_vfp_dp_sp_cvt (void)
8767 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8768 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8772 do_vfp_sp_dp_cvt (void)
8774 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8775 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8779 do_vfp_reg_from_sp (void)
8781 inst.instruction |= inst.operands[0].reg << 12;
8782 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8786 do_vfp_reg2_from_sp2 (void)
8788 constraint (inst.operands[2].imm != 2,
8789 _("only two consecutive VFP SP registers allowed here"));
8790 inst.instruction |= inst.operands[0].reg << 12;
8791 inst.instruction |= inst.operands[1].reg << 16;
8792 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8796 do_vfp_sp_from_reg (void)
8798 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
8799 inst.instruction |= inst.operands[1].reg << 12;
8803 do_vfp_sp2_from_reg2 (void)
8805 constraint (inst.operands[0].imm != 2,
8806 _("only two consecutive VFP SP registers allowed here"));
8807 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
8808 inst.instruction |= inst.operands[1].reg << 12;
8809 inst.instruction |= inst.operands[2].reg << 16;
8813 do_vfp_sp_ldst (void)
8815 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8816 encode_arm_cp_address (1, FALSE, TRUE, 0);
8820 do_vfp_dp_ldst (void)
8822 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8823 encode_arm_cp_address (1, FALSE, TRUE, 0);
8828 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
8830 if (inst.operands[0].writeback)
8831 inst.instruction |= WRITE_BACK;
8833 constraint (ldstm_type != VFP_LDSTMIA,
8834 _("this addressing mode requires base-register writeback"));
8835 inst.instruction |= inst.operands[0].reg << 16;
8836 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
8837 inst.instruction |= inst.operands[1].imm;
8841 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
8845 if (inst.operands[0].writeback)
8846 inst.instruction |= WRITE_BACK;
8848 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
8849 _("this addressing mode requires base-register writeback"));
8851 inst.instruction |= inst.operands[0].reg << 16;
8852 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8854 count = inst.operands[1].imm << 1;
8855 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
8858 inst.instruction |= count;
8862 do_vfp_sp_ldstmia (void)
8864 vfp_sp_ldstm (VFP_LDSTMIA);
8868 do_vfp_sp_ldstmdb (void)
8870 vfp_sp_ldstm (VFP_LDSTMDB);
8874 do_vfp_dp_ldstmia (void)
8876 vfp_dp_ldstm (VFP_LDSTMIA);
8880 do_vfp_dp_ldstmdb (void)
8882 vfp_dp_ldstm (VFP_LDSTMDB);
8886 do_vfp_xp_ldstmia (void)
8888 vfp_dp_ldstm (VFP_LDSTMIAX);
8892 do_vfp_xp_ldstmdb (void)
8894 vfp_dp_ldstm (VFP_LDSTMDBX);
8898 do_vfp_dp_rd_rm (void)
8900 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8901 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8905 do_vfp_dp_rn_rd (void)
8907 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
8908 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8912 do_vfp_dp_rd_rn (void)
8914 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8915 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8919 do_vfp_dp_rd_rn_rm (void)
8921 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8922 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8923 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
8929 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8933 do_vfp_dp_rm_rd_rn (void)
8935 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
8936 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8937 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
8940 /* VFPv3 instructions. */
8942 do_vfp_sp_const (void)
8944 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8945 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8946 inst.instruction |= (inst.operands[1].imm & 0x0f);
8950 do_vfp_dp_const (void)
8952 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8953 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8954 inst.instruction |= (inst.operands[1].imm & 0x0f);
8958 vfp_conv (int srcsize)
8960 int immbits = srcsize - inst.operands[1].imm;
8962 if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
8964 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
8965 i.e. immbits must be in range 0 - 16. */
8966 inst.error = _("immediate value out of range, expected range [0, 16]");
8969 else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
8971 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
8972 i.e. immbits must be in range 0 - 31. */
8973 inst.error = _("immediate value out of range, expected range [1, 32]");
8977 inst.instruction |= (immbits & 1) << 5;
8978 inst.instruction |= (immbits >> 1);
8982 do_vfp_sp_conv_16 (void)
8984 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8989 do_vfp_dp_conv_16 (void)
8991 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8996 do_vfp_sp_conv_32 (void)
8998 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9003 do_vfp_dp_conv_32 (void)
9005 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9009 /* FPA instructions. Also in a logical order. */
9014 inst.instruction |= inst.operands[0].reg << 16;
9015 inst.instruction |= inst.operands[1].reg;
9019 do_fpa_ldmstm (void)
9021 inst.instruction |= inst.operands[0].reg << 12;
9022 switch (inst.operands[1].imm)
9024 case 1: inst.instruction |= CP_T_X; break;
9025 case 2: inst.instruction |= CP_T_Y; break;
9026 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
9031 if (inst.instruction & (PRE_INDEX | INDEX_UP))
9033 /* The instruction specified "ea" or "fd", so we can only accept
9034 [Rn]{!}. The instruction does not really support stacking or
9035 unstacking, so we have to emulate these by setting appropriate
9036 bits and offsets. */
9037 constraint (inst.reloc.exp.X_op != O_constant
9038 || inst.reloc.exp.X_add_number != 0,
9039 _("this instruction does not support indexing"));
9041 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
9042 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
9044 if (!(inst.instruction & INDEX_UP))
9045 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
9047 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
9049 inst.operands[2].preind = 0;
9050 inst.operands[2].postind = 1;
9054 encode_arm_cp_address (2, TRUE, TRUE, 0);
9057 /* iWMMXt instructions: strictly in alphabetical order. */
9060 do_iwmmxt_tandorc (void)
9062 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
9066 do_iwmmxt_textrc (void)
9068 inst.instruction |= inst.operands[0].reg << 12;
9069 inst.instruction |= inst.operands[1].imm;
9073 do_iwmmxt_textrm (void)
9075 inst.instruction |= inst.operands[0].reg << 12;
9076 inst.instruction |= inst.operands[1].reg << 16;
9077 inst.instruction |= inst.operands[2].imm;
9081 do_iwmmxt_tinsr (void)
9083 inst.instruction |= inst.operands[0].reg << 16;
9084 inst.instruction |= inst.operands[1].reg << 12;
9085 inst.instruction |= inst.operands[2].imm;
9089 do_iwmmxt_tmia (void)
9091 inst.instruction |= inst.operands[0].reg << 5;
9092 inst.instruction |= inst.operands[1].reg;
9093 inst.instruction |= inst.operands[2].reg << 12;
9097 do_iwmmxt_waligni (void)
9099 inst.instruction |= inst.operands[0].reg << 12;
9100 inst.instruction |= inst.operands[1].reg << 16;
9101 inst.instruction |= inst.operands[2].reg;
9102 inst.instruction |= inst.operands[3].imm << 20;
9106 do_iwmmxt_wmerge (void)
9108 inst.instruction |= inst.operands[0].reg << 12;
9109 inst.instruction |= inst.operands[1].reg << 16;
9110 inst.instruction |= inst.operands[2].reg;
9111 inst.instruction |= inst.operands[3].imm << 21;
9115 do_iwmmxt_wmov (void)
9117 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9118 inst.instruction |= inst.operands[0].reg << 12;
9119 inst.instruction |= inst.operands[1].reg << 16;
9120 inst.instruction |= inst.operands[1].reg;
9124 do_iwmmxt_wldstbh (void)
9127 inst.instruction |= inst.operands[0].reg << 12;
9129 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
9131 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
9132 encode_arm_cp_address (1, TRUE, FALSE, reloc);
9136 do_iwmmxt_wldstw (void)
9138 /* RIWR_RIWC clears .isreg for a control register. */
9139 if (!inst.operands[0].isreg)
9141 constraint (inst.cond != COND_ALWAYS, BAD_COND);
9142 inst.instruction |= 0xf0000000;
9145 inst.instruction |= inst.operands[0].reg << 12;
9146 encode_arm_cp_address (1, TRUE, TRUE, 0);
9150 do_iwmmxt_wldstd (void)
9152 inst.instruction |= inst.operands[0].reg << 12;
9153 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
9154 && inst.operands[1].immisreg)
9156 inst.instruction &= ~0x1a000ff;
9157 inst.instruction |= (0xf << 28);
9158 if (inst.operands[1].preind)
9159 inst.instruction |= PRE_INDEX;
9160 if (!inst.operands[1].negative)
9161 inst.instruction |= INDEX_UP;
9162 if (inst.operands[1].writeback)
9163 inst.instruction |= WRITE_BACK;
9164 inst.instruction |= inst.operands[1].reg << 16;
9165 inst.instruction |= inst.reloc.exp.X_add_number << 4;
9166 inst.instruction |= inst.operands[1].imm;
9169 encode_arm_cp_address (1, TRUE, FALSE, 0);
9173 do_iwmmxt_wshufh (void)
9175 inst.instruction |= inst.operands[0].reg << 12;
9176 inst.instruction |= inst.operands[1].reg << 16;
9177 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9178 inst.instruction |= (inst.operands[2].imm & 0x0f);
9182 do_iwmmxt_wzero (void)
9184 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9185 inst.instruction |= inst.operands[0].reg;
9186 inst.instruction |= inst.operands[0].reg << 12;
9187 inst.instruction |= inst.operands[0].reg << 16;
9191 do_iwmmxt_wrwrwr_or_imm5 (void)
9193 if (inst.operands[2].isreg)
9196 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
9197 _("immediate operand requires iWMMXt2"));
9199 if (inst.operands[2].imm == 0)
9201 switch ((inst.instruction >> 20) & 0xf)
9207 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
9208 inst.operands[2].imm = 16;
9209 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
9215 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
9216 inst.operands[2].imm = 32;
9217 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
9224 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
9226 wrn = (inst.instruction >> 16) & 0xf;
9227 inst.instruction &= 0xff0fff0f;
9228 inst.instruction |= wrn;
9229 /* Bail out here; the instruction is now assembled. */
9234 /* Map 32 -> 0, etc. */
9235 inst.operands[2].imm &= 0x1f;
9236 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
9240 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
9241 operations first, then control, shift, and load/store. */
9243 /* Insns like "foo X,Y,Z". */
9246 do_mav_triple (void)
9248 inst.instruction |= inst.operands[0].reg << 16;
9249 inst.instruction |= inst.operands[1].reg;
9250 inst.instruction |= inst.operands[2].reg << 12;
9253 /* Insns like "foo W,X,Y,Z".
9254 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
9259 inst.instruction |= inst.operands[0].reg << 5;
9260 inst.instruction |= inst.operands[1].reg << 12;
9261 inst.instruction |= inst.operands[2].reg << 16;
9262 inst.instruction |= inst.operands[3].reg;
9265 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
9269 inst.instruction |= inst.operands[1].reg << 12;
9272 /* Maverick shift immediate instructions.
9273 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9274 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
9279 int imm = inst.operands[2].imm;
9281 inst.instruction |= inst.operands[0].reg << 12;
9282 inst.instruction |= inst.operands[1].reg << 16;
9284 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
9285 Bits 5-7 of the insn should have bits 4-6 of the immediate.
9286 Bit 4 should be 0. */
9287 imm = (imm & 0xf) | ((imm & 0x70) << 1);
9289 inst.instruction |= imm;
9292 /* XScale instructions. Also sorted arithmetic before move. */
9294 /* Xscale multiply-accumulate (argument parse)
9297 MIAxycc acc0,Rm,Rs. */
9302 inst.instruction |= inst.operands[1].reg;
9303 inst.instruction |= inst.operands[2].reg << 12;
9306 /* Xscale move-accumulator-register (argument parse)
9308 MARcc acc0,RdLo,RdHi. */
9313 inst.instruction |= inst.operands[1].reg << 12;
9314 inst.instruction |= inst.operands[2].reg << 16;
9317 /* Xscale move-register-accumulator (argument parse)
9319 MRAcc RdLo,RdHi,acc0. */
9324 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
9325 inst.instruction |= inst.operands[0].reg << 12;
9326 inst.instruction |= inst.operands[1].reg << 16;
9329 /* Encoding functions relevant only to Thumb. */
9331 /* inst.operands[i] is a shifted-register operand; encode
9332 it into inst.instruction in the format used by Thumb32. */
9335 encode_thumb32_shifted_operand (int i)
9337 unsigned int value = inst.reloc.exp.X_add_number;
9338 unsigned int shift = inst.operands[i].shift_kind;
9340 constraint (inst.operands[i].immisreg,
9341 _("shift by register not allowed in thumb mode"));
9342 inst.instruction |= inst.operands[i].reg;
9343 if (shift == SHIFT_RRX)
9344 inst.instruction |= SHIFT_ROR << 4;
9347 constraint (inst.reloc.exp.X_op != O_constant,
9348 _("expression too complex"));
9350 constraint (value > 32
9351 || (value == 32 && (shift == SHIFT_LSL
9352 || shift == SHIFT_ROR)),
9353 _("shift expression is too large"));
9357 else if (value == 32)
9360 inst.instruction |= shift << 4;
9361 inst.instruction |= (value & 0x1c) << 10;
9362 inst.instruction |= (value & 0x03) << 6;
9367 /* inst.operands[i] was set up by parse_address. Encode it into a
9368 Thumb32 format load or store instruction. Reject forms that cannot
9369 be used with such instructions. If is_t is true, reject forms that
9370 cannot be used with a T instruction; if is_d is true, reject forms
9371 that cannot be used with a D instruction. If it is a store insn,
9375 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
9377 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
9379 constraint (!inst.operands[i].isreg,
9380 _("Instruction does not support =N addresses"));
9382 inst.instruction |= inst.operands[i].reg << 16;
9383 if (inst.operands[i].immisreg)
9385 constraint (is_pc, BAD_PC_ADDRESSING);
9386 constraint (is_t || is_d, _("cannot use register index with this instruction"));
9387 constraint (inst.operands[i].negative,
9388 _("Thumb does not support negative register indexing"));
9389 constraint (inst.operands[i].postind,
9390 _("Thumb does not support register post-indexing"));
9391 constraint (inst.operands[i].writeback,
9392 _("Thumb does not support register indexing with writeback"));
9393 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
9394 _("Thumb supports only LSL in shifted register indexing"));
9396 inst.instruction |= inst.operands[i].imm;
9397 if (inst.operands[i].shifted)
9399 constraint (inst.reloc.exp.X_op != O_constant,
9400 _("expression too complex"));
9401 constraint (inst.reloc.exp.X_add_number < 0
9402 || inst.reloc.exp.X_add_number > 3,
9403 _("shift out of range"));
9404 inst.instruction |= inst.reloc.exp.X_add_number << 4;
9406 inst.reloc.type = BFD_RELOC_UNUSED;
9408 else if (inst.operands[i].preind)
9410 constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
9411 constraint (is_t && inst.operands[i].writeback,
9412 _("cannot use writeback with this instruction"));
9413 constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0)
9414 && !inst.reloc.pc_rel, BAD_PC_ADDRESSING);
9418 inst.instruction |= 0x01000000;
9419 if (inst.operands[i].writeback)
9420 inst.instruction |= 0x00200000;
9424 inst.instruction |= 0x00000c00;
9425 if (inst.operands[i].writeback)
9426 inst.instruction |= 0x00000100;
9428 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9430 else if (inst.operands[i].postind)
9432 gas_assert (inst.operands[i].writeback);
9433 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
9434 constraint (is_t, _("cannot use post-indexing with this instruction"));
9437 inst.instruction |= 0x00200000;
9439 inst.instruction |= 0x00000900;
9440 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9442 else /* unindexed - only for coprocessor */
9443 inst.error = _("instruction does not accept unindexed addressing");
9446 /* Table of Thumb instructions which exist in both 16- and 32-bit
9447 encodings (the latter only in post-V6T2 cores). The index is the
9448 value used in the insns table below. When there is more than one
9449 possible 16-bit encoding for the instruction, this table always
9451 Also contains several pseudo-instructions used during relaxation. */
9452 #define T16_32_TAB \
9453 X(_adc, 4140, eb400000), \
9454 X(_adcs, 4140, eb500000), \
9455 X(_add, 1c00, eb000000), \
9456 X(_adds, 1c00, eb100000), \
9457 X(_addi, 0000, f1000000), \
9458 X(_addis, 0000, f1100000), \
9459 X(_add_pc,000f, f20f0000), \
9460 X(_add_sp,000d, f10d0000), \
9461 X(_adr, 000f, f20f0000), \
9462 X(_and, 4000, ea000000), \
9463 X(_ands, 4000, ea100000), \
9464 X(_asr, 1000, fa40f000), \
9465 X(_asrs, 1000, fa50f000), \
9466 X(_b, e000, f000b000), \
9467 X(_bcond, d000, f0008000), \
9468 X(_bic, 4380, ea200000), \
9469 X(_bics, 4380, ea300000), \
9470 X(_cmn, 42c0, eb100f00), \
9471 X(_cmp, 2800, ebb00f00), \
9472 X(_cpsie, b660, f3af8400), \
9473 X(_cpsid, b670, f3af8600), \
9474 X(_cpy, 4600, ea4f0000), \
9475 X(_dec_sp,80dd, f1ad0d00), \
9476 X(_eor, 4040, ea800000), \
9477 X(_eors, 4040, ea900000), \
9478 X(_inc_sp,00dd, f10d0d00), \
9479 X(_ldmia, c800, e8900000), \
9480 X(_ldr, 6800, f8500000), \
9481 X(_ldrb, 7800, f8100000), \
9482 X(_ldrh, 8800, f8300000), \
9483 X(_ldrsb, 5600, f9100000), \
9484 X(_ldrsh, 5e00, f9300000), \
9485 X(_ldr_pc,4800, f85f0000), \
9486 X(_ldr_pc2,4800, f85f0000), \
9487 X(_ldr_sp,9800, f85d0000), \
9488 X(_lsl, 0000, fa00f000), \
9489 X(_lsls, 0000, fa10f000), \
9490 X(_lsr, 0800, fa20f000), \
9491 X(_lsrs, 0800, fa30f000), \
9492 X(_mov, 2000, ea4f0000), \
9493 X(_movs, 2000, ea5f0000), \
9494 X(_mul, 4340, fb00f000), \
9495 X(_muls, 4340, ffffffff), /* no 32b muls */ \
9496 X(_mvn, 43c0, ea6f0000), \
9497 X(_mvns, 43c0, ea7f0000), \
9498 X(_neg, 4240, f1c00000), /* rsb #0 */ \
9499 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
9500 X(_orr, 4300, ea400000), \
9501 X(_orrs, 4300, ea500000), \
9502 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
9503 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
9504 X(_rev, ba00, fa90f080), \
9505 X(_rev16, ba40, fa90f090), \
9506 X(_revsh, bac0, fa90f0b0), \
9507 X(_ror, 41c0, fa60f000), \
9508 X(_rors, 41c0, fa70f000), \
9509 X(_sbc, 4180, eb600000), \
9510 X(_sbcs, 4180, eb700000), \
9511 X(_stmia, c000, e8800000), \
9512 X(_str, 6000, f8400000), \
9513 X(_strb, 7000, f8000000), \
9514 X(_strh, 8000, f8200000), \
9515 X(_str_sp,9000, f84d0000), \
9516 X(_sub, 1e00, eba00000), \
9517 X(_subs, 1e00, ebb00000), \
9518 X(_subi, 8000, f1a00000), \
9519 X(_subis, 8000, f1b00000), \
9520 X(_sxtb, b240, fa4ff080), \
9521 X(_sxth, b200, fa0ff080), \
9522 X(_tst, 4200, ea100f00), \
9523 X(_uxtb, b2c0, fa5ff080), \
9524 X(_uxth, b280, fa1ff080), \
9525 X(_nop, bf00, f3af8000), \
9526 X(_yield, bf10, f3af8001), \
9527 X(_wfe, bf20, f3af8002), \
9528 X(_wfi, bf30, f3af8003), \
9529 X(_sev, bf40, f3af8004),
9531 /* To catch errors in encoding functions, the codes are all offset by
9532 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
9533 as 16-bit instructions. */
9534 #define X(a,b,c) T_MNEM##a
9535 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
9538 #define X(a,b,c) 0x##b
9539 static const unsigned short thumb_op16[] = { T16_32_TAB };
9540 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
9543 #define X(a,b,c) 0x##c
9544 static const unsigned int thumb_op32[] = { T16_32_TAB };
9545 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
9546 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
9550 /* Thumb instruction encoders, in alphabetical order. */
9555 do_t_add_sub_w (void)
9559 Rd = inst.operands[0].reg;
9560 Rn = inst.operands[1].reg;
9562 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
9563 is the SP-{plus,minus}-immediate form of the instruction. */
9565 constraint (Rd == REG_PC, BAD_PC);
9567 reject_bad_reg (Rd);
9569 inst.instruction |= (Rn << 16) | (Rd << 8);
9570 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9573 /* Parse an add or subtract instruction. We get here with inst.instruction
9574 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
9581 Rd = inst.operands[0].reg;
9582 Rs = (inst.operands[1].present
9583 ? inst.operands[1].reg /* Rd, Rs, foo */
9584 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9587 set_it_insn_type_last ();
9595 flags = (inst.instruction == T_MNEM_adds
9596 || inst.instruction == T_MNEM_subs);
9598 narrow = !in_it_block ();
9600 narrow = in_it_block ();
9601 if (!inst.operands[2].isreg)
9605 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
9607 add = (inst.instruction == T_MNEM_add
9608 || inst.instruction == T_MNEM_adds);
9610 if (inst.size_req != 4)
9612 /* Attempt to use a narrow opcode, with relaxation if
9614 if (Rd == REG_SP && Rs == REG_SP && !flags)
9615 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
9616 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
9617 opcode = T_MNEM_add_sp;
9618 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
9619 opcode = T_MNEM_add_pc;
9620 else if (Rd <= 7 && Rs <= 7 && narrow)
9623 opcode = add ? T_MNEM_addis : T_MNEM_subis;
9625 opcode = add ? T_MNEM_addi : T_MNEM_subi;
9629 inst.instruction = THUMB_OP16(opcode);
9630 inst.instruction |= (Rd << 4) | Rs;
9631 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9632 if (inst.size_req != 2)
9633 inst.relax = opcode;
9636 constraint (inst.size_req == 2, BAD_HIREG);
9638 if (inst.size_req == 4
9639 || (inst.size_req != 2 && !opcode))
9643 constraint (add, BAD_PC);
9644 constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
9645 _("only SUBS PC, LR, #const allowed"));
9646 constraint (inst.reloc.exp.X_op != O_constant,
9647 _("expression too complex"));
9648 constraint (inst.reloc.exp.X_add_number < 0
9649 || inst.reloc.exp.X_add_number > 0xff,
9650 _("immediate value out of range"));
9651 inst.instruction = T2_SUBS_PC_LR
9652 | inst.reloc.exp.X_add_number;
9653 inst.reloc.type = BFD_RELOC_UNUSED;
9656 else if (Rs == REG_PC)
9658 /* Always use addw/subw. */
9659 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
9660 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9664 inst.instruction = THUMB_OP32 (inst.instruction);
9665 inst.instruction = (inst.instruction & 0xe1ffffff)
9668 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9670 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
9672 inst.instruction |= Rd << 8;
9673 inst.instruction |= Rs << 16;
9678 unsigned int value = inst.reloc.exp.X_add_number;
9679 unsigned int shift = inst.operands[2].shift_kind;
9681 Rn = inst.operands[2].reg;
9682 /* See if we can do this with a 16-bit instruction. */
9683 if (!inst.operands[2].shifted && inst.size_req != 4)
9685 if (Rd > 7 || Rs > 7 || Rn > 7)
9690 inst.instruction = ((inst.instruction == T_MNEM_adds
9691 || inst.instruction == T_MNEM_add)
9694 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
9698 if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
9700 /* Thumb-1 cores (except v6-M) require at least one high
9701 register in a narrow non flag setting add. */
9702 if (Rd > 7 || Rn > 7
9703 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
9704 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
9711 inst.instruction = T_OPCODE_ADD_HI;
9712 inst.instruction |= (Rd & 8) << 4;
9713 inst.instruction |= (Rd & 7);
9714 inst.instruction |= Rn << 3;
9720 constraint (Rd == REG_PC, BAD_PC);
9721 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
9722 constraint (Rs == REG_PC, BAD_PC);
9723 reject_bad_reg (Rn);
9725 /* If we get here, it can't be done in 16 bits. */
9726 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
9727 _("shift must be constant"));
9728 inst.instruction = THUMB_OP32 (inst.instruction);
9729 inst.instruction |= Rd << 8;
9730 inst.instruction |= Rs << 16;
9731 constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
9732 _("shift value over 3 not allowed in thumb mode"));
9733 constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
9734 _("only LSL shift allowed in thumb mode"));
9735 encode_thumb32_shifted_operand (2);
9740 constraint (inst.instruction == T_MNEM_adds
9741 || inst.instruction == T_MNEM_subs,
9744 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
9746 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
9747 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
9750 inst.instruction = (inst.instruction == T_MNEM_add
9752 inst.instruction |= (Rd << 4) | Rs;
9753 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9757 Rn = inst.operands[2].reg;
9758 constraint (inst.operands[2].shifted, _("unshifted register required"));
9760 /* We now have Rd, Rs, and Rn set to registers. */
9761 if (Rd > 7 || Rs > 7 || Rn > 7)
9763 /* Can't do this for SUB. */
9764 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
9765 inst.instruction = T_OPCODE_ADD_HI;
9766 inst.instruction |= (Rd & 8) << 4;
9767 inst.instruction |= (Rd & 7);
9769 inst.instruction |= Rn << 3;
9771 inst.instruction |= Rs << 3;
9773 constraint (1, _("dest must overlap one source register"));
9777 inst.instruction = (inst.instruction == T_MNEM_add
9778 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
9779 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
9789 Rd = inst.operands[0].reg;
9790 reject_bad_reg (Rd);
9792 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
9794 /* Defer to section relaxation. */
9795 inst.relax = inst.instruction;
9796 inst.instruction = THUMB_OP16 (inst.instruction);
9797 inst.instruction |= Rd << 4;
9799 else if (unified_syntax && inst.size_req != 2)
9801 /* Generate a 32-bit opcode. */
9802 inst.instruction = THUMB_OP32 (inst.instruction);
9803 inst.instruction |= Rd << 8;
9804 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
9805 inst.reloc.pc_rel = 1;
9809 /* Generate a 16-bit opcode. */
9810 inst.instruction = THUMB_OP16 (inst.instruction);
9811 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9812 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
9813 inst.reloc.pc_rel = 1;
9815 inst.instruction |= Rd << 4;
9819 /* Arithmetic instructions for which there is just one 16-bit
9820 instruction encoding, and it allows only two low registers.
9821 For maximal compatibility with ARM syntax, we allow three register
9822 operands even when Thumb-32 instructions are not available, as long
9823 as the first two are identical. For instance, both "sbc r0,r1" and
9824 "sbc r0,r0,r1" are allowed. */
9830 Rd = inst.operands[0].reg;
9831 Rs = (inst.operands[1].present
9832 ? inst.operands[1].reg /* Rd, Rs, foo */
9833 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9834 Rn = inst.operands[2].reg;
9836 reject_bad_reg (Rd);
9837 reject_bad_reg (Rs);
9838 if (inst.operands[2].isreg)
9839 reject_bad_reg (Rn);
9843 if (!inst.operands[2].isreg)
9845 /* For an immediate, we always generate a 32-bit opcode;
9846 section relaxation will shrink it later if possible. */
9847 inst.instruction = THUMB_OP32 (inst.instruction);
9848 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9849 inst.instruction |= Rd << 8;
9850 inst.instruction |= Rs << 16;
9851 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9857 /* See if we can do this with a 16-bit instruction. */
9858 if (THUMB_SETS_FLAGS (inst.instruction))
9859 narrow = !in_it_block ();
9861 narrow = in_it_block ();
9863 if (Rd > 7 || Rn > 7 || Rs > 7)
9865 if (inst.operands[2].shifted)
9867 if (inst.size_req == 4)
9873 inst.instruction = THUMB_OP16 (inst.instruction);
9874 inst.instruction |= Rd;
9875 inst.instruction |= Rn << 3;
9879 /* If we get here, it can't be done in 16 bits. */
9880 constraint (inst.operands[2].shifted
9881 && inst.operands[2].immisreg,
9882 _("shift must be constant"));
9883 inst.instruction = THUMB_OP32 (inst.instruction);
9884 inst.instruction |= Rd << 8;
9885 inst.instruction |= Rs << 16;
9886 encode_thumb32_shifted_operand (2);
9891 /* On its face this is a lie - the instruction does set the
9892 flags. However, the only supported mnemonic in this mode
9894 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9896 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9897 _("unshifted register required"));
9898 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9899 constraint (Rd != Rs,
9900 _("dest and source1 must be the same register"));
9902 inst.instruction = THUMB_OP16 (inst.instruction);
9903 inst.instruction |= Rd;
9904 inst.instruction |= Rn << 3;
9908 /* Similarly, but for instructions where the arithmetic operation is
9909 commutative, so we can allow either of them to be different from
9910 the destination operand in a 16-bit instruction. For instance, all
9911 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
9918 Rd = inst.operands[0].reg;
9919 Rs = (inst.operands[1].present
9920 ? inst.operands[1].reg /* Rd, Rs, foo */
9921 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9922 Rn = inst.operands[2].reg;
9924 reject_bad_reg (Rd);
9925 reject_bad_reg (Rs);
9926 if (inst.operands[2].isreg)
9927 reject_bad_reg (Rn);
9931 if (!inst.operands[2].isreg)
9933 /* For an immediate, we always generate a 32-bit opcode;
9934 section relaxation will shrink it later if possible. */
9935 inst.instruction = THUMB_OP32 (inst.instruction);
9936 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9937 inst.instruction |= Rd << 8;
9938 inst.instruction |= Rs << 16;
9939 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9945 /* See if we can do this with a 16-bit instruction. */
9946 if (THUMB_SETS_FLAGS (inst.instruction))
9947 narrow = !in_it_block ();
9949 narrow = in_it_block ();
9951 if (Rd > 7 || Rn > 7 || Rs > 7)
9953 if (inst.operands[2].shifted)
9955 if (inst.size_req == 4)
9962 inst.instruction = THUMB_OP16 (inst.instruction);
9963 inst.instruction |= Rd;
9964 inst.instruction |= Rn << 3;
9969 inst.instruction = THUMB_OP16 (inst.instruction);
9970 inst.instruction |= Rd;
9971 inst.instruction |= Rs << 3;
9976 /* If we get here, it can't be done in 16 bits. */
9977 constraint (inst.operands[2].shifted
9978 && inst.operands[2].immisreg,
9979 _("shift must be constant"));
9980 inst.instruction = THUMB_OP32 (inst.instruction);
9981 inst.instruction |= Rd << 8;
9982 inst.instruction |= Rs << 16;
9983 encode_thumb32_shifted_operand (2);
9988 /* On its face this is a lie - the instruction does set the
9989 flags. However, the only supported mnemonic in this mode
9991 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9993 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9994 _("unshifted register required"));
9995 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9997 inst.instruction = THUMB_OP16 (inst.instruction);
9998 inst.instruction |= Rd;
10001 inst.instruction |= Rn << 3;
10003 inst.instruction |= Rs << 3;
10005 constraint (1, _("dest must overlap one source register"));
10010 do_t_barrier (void)
10012 if (inst.operands[0].present)
10014 constraint ((inst.instruction & 0xf0) != 0x40
10015 && inst.operands[0].imm > 0xf
10016 && inst.operands[0].imm < 0x0,
10017 _("bad barrier type"));
10018 inst.instruction |= inst.operands[0].imm;
10021 inst.instruction |= 0xf;
10028 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10029 constraint (msb > 32, _("bit-field extends past end of register"));
10030 /* The instruction encoding stores the LSB and MSB,
10031 not the LSB and width. */
10032 Rd = inst.operands[0].reg;
10033 reject_bad_reg (Rd);
10034 inst.instruction |= Rd << 8;
10035 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10036 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10037 inst.instruction |= msb - 1;
10046 Rd = inst.operands[0].reg;
10047 reject_bad_reg (Rd);
10049 /* #0 in second position is alternative syntax for bfc, which is
10050 the same instruction but with REG_PC in the Rm field. */
10051 if (!inst.operands[1].isreg)
10055 Rn = inst.operands[1].reg;
10056 reject_bad_reg (Rn);
10059 msb = inst.operands[2].imm + inst.operands[3].imm;
10060 constraint (msb > 32, _("bit-field extends past end of register"));
10061 /* The instruction encoding stores the LSB and MSB,
10062 not the LSB and width. */
10063 inst.instruction |= Rd << 8;
10064 inst.instruction |= Rn << 16;
10065 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10066 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10067 inst.instruction |= msb - 1;
10075 Rd = inst.operands[0].reg;
10076 Rn = inst.operands[1].reg;
10078 reject_bad_reg (Rd);
10079 reject_bad_reg (Rn);
10081 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10082 _("bit-field extends past end of register"));
10083 inst.instruction |= Rd << 8;
10084 inst.instruction |= Rn << 16;
10085 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10086 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10087 inst.instruction |= inst.operands[3].imm - 1;
10090 /* ARM V5 Thumb BLX (argument parse)
10091 BLX <target_addr> which is BLX(1)
10092 BLX <Rm> which is BLX(2)
10093 Unfortunately, there are two different opcodes for this mnemonic.
10094 So, the insns[].value is not used, and the code here zaps values
10095 into inst.instruction.
10097 ??? How to take advantage of the additional two bits of displacement
10098 available in Thumb32 mode? Need new relocation? */
10103 set_it_insn_type_last ();
10105 if (inst.operands[0].isreg)
10107 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10108 /* We have a register, so this is BLX(2). */
10109 inst.instruction |= inst.operands[0].reg << 3;
10113 /* No register. This must be BLX(1). */
10114 inst.instruction = 0xf000e800;
10115 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
10127 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
10129 if (in_it_block ())
10131 /* Conditional branches inside IT blocks are encoded as unconditional
10133 cond = COND_ALWAYS;
10138 if (cond != COND_ALWAYS)
10139 opcode = T_MNEM_bcond;
10141 opcode = inst.instruction;
10144 && (inst.size_req == 4
10145 || (inst.size_req != 2
10146 && (inst.operands[0].hasreloc
10147 || inst.reloc.exp.X_op == O_constant))))
10149 inst.instruction = THUMB_OP32(opcode);
10150 if (cond == COND_ALWAYS)
10151 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10154 gas_assert (cond != 0xF);
10155 inst.instruction |= cond << 22;
10156 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10161 inst.instruction = THUMB_OP16(opcode);
10162 if (cond == COND_ALWAYS)
10163 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10166 inst.instruction |= cond << 8;
10167 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10169 /* Allow section relaxation. */
10170 if (unified_syntax && inst.size_req != 2)
10171 inst.relax = opcode;
10173 inst.reloc.type = reloc;
10174 inst.reloc.pc_rel = 1;
10180 constraint (inst.cond != COND_ALWAYS,
10181 _("instruction is always unconditional"));
10182 if (inst.operands[0].present)
10184 constraint (inst.operands[0].imm > 255,
10185 _("immediate value out of range"));
10186 inst.instruction |= inst.operands[0].imm;
10187 set_it_insn_type (NEUTRAL_IT_INSN);
10192 do_t_branch23 (void)
10194 set_it_insn_type_last ();
10195 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
10197 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
10198 this file. We used to simply ignore the PLT reloc type here --
10199 the branch encoding is now needed to deal with TLSCALL relocs.
10200 So if we see a PLT reloc now, put it back to how it used to be to
10201 keep the preexisting behaviour. */
10202 if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
10203 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
10205 #if defined(OBJ_COFF)
10206 /* If the destination of the branch is a defined symbol which does not have
10207 the THUMB_FUNC attribute, then we must be calling a function which has
10208 the (interfacearm) attribute. We look for the Thumb entry point to that
10209 function and change the branch to refer to that function instead. */
10210 if ( inst.reloc.exp.X_op == O_symbol
10211 && inst.reloc.exp.X_add_symbol != NULL
10212 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
10213 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
10214 inst.reloc.exp.X_add_symbol =
10215 find_real_start (inst.reloc.exp.X_add_symbol);
10222 set_it_insn_type_last ();
10223 inst.instruction |= inst.operands[0].reg << 3;
10224 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
10225 should cause the alignment to be checked once it is known. This is
10226 because BX PC only works if the instruction is word aligned. */
10234 set_it_insn_type_last ();
10235 Rm = inst.operands[0].reg;
10236 reject_bad_reg (Rm);
10237 inst.instruction |= Rm << 16;
10246 Rd = inst.operands[0].reg;
10247 Rm = inst.operands[1].reg;
10249 reject_bad_reg (Rd);
10250 reject_bad_reg (Rm);
10252 inst.instruction |= Rd << 8;
10253 inst.instruction |= Rm << 16;
10254 inst.instruction |= Rm;
10260 set_it_insn_type (OUTSIDE_IT_INSN);
10261 inst.instruction |= inst.operands[0].imm;
10267 set_it_insn_type (OUTSIDE_IT_INSN);
10269 && (inst.operands[1].present || inst.size_req == 4)
10270 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
10272 unsigned int imod = (inst.instruction & 0x0030) >> 4;
10273 inst.instruction = 0xf3af8000;
10274 inst.instruction |= imod << 9;
10275 inst.instruction |= inst.operands[0].imm << 5;
10276 if (inst.operands[1].present)
10277 inst.instruction |= 0x100 | inst.operands[1].imm;
10281 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
10282 && (inst.operands[0].imm & 4),
10283 _("selected processor does not support 'A' form "
10284 "of this instruction"));
10285 constraint (inst.operands[1].present || inst.size_req == 4,
10286 _("Thumb does not support the 2-argument "
10287 "form of this instruction"));
10288 inst.instruction |= inst.operands[0].imm;
10292 /* THUMB CPY instruction (argument parse). */
10297 if (inst.size_req == 4)
10299 inst.instruction = THUMB_OP32 (T_MNEM_mov);
10300 inst.instruction |= inst.operands[0].reg << 8;
10301 inst.instruction |= inst.operands[1].reg;
10305 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
10306 inst.instruction |= (inst.operands[0].reg & 0x7);
10307 inst.instruction |= inst.operands[1].reg << 3;
10314 set_it_insn_type (OUTSIDE_IT_INSN);
10315 constraint (inst.operands[0].reg > 7, BAD_HIREG);
10316 inst.instruction |= inst.operands[0].reg;
10317 inst.reloc.pc_rel = 1;
10318 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
10324 inst.instruction |= inst.operands[0].imm;
10330 unsigned Rd, Rn, Rm;
10332 Rd = inst.operands[0].reg;
10333 Rn = (inst.operands[1].present
10334 ? inst.operands[1].reg : Rd);
10335 Rm = inst.operands[2].reg;
10337 reject_bad_reg (Rd);
10338 reject_bad_reg (Rn);
10339 reject_bad_reg (Rm);
10341 inst.instruction |= Rd << 8;
10342 inst.instruction |= Rn << 16;
10343 inst.instruction |= Rm;
10349 if (unified_syntax && inst.size_req == 4)
10350 inst.instruction = THUMB_OP32 (inst.instruction);
10352 inst.instruction = THUMB_OP16 (inst.instruction);
10358 unsigned int cond = inst.operands[0].imm;
10360 set_it_insn_type (IT_INSN);
10361 now_it.mask = (inst.instruction & 0xf) | 0x10;
10363 now_it.warn_deprecated = FALSE;
10365 /* If the condition is a negative condition, invert the mask. */
10366 if ((cond & 0x1) == 0x0)
10368 unsigned int mask = inst.instruction & 0x000f;
10370 if ((mask & 0x7) == 0)
10372 /* No conversion needed. */
10373 now_it.block_length = 1;
10375 else if ((mask & 0x3) == 0)
10378 now_it.block_length = 2;
10380 else if ((mask & 0x1) == 0)
10383 now_it.block_length = 3;
10388 now_it.block_length = 4;
10391 inst.instruction &= 0xfff0;
10392 inst.instruction |= mask;
10395 inst.instruction |= cond << 4;
10398 /* Helper function used for both push/pop and ldm/stm. */
10400 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
10404 load = (inst.instruction & (1 << 20)) != 0;
10406 if (mask & (1 << 13))
10407 inst.error = _("SP not allowed in register list");
10409 if ((mask & (1 << base)) != 0
10411 inst.error = _("having the base register in the register list when "
10412 "using write back is UNPREDICTABLE");
10416 if (mask & (1 << 15))
10418 if (mask & (1 << 14))
10419 inst.error = _("LR and PC should not both be in register list");
10421 set_it_insn_type_last ();
10426 if (mask & (1 << 15))
10427 inst.error = _("PC not allowed in register list");
10430 if ((mask & (mask - 1)) == 0)
10432 /* Single register transfers implemented as str/ldr. */
10435 if (inst.instruction & (1 << 23))
10436 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
10438 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
10442 if (inst.instruction & (1 << 23))
10443 inst.instruction = 0x00800000; /* ia -> [base] */
10445 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
10448 inst.instruction |= 0xf8400000;
10450 inst.instruction |= 0x00100000;
10452 mask = ffs (mask) - 1;
10455 else if (writeback)
10456 inst.instruction |= WRITE_BACK;
10458 inst.instruction |= mask;
10459 inst.instruction |= base << 16;
10465 /* This really doesn't seem worth it. */
10466 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
10467 _("expression too complex"));
10468 constraint (inst.operands[1].writeback,
10469 _("Thumb load/store multiple does not support {reglist}^"));
10471 if (unified_syntax)
10473 bfd_boolean narrow;
10477 /* See if we can use a 16-bit instruction. */
10478 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
10479 && inst.size_req != 4
10480 && !(inst.operands[1].imm & ~0xff))
10482 mask = 1 << inst.operands[0].reg;
10484 if (inst.operands[0].reg <= 7)
10486 if (inst.instruction == T_MNEM_stmia
10487 ? inst.operands[0].writeback
10488 : (inst.operands[0].writeback
10489 == !(inst.operands[1].imm & mask)))
10491 if (inst.instruction == T_MNEM_stmia
10492 && (inst.operands[1].imm & mask)
10493 && (inst.operands[1].imm & (mask - 1)))
10494 as_warn (_("value stored for r%d is UNKNOWN"),
10495 inst.operands[0].reg);
10497 inst.instruction = THUMB_OP16 (inst.instruction);
10498 inst.instruction |= inst.operands[0].reg << 8;
10499 inst.instruction |= inst.operands[1].imm;
10502 else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
10504 /* This means 1 register in reg list one of 3 situations:
10505 1. Instruction is stmia, but without writeback.
10506 2. lmdia without writeback, but with Rn not in
10508 3. ldmia with writeback, but with Rn in reglist.
10509 Case 3 is UNPREDICTABLE behaviour, so we handle
10510 case 1 and 2 which can be converted into a 16-bit
10511 str or ldr. The SP cases are handled below. */
10512 unsigned long opcode;
10513 /* First, record an error for Case 3. */
10514 if (inst.operands[1].imm & mask
10515 && inst.operands[0].writeback)
10517 _("having the base register in the register list when "
10518 "using write back is UNPREDICTABLE");
10520 opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
10522 inst.instruction = THUMB_OP16 (opcode);
10523 inst.instruction |= inst.operands[0].reg << 3;
10524 inst.instruction |= (ffs (inst.operands[1].imm)-1);
10528 else if (inst.operands[0] .reg == REG_SP)
10530 if (inst.operands[0].writeback)
10533 THUMB_OP16 (inst.instruction == T_MNEM_stmia
10534 ? T_MNEM_push : T_MNEM_pop);
10535 inst.instruction |= inst.operands[1].imm;
10538 else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
10541 THUMB_OP16 (inst.instruction == T_MNEM_stmia
10542 ? T_MNEM_str_sp : T_MNEM_ldr_sp);
10543 inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
10551 if (inst.instruction < 0xffff)
10552 inst.instruction = THUMB_OP32 (inst.instruction);
10554 encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
10555 inst.operands[0].writeback);
10560 constraint (inst.operands[0].reg > 7
10561 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
10562 constraint (inst.instruction != T_MNEM_ldmia
10563 && inst.instruction != T_MNEM_stmia,
10564 _("Thumb-2 instruction only valid in unified syntax"));
10565 if (inst.instruction == T_MNEM_stmia)
10567 if (!inst.operands[0].writeback)
10568 as_warn (_("this instruction will write back the base register"));
10569 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
10570 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
10571 as_warn (_("value stored for r%d is UNKNOWN"),
10572 inst.operands[0].reg);
10576 if (!inst.operands[0].writeback
10577 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
10578 as_warn (_("this instruction will write back the base register"));
10579 else if (inst.operands[0].writeback
10580 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
10581 as_warn (_("this instruction will not write back the base register"));
10584 inst.instruction = THUMB_OP16 (inst.instruction);
10585 inst.instruction |= inst.operands[0].reg << 8;
10586 inst.instruction |= inst.operands[1].imm;
10593 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
10594 || inst.operands[1].postind || inst.operands[1].writeback
10595 || inst.operands[1].immisreg || inst.operands[1].shifted
10596 || inst.operands[1].negative,
10599 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
10601 inst.instruction |= inst.operands[0].reg << 12;
10602 inst.instruction |= inst.operands[1].reg << 16;
10603 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10609 if (!inst.operands[1].present)
10611 constraint (inst.operands[0].reg == REG_LR,
10612 _("r14 not allowed as first register "
10613 "when second register is omitted"));
10614 inst.operands[1].reg = inst.operands[0].reg + 1;
10616 constraint (inst.operands[0].reg == inst.operands[1].reg,
10619 inst.instruction |= inst.operands[0].reg << 12;
10620 inst.instruction |= inst.operands[1].reg << 8;
10621 inst.instruction |= inst.operands[2].reg << 16;
10627 unsigned long opcode;
10630 if (inst.operands[0].isreg
10631 && !inst.operands[0].preind
10632 && inst.operands[0].reg == REG_PC)
10633 set_it_insn_type_last ();
10635 opcode = inst.instruction;
10636 if (unified_syntax)
10638 if (!inst.operands[1].isreg)
10640 if (opcode <= 0xffff)
10641 inst.instruction = THUMB_OP32 (opcode);
10642 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10645 if (inst.operands[1].isreg
10646 && !inst.operands[1].writeback
10647 && !inst.operands[1].shifted && !inst.operands[1].postind
10648 && !inst.operands[1].negative && inst.operands[0].reg <= 7
10649 && opcode <= 0xffff
10650 && inst.size_req != 4)
10652 /* Insn may have a 16-bit form. */
10653 Rn = inst.operands[1].reg;
10654 if (inst.operands[1].immisreg)
10656 inst.instruction = THUMB_OP16 (opcode);
10658 if (Rn <= 7 && inst.operands[1].imm <= 7)
10660 else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
10661 reject_bad_reg (inst.operands[1].imm);
10663 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
10664 && opcode != T_MNEM_ldrsb)
10665 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
10666 || (Rn == REG_SP && opcode == T_MNEM_str))
10673 if (inst.reloc.pc_rel)
10674 opcode = T_MNEM_ldr_pc2;
10676 opcode = T_MNEM_ldr_pc;
10680 if (opcode == T_MNEM_ldr)
10681 opcode = T_MNEM_ldr_sp;
10683 opcode = T_MNEM_str_sp;
10685 inst.instruction = inst.operands[0].reg << 8;
10689 inst.instruction = inst.operands[0].reg;
10690 inst.instruction |= inst.operands[1].reg << 3;
10692 inst.instruction |= THUMB_OP16 (opcode);
10693 if (inst.size_req == 2)
10694 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10696 inst.relax = opcode;
10700 /* Definitely a 32-bit variant. */
10702 /* Warning for Erratum 752419. */
10703 if (opcode == T_MNEM_ldr
10704 && inst.operands[0].reg == REG_SP
10705 && inst.operands[1].writeback == 1
10706 && !inst.operands[1].immisreg)
10708 if (no_cpu_selected ()
10709 || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
10710 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
10711 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
10712 as_warn (_("This instruction may be unpredictable "
10713 "if executed on M-profile cores "
10714 "with interrupts enabled."));
10717 /* Do some validations regarding addressing modes. */
10718 if (inst.operands[1].immisreg)
10719 reject_bad_reg (inst.operands[1].imm);
10721 constraint (inst.operands[1].writeback == 1
10722 && inst.operands[0].reg == inst.operands[1].reg,
10725 inst.instruction = THUMB_OP32 (opcode);
10726 inst.instruction |= inst.operands[0].reg << 12;
10727 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
10728 check_ldr_r15_aligned ();
10732 constraint (inst.operands[0].reg > 7, BAD_HIREG);
10734 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
10736 /* Only [Rn,Rm] is acceptable. */
10737 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
10738 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
10739 || inst.operands[1].postind || inst.operands[1].shifted
10740 || inst.operands[1].negative,
10741 _("Thumb does not support this addressing mode"));
10742 inst.instruction = THUMB_OP16 (inst.instruction);
10746 inst.instruction = THUMB_OP16 (inst.instruction);
10747 if (!inst.operands[1].isreg)
10748 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10751 constraint (!inst.operands[1].preind
10752 || inst.operands[1].shifted
10753 || inst.operands[1].writeback,
10754 _("Thumb does not support this addressing mode"));
10755 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
10757 constraint (inst.instruction & 0x0600,
10758 _("byte or halfword not valid for base register"));
10759 constraint (inst.operands[1].reg == REG_PC
10760 && !(inst.instruction & THUMB_LOAD_BIT),
10761 _("r15 based store not allowed"));
10762 constraint (inst.operands[1].immisreg,
10763 _("invalid base register for register offset"));
10765 if (inst.operands[1].reg == REG_PC)
10766 inst.instruction = T_OPCODE_LDR_PC;
10767 else if (inst.instruction & THUMB_LOAD_BIT)
10768 inst.instruction = T_OPCODE_LDR_SP;
10770 inst.instruction = T_OPCODE_STR_SP;
10772 inst.instruction |= inst.operands[0].reg << 8;
10773 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10777 constraint (inst.operands[1].reg > 7, BAD_HIREG);
10778 if (!inst.operands[1].immisreg)
10780 /* Immediate offset. */
10781 inst.instruction |= inst.operands[0].reg;
10782 inst.instruction |= inst.operands[1].reg << 3;
10783 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10787 /* Register offset. */
10788 constraint (inst.operands[1].imm > 7, BAD_HIREG);
10789 constraint (inst.operands[1].negative,
10790 _("Thumb does not support this addressing mode"));
10793 switch (inst.instruction)
10795 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
10796 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
10797 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
10798 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
10799 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
10800 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
10801 case 0x5600 /* ldrsb */:
10802 case 0x5e00 /* ldrsh */: break;
10806 inst.instruction |= inst.operands[0].reg;
10807 inst.instruction |= inst.operands[1].reg << 3;
10808 inst.instruction |= inst.operands[1].imm << 6;
10814 if (!inst.operands[1].present)
10816 inst.operands[1].reg = inst.operands[0].reg + 1;
10817 constraint (inst.operands[0].reg == REG_LR,
10818 _("r14 not allowed here"));
10819 constraint (inst.operands[0].reg == REG_R12,
10820 _("r12 not allowed here"));
10823 if (inst.operands[2].writeback
10824 && (inst.operands[0].reg == inst.operands[2].reg
10825 || inst.operands[1].reg == inst.operands[2].reg))
10826 as_warn (_("base register written back, and overlaps "
10827 "one of transfer registers"));
10829 inst.instruction |= inst.operands[0].reg << 12;
10830 inst.instruction |= inst.operands[1].reg << 8;
10831 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
10837 inst.instruction |= inst.operands[0].reg << 12;
10838 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
10844 unsigned Rd, Rn, Rm, Ra;
10846 Rd = inst.operands[0].reg;
10847 Rn = inst.operands[1].reg;
10848 Rm = inst.operands[2].reg;
10849 Ra = inst.operands[3].reg;
10851 reject_bad_reg (Rd);
10852 reject_bad_reg (Rn);
10853 reject_bad_reg (Rm);
10854 reject_bad_reg (Ra);
10856 inst.instruction |= Rd << 8;
10857 inst.instruction |= Rn << 16;
10858 inst.instruction |= Rm;
10859 inst.instruction |= Ra << 12;
10865 unsigned RdLo, RdHi, Rn, Rm;
10867 RdLo = inst.operands[0].reg;
10868 RdHi = inst.operands[1].reg;
10869 Rn = inst.operands[2].reg;
10870 Rm = inst.operands[3].reg;
10872 reject_bad_reg (RdLo);
10873 reject_bad_reg (RdHi);
10874 reject_bad_reg (Rn);
10875 reject_bad_reg (Rm);
10877 inst.instruction |= RdLo << 12;
10878 inst.instruction |= RdHi << 8;
10879 inst.instruction |= Rn << 16;
10880 inst.instruction |= Rm;
10884 do_t_mov_cmp (void)
10888 Rn = inst.operands[0].reg;
10889 Rm = inst.operands[1].reg;
10892 set_it_insn_type_last ();
10894 if (unified_syntax)
10896 int r0off = (inst.instruction == T_MNEM_mov
10897 || inst.instruction == T_MNEM_movs) ? 8 : 16;
10898 unsigned long opcode;
10899 bfd_boolean narrow;
10900 bfd_boolean low_regs;
10902 low_regs = (Rn <= 7 && Rm <= 7);
10903 opcode = inst.instruction;
10904 if (in_it_block ())
10905 narrow = opcode != T_MNEM_movs;
10907 narrow = opcode != T_MNEM_movs || low_regs;
10908 if (inst.size_req == 4
10909 || inst.operands[1].shifted)
10912 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
10913 if (opcode == T_MNEM_movs && inst.operands[1].isreg
10914 && !inst.operands[1].shifted
10918 inst.instruction = T2_SUBS_PC_LR;
10922 if (opcode == T_MNEM_cmp)
10924 constraint (Rn == REG_PC, BAD_PC);
10927 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
10929 warn_deprecated_sp (Rm);
10930 /* R15 was documented as a valid choice for Rm in ARMv6,
10931 but as UNPREDICTABLE in ARMv7. ARM's proprietary
10932 tools reject R15, so we do too. */
10933 constraint (Rm == REG_PC, BAD_PC);
10936 reject_bad_reg (Rm);
10938 else if (opcode == T_MNEM_mov
10939 || opcode == T_MNEM_movs)
10941 if (inst.operands[1].isreg)
10943 if (opcode == T_MNEM_movs)
10945 reject_bad_reg (Rn);
10946 reject_bad_reg (Rm);
10950 /* This is mov.n. */
10951 if ((Rn == REG_SP || Rn == REG_PC)
10952 && (Rm == REG_SP || Rm == REG_PC))
10954 as_warn (_("Use of r%u as a source register is "
10955 "deprecated when r%u is the destination "
10956 "register."), Rm, Rn);
10961 /* This is mov.w. */
10962 constraint (Rn == REG_PC, BAD_PC);
10963 constraint (Rm == REG_PC, BAD_PC);
10964 constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
10968 reject_bad_reg (Rn);
10971 if (!inst.operands[1].isreg)
10973 /* Immediate operand. */
10974 if (!in_it_block () && opcode == T_MNEM_mov)
10976 if (low_regs && narrow)
10978 inst.instruction = THUMB_OP16 (opcode);
10979 inst.instruction |= Rn << 8;
10980 if (inst.size_req == 2)
10981 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
10983 inst.relax = opcode;
10987 inst.instruction = THUMB_OP32 (inst.instruction);
10988 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10989 inst.instruction |= Rn << r0off;
10990 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10993 else if (inst.operands[1].shifted && inst.operands[1].immisreg
10994 && (inst.instruction == T_MNEM_mov
10995 || inst.instruction == T_MNEM_movs))
10997 /* Register shifts are encoded as separate shift instructions. */
10998 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
11000 if (in_it_block ())
11005 if (inst.size_req == 4)
11008 if (!low_regs || inst.operands[1].imm > 7)
11014 switch (inst.operands[1].shift_kind)
11017 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
11020 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
11023 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
11026 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
11032 inst.instruction = opcode;
11035 inst.instruction |= Rn;
11036 inst.instruction |= inst.operands[1].imm << 3;
11041 inst.instruction |= CONDS_BIT;
11043 inst.instruction |= Rn << 8;
11044 inst.instruction |= Rm << 16;
11045 inst.instruction |= inst.operands[1].imm;
11050 /* Some mov with immediate shift have narrow variants.
11051 Register shifts are handled above. */
11052 if (low_regs && inst.operands[1].shifted
11053 && (inst.instruction == T_MNEM_mov
11054 || inst.instruction == T_MNEM_movs))
11056 if (in_it_block ())
11057 narrow = (inst.instruction == T_MNEM_mov);
11059 narrow = (inst.instruction == T_MNEM_movs);
11064 switch (inst.operands[1].shift_kind)
11066 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11067 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11068 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11069 default: narrow = FALSE; break;
11075 inst.instruction |= Rn;
11076 inst.instruction |= Rm << 3;
11077 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11081 inst.instruction = THUMB_OP32 (inst.instruction);
11082 inst.instruction |= Rn << r0off;
11083 encode_thumb32_shifted_operand (1);
11087 switch (inst.instruction)
11090 /* In v4t or v5t a move of two lowregs produces unpredictable
11091 results. Don't allow this. */
11094 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
11095 "MOV Rd, Rs with two low registers is not "
11096 "permitted on this architecture");
11097 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
11101 inst.instruction = T_OPCODE_MOV_HR;
11102 inst.instruction |= (Rn & 0x8) << 4;
11103 inst.instruction |= (Rn & 0x7);
11104 inst.instruction |= Rm << 3;
11108 /* We know we have low registers at this point.
11109 Generate LSLS Rd, Rs, #0. */
11110 inst.instruction = T_OPCODE_LSL_I;
11111 inst.instruction |= Rn;
11112 inst.instruction |= Rm << 3;
11118 inst.instruction = T_OPCODE_CMP_LR;
11119 inst.instruction |= Rn;
11120 inst.instruction |= Rm << 3;
11124 inst.instruction = T_OPCODE_CMP_HR;
11125 inst.instruction |= (Rn & 0x8) << 4;
11126 inst.instruction |= (Rn & 0x7);
11127 inst.instruction |= Rm << 3;
11134 inst.instruction = THUMB_OP16 (inst.instruction);
11136 /* PR 10443: Do not silently ignore shifted operands. */
11137 constraint (inst.operands[1].shifted,
11138 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
11140 if (inst.operands[1].isreg)
11142 if (Rn < 8 && Rm < 8)
11144 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
11145 since a MOV instruction produces unpredictable results. */
11146 if (inst.instruction == T_OPCODE_MOV_I8)
11147 inst.instruction = T_OPCODE_ADD_I3;
11149 inst.instruction = T_OPCODE_CMP_LR;
11151 inst.instruction |= Rn;
11152 inst.instruction |= Rm << 3;
11156 if (inst.instruction == T_OPCODE_MOV_I8)
11157 inst.instruction = T_OPCODE_MOV_HR;
11159 inst.instruction = T_OPCODE_CMP_HR;
11165 constraint (Rn > 7,
11166 _("only lo regs allowed with immediate"));
11167 inst.instruction |= Rn << 8;
11168 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11179 top = (inst.instruction & 0x00800000) != 0;
11180 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
11182 constraint (top, _(":lower16: not allowed this instruction"));
11183 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
11185 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
11187 constraint (!top, _(":upper16: not allowed this instruction"));
11188 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
11191 Rd = inst.operands[0].reg;
11192 reject_bad_reg (Rd);
11194 inst.instruction |= Rd << 8;
11195 if (inst.reloc.type == BFD_RELOC_UNUSED)
11197 imm = inst.reloc.exp.X_add_number;
11198 inst.instruction |= (imm & 0xf000) << 4;
11199 inst.instruction |= (imm & 0x0800) << 15;
11200 inst.instruction |= (imm & 0x0700) << 4;
11201 inst.instruction |= (imm & 0x00ff);
11206 do_t_mvn_tst (void)
11210 Rn = inst.operands[0].reg;
11211 Rm = inst.operands[1].reg;
11213 if (inst.instruction == T_MNEM_cmp
11214 || inst.instruction == T_MNEM_cmn)
11215 constraint (Rn == REG_PC, BAD_PC);
11217 reject_bad_reg (Rn);
11218 reject_bad_reg (Rm);
11220 if (unified_syntax)
11222 int r0off = (inst.instruction == T_MNEM_mvn
11223 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
11224 bfd_boolean narrow;
11226 if (inst.size_req == 4
11227 || inst.instruction > 0xffff
11228 || inst.operands[1].shifted
11229 || Rn > 7 || Rm > 7)
11231 else if (inst.instruction == T_MNEM_cmn)
11233 else if (THUMB_SETS_FLAGS (inst.instruction))
11234 narrow = !in_it_block ();
11236 narrow = in_it_block ();
11238 if (!inst.operands[1].isreg)
11240 /* For an immediate, we always generate a 32-bit opcode;
11241 section relaxation will shrink it later if possible. */
11242 if (inst.instruction < 0xffff)
11243 inst.instruction = THUMB_OP32 (inst.instruction);
11244 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11245 inst.instruction |= Rn << r0off;
11246 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11250 /* See if we can do this with a 16-bit instruction. */
11253 inst.instruction = THUMB_OP16 (inst.instruction);
11254 inst.instruction |= Rn;
11255 inst.instruction |= Rm << 3;
11259 constraint (inst.operands[1].shifted
11260 && inst.operands[1].immisreg,
11261 _("shift must be constant"));
11262 if (inst.instruction < 0xffff)
11263 inst.instruction = THUMB_OP32 (inst.instruction);
11264 inst.instruction |= Rn << r0off;
11265 encode_thumb32_shifted_operand (1);
11271 constraint (inst.instruction > 0xffff
11272 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
11273 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
11274 _("unshifted register required"));
11275 constraint (Rn > 7 || Rm > 7,
11278 inst.instruction = THUMB_OP16 (inst.instruction);
11279 inst.instruction |= Rn;
11280 inst.instruction |= Rm << 3;
11289 if (do_vfp_nsyn_mrs () == SUCCESS)
11292 Rd = inst.operands[0].reg;
11293 reject_bad_reg (Rd);
11294 inst.instruction |= Rd << 8;
11296 if (inst.operands[1].isreg)
11298 unsigned br = inst.operands[1].reg;
11299 if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
11300 as_bad (_("bad register for mrs"));
11302 inst.instruction |= br & (0xf << 16);
11303 inst.instruction |= (br & 0x300) >> 4;
11304 inst.instruction |= (br & SPSR_BIT) >> 2;
11308 int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11310 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11312 /* PR gas/12698: The constraint is only applied for m_profile.
11313 If the user has specified -march=all, we want to ignore it as
11314 we are building for any CPU type, including non-m variants. */
11315 bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
11316 constraint ((flags != 0) && m_profile, _("selected processor does "
11317 "not support requested special purpose register"));
11320 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
11322 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
11323 _("'APSR', 'CPSR' or 'SPSR' expected"));
11325 inst.instruction |= (flags & SPSR_BIT) >> 2;
11326 inst.instruction |= inst.operands[1].imm & 0xff;
11327 inst.instruction |= 0xf0000;
11337 if (do_vfp_nsyn_msr () == SUCCESS)
11340 constraint (!inst.operands[1].isreg,
11341 _("Thumb encoding does not support an immediate here"));
11343 if (inst.operands[0].isreg)
11344 flags = (int)(inst.operands[0].reg);
11346 flags = inst.operands[0].imm;
11348 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11350 int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11352 /* PR gas/12698: The constraint is only applied for m_profile.
11353 If the user has specified -march=all, we want to ignore it as
11354 we are building for any CPU type, including non-m variants. */
11355 bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
11356 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11357 && (bits & ~(PSR_s | PSR_f)) != 0)
11358 || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11359 && bits != PSR_f)) && m_profile,
11360 _("selected processor does not support requested special "
11361 "purpose register"));
11364 constraint ((flags & 0xff) != 0, _("selected processor does not support "
11365 "requested special purpose register"));
11367 Rn = inst.operands[1].reg;
11368 reject_bad_reg (Rn);
11370 inst.instruction |= (flags & SPSR_BIT) >> 2;
11371 inst.instruction |= (flags & 0xf0000) >> 8;
11372 inst.instruction |= (flags & 0x300) >> 4;
11373 inst.instruction |= (flags & 0xff);
11374 inst.instruction |= Rn << 16;
11380 bfd_boolean narrow;
11381 unsigned Rd, Rn, Rm;
11383 if (!inst.operands[2].present)
11384 inst.operands[2].reg = inst.operands[0].reg;
11386 Rd = inst.operands[0].reg;
11387 Rn = inst.operands[1].reg;
11388 Rm = inst.operands[2].reg;
11390 if (unified_syntax)
11392 if (inst.size_req == 4
11398 else if (inst.instruction == T_MNEM_muls)
11399 narrow = !in_it_block ();
11401 narrow = in_it_block ();
11405 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
11406 constraint (Rn > 7 || Rm > 7,
11413 /* 16-bit MULS/Conditional MUL. */
11414 inst.instruction = THUMB_OP16 (inst.instruction);
11415 inst.instruction |= Rd;
11418 inst.instruction |= Rm << 3;
11420 inst.instruction |= Rn << 3;
11422 constraint (1, _("dest must overlap one source register"));
11426 constraint (inst.instruction != T_MNEM_mul,
11427 _("Thumb-2 MUL must not set flags"));
11429 inst.instruction = THUMB_OP32 (inst.instruction);
11430 inst.instruction |= Rd << 8;
11431 inst.instruction |= Rn << 16;
11432 inst.instruction |= Rm << 0;
11434 reject_bad_reg (Rd);
11435 reject_bad_reg (Rn);
11436 reject_bad_reg (Rm);
11443 unsigned RdLo, RdHi, Rn, Rm;
11445 RdLo = inst.operands[0].reg;
11446 RdHi = inst.operands[1].reg;
11447 Rn = inst.operands[2].reg;
11448 Rm = inst.operands[3].reg;
11450 reject_bad_reg (RdLo);
11451 reject_bad_reg (RdHi);
11452 reject_bad_reg (Rn);
11453 reject_bad_reg (Rm);
11455 inst.instruction |= RdLo << 12;
11456 inst.instruction |= RdHi << 8;
11457 inst.instruction |= Rn << 16;
11458 inst.instruction |= Rm;
11461 as_tsktsk (_("rdhi and rdlo must be different"));
11467 set_it_insn_type (NEUTRAL_IT_INSN);
11469 if (unified_syntax)
11471 if (inst.size_req == 4 || inst.operands[0].imm > 15)
11473 inst.instruction = THUMB_OP32 (inst.instruction);
11474 inst.instruction |= inst.operands[0].imm;
11478 /* PR9722: Check for Thumb2 availability before
11479 generating a thumb2 nop instruction. */
11480 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
11482 inst.instruction = THUMB_OP16 (inst.instruction);
11483 inst.instruction |= inst.operands[0].imm << 4;
11486 inst.instruction = 0x46c0;
11491 constraint (inst.operands[0].present,
11492 _("Thumb does not support NOP with hints"));
11493 inst.instruction = 0x46c0;
11500 if (unified_syntax)
11502 bfd_boolean narrow;
11504 if (THUMB_SETS_FLAGS (inst.instruction))
11505 narrow = !in_it_block ();
11507 narrow = in_it_block ();
11508 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11510 if (inst.size_req == 4)
11515 inst.instruction = THUMB_OP32 (inst.instruction);
11516 inst.instruction |= inst.operands[0].reg << 8;
11517 inst.instruction |= inst.operands[1].reg << 16;
11521 inst.instruction = THUMB_OP16 (inst.instruction);
11522 inst.instruction |= inst.operands[0].reg;
11523 inst.instruction |= inst.operands[1].reg << 3;
11528 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
11530 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11532 inst.instruction = THUMB_OP16 (inst.instruction);
11533 inst.instruction |= inst.operands[0].reg;
11534 inst.instruction |= inst.operands[1].reg << 3;
11543 Rd = inst.operands[0].reg;
11544 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
11546 reject_bad_reg (Rd);
11547 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
11548 reject_bad_reg (Rn);
11550 inst.instruction |= Rd << 8;
11551 inst.instruction |= Rn << 16;
11553 if (!inst.operands[2].isreg)
11555 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11556 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11562 Rm = inst.operands[2].reg;
11563 reject_bad_reg (Rm);
11565 constraint (inst.operands[2].shifted
11566 && inst.operands[2].immisreg,
11567 _("shift must be constant"));
11568 encode_thumb32_shifted_operand (2);
11575 unsigned Rd, Rn, Rm;
11577 Rd = inst.operands[0].reg;
11578 Rn = inst.operands[1].reg;
11579 Rm = inst.operands[2].reg;
11581 reject_bad_reg (Rd);
11582 reject_bad_reg (Rn);
11583 reject_bad_reg (Rm);
11585 inst.instruction |= Rd << 8;
11586 inst.instruction |= Rn << 16;
11587 inst.instruction |= Rm;
11588 if (inst.operands[3].present)
11590 unsigned int val = inst.reloc.exp.X_add_number;
11591 constraint (inst.reloc.exp.X_op != O_constant,
11592 _("expression too complex"));
11593 inst.instruction |= (val & 0x1c) << 10;
11594 inst.instruction |= (val & 0x03) << 6;
11601 if (!inst.operands[3].present)
11605 inst.instruction &= ~0x00000020;
11607 /* PR 10168. Swap the Rm and Rn registers. */
11608 Rtmp = inst.operands[1].reg;
11609 inst.operands[1].reg = inst.operands[2].reg;
11610 inst.operands[2].reg = Rtmp;
11618 if (inst.operands[0].immisreg)
11619 reject_bad_reg (inst.operands[0].imm);
11621 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
11625 do_t_push_pop (void)
11629 constraint (inst.operands[0].writeback,
11630 _("push/pop do not support {reglist}^"));
11631 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
11632 _("expression too complex"));
11634 mask = inst.operands[0].imm;
11635 if ((mask & ~0xff) == 0)
11636 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
11637 else if ((inst.instruction == T_MNEM_push
11638 && (mask & ~0xff) == 1 << REG_LR)
11639 || (inst.instruction == T_MNEM_pop
11640 && (mask & ~0xff) == 1 << REG_PC))
11642 inst.instruction = THUMB_OP16 (inst.instruction);
11643 inst.instruction |= THUMB_PP_PC_LR;
11644 inst.instruction |= mask & 0xff;
11646 else if (unified_syntax)
11648 inst.instruction = THUMB_OP32 (inst.instruction);
11649 encode_thumb2_ldmstm (13, mask, TRUE);
11653 inst.error = _("invalid register list to push/pop instruction");
11663 Rd = inst.operands[0].reg;
11664 Rm = inst.operands[1].reg;
11666 reject_bad_reg (Rd);
11667 reject_bad_reg (Rm);
11669 inst.instruction |= Rd << 8;
11670 inst.instruction |= Rm << 16;
11671 inst.instruction |= Rm;
11679 Rd = inst.operands[0].reg;
11680 Rm = inst.operands[1].reg;
11682 reject_bad_reg (Rd);
11683 reject_bad_reg (Rm);
11685 if (Rd <= 7 && Rm <= 7
11686 && inst.size_req != 4)
11688 inst.instruction = THUMB_OP16 (inst.instruction);
11689 inst.instruction |= Rd;
11690 inst.instruction |= Rm << 3;
11692 else if (unified_syntax)
11694 inst.instruction = THUMB_OP32 (inst.instruction);
11695 inst.instruction |= Rd << 8;
11696 inst.instruction |= Rm << 16;
11697 inst.instruction |= Rm;
11700 inst.error = BAD_HIREG;
11708 Rd = inst.operands[0].reg;
11709 Rm = inst.operands[1].reg;
11711 reject_bad_reg (Rd);
11712 reject_bad_reg (Rm);
11714 inst.instruction |= Rd << 8;
11715 inst.instruction |= Rm;
11723 Rd = inst.operands[0].reg;
11724 Rs = (inst.operands[1].present
11725 ? inst.operands[1].reg /* Rd, Rs, foo */
11726 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
11728 reject_bad_reg (Rd);
11729 reject_bad_reg (Rs);
11730 if (inst.operands[2].isreg)
11731 reject_bad_reg (inst.operands[2].reg);
11733 inst.instruction |= Rd << 8;
11734 inst.instruction |= Rs << 16;
11735 if (!inst.operands[2].isreg)
11737 bfd_boolean narrow;
11739 if ((inst.instruction & 0x00100000) != 0)
11740 narrow = !in_it_block ();
11742 narrow = in_it_block ();
11744 if (Rd > 7 || Rs > 7)
11747 if (inst.size_req == 4 || !unified_syntax)
11750 if (inst.reloc.exp.X_op != O_constant
11751 || inst.reloc.exp.X_add_number != 0)
11754 /* Turn rsb #0 into 16-bit neg. We should probably do this via
11755 relaxation, but it doesn't seem worth the hassle. */
11758 inst.reloc.type = BFD_RELOC_UNUSED;
11759 inst.instruction = THUMB_OP16 (T_MNEM_negs);
11760 inst.instruction |= Rs << 3;
11761 inst.instruction |= Rd;
11765 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11766 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11770 encode_thumb32_shifted_operand (2);
11776 if (warn_on_deprecated
11777 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
11778 as_warn (_("setend use is deprecated for ARMv8"));
11780 set_it_insn_type (OUTSIDE_IT_INSN);
11781 if (inst.operands[0].imm)
11782 inst.instruction |= 0x8;
11788 if (!inst.operands[1].present)
11789 inst.operands[1].reg = inst.operands[0].reg;
11791 if (unified_syntax)
11793 bfd_boolean narrow;
11796 switch (inst.instruction)
11799 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
11801 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
11803 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
11805 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
11809 if (THUMB_SETS_FLAGS (inst.instruction))
11810 narrow = !in_it_block ();
11812 narrow = in_it_block ();
11813 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11815 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
11817 if (inst.operands[2].isreg
11818 && (inst.operands[1].reg != inst.operands[0].reg
11819 || inst.operands[2].reg > 7))
11821 if (inst.size_req == 4)
11824 reject_bad_reg (inst.operands[0].reg);
11825 reject_bad_reg (inst.operands[1].reg);
11829 if (inst.operands[2].isreg)
11831 reject_bad_reg (inst.operands[2].reg);
11832 inst.instruction = THUMB_OP32 (inst.instruction);
11833 inst.instruction |= inst.operands[0].reg << 8;
11834 inst.instruction |= inst.operands[1].reg << 16;
11835 inst.instruction |= inst.operands[2].reg;
11837 /* PR 12854: Error on extraneous shifts. */
11838 constraint (inst.operands[2].shifted,
11839 _("extraneous shift as part of operand to shift insn"));
11843 inst.operands[1].shifted = 1;
11844 inst.operands[1].shift_kind = shift_kind;
11845 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
11846 ? T_MNEM_movs : T_MNEM_mov);
11847 inst.instruction |= inst.operands[0].reg << 8;
11848 encode_thumb32_shifted_operand (1);
11849 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
11850 inst.reloc.type = BFD_RELOC_UNUSED;
11855 if (inst.operands[2].isreg)
11857 switch (shift_kind)
11859 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
11860 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
11861 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
11862 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
11866 inst.instruction |= inst.operands[0].reg;
11867 inst.instruction |= inst.operands[2].reg << 3;
11869 /* PR 12854: Error on extraneous shifts. */
11870 constraint (inst.operands[2].shifted,
11871 _("extraneous shift as part of operand to shift insn"));
11875 switch (shift_kind)
11877 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11878 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11879 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11882 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11883 inst.instruction |= inst.operands[0].reg;
11884 inst.instruction |= inst.operands[1].reg << 3;
11890 constraint (inst.operands[0].reg > 7
11891 || inst.operands[1].reg > 7, BAD_HIREG);
11892 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11894 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
11896 constraint (inst.operands[2].reg > 7, BAD_HIREG);
11897 constraint (inst.operands[0].reg != inst.operands[1].reg,
11898 _("source1 and dest must be same register"));
11900 switch (inst.instruction)
11902 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
11903 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
11904 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
11905 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
11909 inst.instruction |= inst.operands[0].reg;
11910 inst.instruction |= inst.operands[2].reg << 3;
11912 /* PR 12854: Error on extraneous shifts. */
11913 constraint (inst.operands[2].shifted,
11914 _("extraneous shift as part of operand to shift insn"));
11918 switch (inst.instruction)
11920 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
11921 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
11922 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
11923 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
11926 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11927 inst.instruction |= inst.operands[0].reg;
11928 inst.instruction |= inst.operands[1].reg << 3;
11936 unsigned Rd, Rn, Rm;
11938 Rd = inst.operands[0].reg;
11939 Rn = inst.operands[1].reg;
11940 Rm = inst.operands[2].reg;
11942 reject_bad_reg (Rd);
11943 reject_bad_reg (Rn);
11944 reject_bad_reg (Rm);
11946 inst.instruction |= Rd << 8;
11947 inst.instruction |= Rn << 16;
11948 inst.instruction |= Rm;
11954 unsigned Rd, Rn, Rm;
11956 Rd = inst.operands[0].reg;
11957 Rm = inst.operands[1].reg;
11958 Rn = inst.operands[2].reg;
11960 reject_bad_reg (Rd);
11961 reject_bad_reg (Rn);
11962 reject_bad_reg (Rm);
11964 inst.instruction |= Rd << 8;
11965 inst.instruction |= Rn << 16;
11966 inst.instruction |= Rm;
11972 unsigned int value = inst.reloc.exp.X_add_number;
11973 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
11974 _("SMC is not permitted on this architecture"));
11975 constraint (inst.reloc.exp.X_op != O_constant,
11976 _("expression too complex"));
11977 inst.reloc.type = BFD_RELOC_UNUSED;
11978 inst.instruction |= (value & 0xf000) >> 12;
11979 inst.instruction |= (value & 0x0ff0);
11980 inst.instruction |= (value & 0x000f) << 16;
11986 unsigned int value = inst.reloc.exp.X_add_number;
11988 inst.reloc.type = BFD_RELOC_UNUSED;
11989 inst.instruction |= (value & 0x0fff);
11990 inst.instruction |= (value & 0xf000) << 4;
11994 do_t_ssat_usat (int bias)
11998 Rd = inst.operands[0].reg;
11999 Rn = inst.operands[2].reg;
12001 reject_bad_reg (Rd);
12002 reject_bad_reg (Rn);
12004 inst.instruction |= Rd << 8;
12005 inst.instruction |= inst.operands[1].imm - bias;
12006 inst.instruction |= Rn << 16;
12008 if (inst.operands[3].present)
12010 offsetT shift_amount = inst.reloc.exp.X_add_number;
12012 inst.reloc.type = BFD_RELOC_UNUSED;
12014 constraint (inst.reloc.exp.X_op != O_constant,
12015 _("expression too complex"));
12017 if (shift_amount != 0)
12019 constraint (shift_amount > 31,
12020 _("shift expression is too large"));
12022 if (inst.operands[3].shift_kind == SHIFT_ASR)
12023 inst.instruction |= 0x00200000; /* sh bit. */
12025 inst.instruction |= (shift_amount & 0x1c) << 10;
12026 inst.instruction |= (shift_amount & 0x03) << 6;
12034 do_t_ssat_usat (1);
12042 Rd = inst.operands[0].reg;
12043 Rn = inst.operands[2].reg;
12045 reject_bad_reg (Rd);
12046 reject_bad_reg (Rn);
12048 inst.instruction |= Rd << 8;
12049 inst.instruction |= inst.operands[1].imm - 1;
12050 inst.instruction |= Rn << 16;
12056 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
12057 || inst.operands[2].postind || inst.operands[2].writeback
12058 || inst.operands[2].immisreg || inst.operands[2].shifted
12059 || inst.operands[2].negative,
12062 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
12064 inst.instruction |= inst.operands[0].reg << 8;
12065 inst.instruction |= inst.operands[1].reg << 12;
12066 inst.instruction |= inst.operands[2].reg << 16;
12067 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
12073 if (!inst.operands[2].present)
12074 inst.operands[2].reg = inst.operands[1].reg + 1;
12076 constraint (inst.operands[0].reg == inst.operands[1].reg
12077 || inst.operands[0].reg == inst.operands[2].reg
12078 || inst.operands[0].reg == inst.operands[3].reg,
12081 inst.instruction |= inst.operands[0].reg;
12082 inst.instruction |= inst.operands[1].reg << 12;
12083 inst.instruction |= inst.operands[2].reg << 8;
12084 inst.instruction |= inst.operands[3].reg << 16;
12090 unsigned Rd, Rn, Rm;
12092 Rd = inst.operands[0].reg;
12093 Rn = inst.operands[1].reg;
12094 Rm = inst.operands[2].reg;
12096 reject_bad_reg (Rd);
12097 reject_bad_reg (Rn);
12098 reject_bad_reg (Rm);
12100 inst.instruction |= Rd << 8;
12101 inst.instruction |= Rn << 16;
12102 inst.instruction |= Rm;
12103 inst.instruction |= inst.operands[3].imm << 4;
12111 Rd = inst.operands[0].reg;
12112 Rm = inst.operands[1].reg;
12114 reject_bad_reg (Rd);
12115 reject_bad_reg (Rm);
12117 if (inst.instruction <= 0xffff
12118 && inst.size_req != 4
12119 && Rd <= 7 && Rm <= 7
12120 && (!inst.operands[2].present || inst.operands[2].imm == 0))
12122 inst.instruction = THUMB_OP16 (inst.instruction);
12123 inst.instruction |= Rd;
12124 inst.instruction |= Rm << 3;
12126 else if (unified_syntax)
12128 if (inst.instruction <= 0xffff)
12129 inst.instruction = THUMB_OP32 (inst.instruction);
12130 inst.instruction |= Rd << 8;
12131 inst.instruction |= Rm;
12132 inst.instruction |= inst.operands[2].imm << 4;
12136 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
12137 _("Thumb encoding does not support rotation"));
12138 constraint (1, BAD_HIREG);
12145 /* We have to do the following check manually as ARM_EXT_OS only applies
12147 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
12149 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
12150 /* This only applies to the v6m howver, not later architectures. */
12151 && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
12152 as_bad (_("SVC is not permitted on this architecture"));
12153 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
12156 inst.reloc.type = BFD_RELOC_ARM_SWI;
12165 half = (inst.instruction & 0x10) != 0;
12166 set_it_insn_type_last ();
12167 constraint (inst.operands[0].immisreg,
12168 _("instruction requires register index"));
12170 Rn = inst.operands[0].reg;
12171 Rm = inst.operands[0].imm;
12173 constraint (Rn == REG_SP, BAD_SP);
12174 reject_bad_reg (Rm);
12176 constraint (!half && inst.operands[0].shifted,
12177 _("instruction does not allow shifted index"));
12178 inst.instruction |= (Rn << 16) | Rm;
12184 do_t_ssat_usat (0);
12192 Rd = inst.operands[0].reg;
12193 Rn = inst.operands[2].reg;
12195 reject_bad_reg (Rd);
12196 reject_bad_reg (Rn);
12198 inst.instruction |= Rd << 8;
12199 inst.instruction |= inst.operands[1].imm;
12200 inst.instruction |= Rn << 16;
12203 /* Neon instruction encoder helpers. */
12205 /* Encodings for the different types for various Neon opcodes. */
12207 /* An "invalid" code for the following tables. */
12210 struct neon_tab_entry
12213 unsigned float_or_poly;
12214 unsigned scalar_or_imm;
12217 /* Map overloaded Neon opcodes to their respective encodings. */
12218 #define NEON_ENC_TAB \
12219 X(vabd, 0x0000700, 0x1200d00, N_INV), \
12220 X(vmax, 0x0000600, 0x0000f00, N_INV), \
12221 X(vmin, 0x0000610, 0x0200f00, N_INV), \
12222 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
12223 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
12224 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
12225 X(vadd, 0x0000800, 0x0000d00, N_INV), \
12226 X(vsub, 0x1000800, 0x0200d00, N_INV), \
12227 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
12228 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
12229 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
12230 /* Register variants of the following two instructions are encoded as
12231 vcge / vcgt with the operands reversed. */ \
12232 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
12233 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
12234 X(vfma, N_INV, 0x0000c10, N_INV), \
12235 X(vfms, N_INV, 0x0200c10, N_INV), \
12236 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
12237 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
12238 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
12239 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
12240 X(vmlal, 0x0800800, N_INV, 0x0800240), \
12241 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
12242 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
12243 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
12244 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
12245 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
12246 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
12247 X(vshl, 0x0000400, N_INV, 0x0800510), \
12248 X(vqshl, 0x0000410, N_INV, 0x0800710), \
12249 X(vand, 0x0000110, N_INV, 0x0800030), \
12250 X(vbic, 0x0100110, N_INV, 0x0800030), \
12251 X(veor, 0x1000110, N_INV, N_INV), \
12252 X(vorn, 0x0300110, N_INV, 0x0800010), \
12253 X(vorr, 0x0200110, N_INV, 0x0800010), \
12254 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
12255 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
12256 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
12257 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
12258 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
12259 X(vst1, 0x0000000, 0x0800000, N_INV), \
12260 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
12261 X(vst2, 0x0000100, 0x0800100, N_INV), \
12262 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
12263 X(vst3, 0x0000200, 0x0800200, N_INV), \
12264 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
12265 X(vst4, 0x0000300, 0x0800300, N_INV), \
12266 X(vmovn, 0x1b20200, N_INV, N_INV), \
12267 X(vtrn, 0x1b20080, N_INV, N_INV), \
12268 X(vqmovn, 0x1b20200, N_INV, N_INV), \
12269 X(vqmovun, 0x1b20240, N_INV, N_INV), \
12270 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
12271 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
12272 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
12273 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
12274 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
12275 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
12276 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
12277 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
12278 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
12282 #define X(OPC,I,F,S) N_MNEM_##OPC
12287 static const struct neon_tab_entry neon_enc_tab[] =
12289 #define X(OPC,I,F,S) { (I), (F), (S) }
12294 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
12295 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12296 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12297 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12298 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12299 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12300 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12301 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12302 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12303 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12304 #define NEON_ENC_SINGLE_(X) \
12305 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
12306 #define NEON_ENC_DOUBLE_(X) \
12307 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
12309 #define NEON_ENCODE(type, inst) \
12312 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
12313 inst.is_neon = 1; \
12317 #define check_neon_suffixes \
12320 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
12322 as_bad (_("invalid neon suffix for non neon instruction")); \
12328 /* Define shapes for instruction operands. The following mnemonic characters
12329 are used in this table:
12331 F - VFP S<n> register
12332 D - Neon D<n> register
12333 Q - Neon Q<n> register
12337 L - D<n> register list
12339 This table is used to generate various data:
12340 - enumerations of the form NS_DDR to be used as arguments to
12342 - a table classifying shapes into single, double, quad, mixed.
12343 - a table used to drive neon_select_shape. */
12345 #define NEON_SHAPE_DEF \
12346 X(3, (D, D, D), DOUBLE), \
12347 X(3, (Q, Q, Q), QUAD), \
12348 X(3, (D, D, I), DOUBLE), \
12349 X(3, (Q, Q, I), QUAD), \
12350 X(3, (D, D, S), DOUBLE), \
12351 X(3, (Q, Q, S), QUAD), \
12352 X(2, (D, D), DOUBLE), \
12353 X(2, (Q, Q), QUAD), \
12354 X(2, (D, S), DOUBLE), \
12355 X(2, (Q, S), QUAD), \
12356 X(2, (D, R), DOUBLE), \
12357 X(2, (Q, R), QUAD), \
12358 X(2, (D, I), DOUBLE), \
12359 X(2, (Q, I), QUAD), \
12360 X(3, (D, L, D), DOUBLE), \
12361 X(2, (D, Q), MIXED), \
12362 X(2, (Q, D), MIXED), \
12363 X(3, (D, Q, I), MIXED), \
12364 X(3, (Q, D, I), MIXED), \
12365 X(3, (Q, D, D), MIXED), \
12366 X(3, (D, Q, Q), MIXED), \
12367 X(3, (Q, Q, D), MIXED), \
12368 X(3, (Q, D, S), MIXED), \
12369 X(3, (D, Q, S), MIXED), \
12370 X(4, (D, D, D, I), DOUBLE), \
12371 X(4, (Q, Q, Q, I), QUAD), \
12372 X(2, (F, F), SINGLE), \
12373 X(3, (F, F, F), SINGLE), \
12374 X(2, (F, I), SINGLE), \
12375 X(2, (F, D), MIXED), \
12376 X(2, (D, F), MIXED), \
12377 X(3, (F, F, I), MIXED), \
12378 X(4, (R, R, F, F), SINGLE), \
12379 X(4, (F, F, R, R), SINGLE), \
12380 X(3, (D, R, R), DOUBLE), \
12381 X(3, (R, R, D), DOUBLE), \
12382 X(2, (S, R), SINGLE), \
12383 X(2, (R, S), SINGLE), \
12384 X(2, (F, R), SINGLE), \
12385 X(2, (R, F), SINGLE)
12387 #define S2(A,B) NS_##A##B
12388 #define S3(A,B,C) NS_##A##B##C
12389 #define S4(A,B,C,D) NS_##A##B##C##D
12391 #define X(N, L, C) S##N L
12404 enum neon_shape_class
12412 #define X(N, L, C) SC_##C
12414 static enum neon_shape_class neon_shape_class[] =
12432 /* Register widths of above. */
12433 static unsigned neon_shape_el_size[] =
12444 struct neon_shape_info
12447 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
12450 #define S2(A,B) { SE_##A, SE_##B }
12451 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
12452 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
12454 #define X(N, L, C) { N, S##N L }
12456 static struct neon_shape_info neon_shape_tab[] =
12466 /* Bit masks used in type checking given instructions.
12467 'N_EQK' means the type must be the same as (or based on in some way) the key
12468 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
12469 set, various other bits can be set as well in order to modify the meaning of
12470 the type constraint. */
12472 enum neon_type_mask
12495 N_KEY = 0x1000000, /* Key element (main type specifier). */
12496 N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
12497 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
12498 N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
12499 N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
12500 N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
12501 N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
12502 N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer. */
12503 N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
12504 N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
12506 N_MAX_NONSPECIAL = N_F64
12509 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
12511 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
12512 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
12513 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
12514 #define N_SUF_32 (N_SU_32 | N_F32)
12515 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
12516 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
12518 /* Pass this as the first type argument to neon_check_type to ignore types
12520 #define N_IGNORE_TYPE (N_KEY | N_EQK)
12522 /* Select a "shape" for the current instruction (describing register types or
12523 sizes) from a list of alternatives. Return NS_NULL if the current instruction
12524 doesn't fit. For non-polymorphic shapes, checking is usually done as a
12525 function of operand parsing, so this function doesn't need to be called.
12526 Shapes should be listed in order of decreasing length. */
12528 static enum neon_shape
12529 neon_select_shape (enum neon_shape shape, ...)
12532 enum neon_shape first_shape = shape;
12534 /* Fix missing optional operands. FIXME: we don't know at this point how
12535 many arguments we should have, so this makes the assumption that we have
12536 > 1. This is true of all current Neon opcodes, I think, but may not be
12537 true in the future. */
12538 if (!inst.operands[1].present)
12539 inst.operands[1] = inst.operands[0];
12541 va_start (ap, shape);
12543 for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
12548 for (j = 0; j < neon_shape_tab[shape].els; j++)
12550 if (!inst.operands[j].present)
12556 switch (neon_shape_tab[shape].el[j])
12559 if (!(inst.operands[j].isreg
12560 && inst.operands[j].isvec
12561 && inst.operands[j].issingle
12562 && !inst.operands[j].isquad))
12567 if (!(inst.operands[j].isreg
12568 && inst.operands[j].isvec
12569 && !inst.operands[j].isquad
12570 && !inst.operands[j].issingle))
12575 if (!(inst.operands[j].isreg
12576 && !inst.operands[j].isvec))
12581 if (!(inst.operands[j].isreg
12582 && inst.operands[j].isvec
12583 && inst.operands[j].isquad
12584 && !inst.operands[j].issingle))
12589 if (!(!inst.operands[j].isreg
12590 && !inst.operands[j].isscalar))
12595 if (!(!inst.operands[j].isreg
12596 && inst.operands[j].isscalar))
12606 if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
12607 /* We've matched all the entries in the shape table, and we don't
12608 have any left over operands which have not been matched. */
12614 if (shape == NS_NULL && first_shape != NS_NULL)
12615 first_error (_("invalid instruction shape"));
12620 /* True if SHAPE is predominantly a quadword operation (most of the time, this
12621 means the Q bit should be set). */
12624 neon_quad (enum neon_shape shape)
12626 return neon_shape_class[shape] == SC_QUAD;
12630 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
12633 /* Allow modification to be made to types which are constrained to be
12634 based on the key element, based on bits set alongside N_EQK. */
12635 if ((typebits & N_EQK) != 0)
12637 if ((typebits & N_HLF) != 0)
12639 else if ((typebits & N_DBL) != 0)
12641 if ((typebits & N_SGN) != 0)
12642 *g_type = NT_signed;
12643 else if ((typebits & N_UNS) != 0)
12644 *g_type = NT_unsigned;
12645 else if ((typebits & N_INT) != 0)
12646 *g_type = NT_integer;
12647 else if ((typebits & N_FLT) != 0)
12648 *g_type = NT_float;
12649 else if ((typebits & N_SIZ) != 0)
12650 *g_type = NT_untyped;
12654 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
12655 operand type, i.e. the single type specified in a Neon instruction when it
12656 is the only one given. */
12658 static struct neon_type_el
12659 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
12661 struct neon_type_el dest = *key;
12663 gas_assert ((thisarg & N_EQK) != 0);
12665 neon_modify_type_size (thisarg, &dest.type, &dest.size);
12670 /* Convert Neon type and size into compact bitmask representation. */
12672 static enum neon_type_mask
12673 type_chk_of_el_type (enum neon_el_type type, unsigned size)
12680 case 8: return N_8;
12681 case 16: return N_16;
12682 case 32: return N_32;
12683 case 64: return N_64;
12691 case 8: return N_I8;
12692 case 16: return N_I16;
12693 case 32: return N_I32;
12694 case 64: return N_I64;
12702 case 16: return N_F16;
12703 case 32: return N_F32;
12704 case 64: return N_F64;
12712 case 8: return N_P8;
12713 case 16: return N_P16;
12721 case 8: return N_S8;
12722 case 16: return N_S16;
12723 case 32: return N_S32;
12724 case 64: return N_S64;
12732 case 8: return N_U8;
12733 case 16: return N_U16;
12734 case 32: return N_U32;
12735 case 64: return N_U64;
12746 /* Convert compact Neon bitmask type representation to a type and size. Only
12747 handles the case where a single bit is set in the mask. */
12750 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
12751 enum neon_type_mask mask)
12753 if ((mask & N_EQK) != 0)
12756 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
12758 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
12760 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
12762 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
12767 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
12769 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
12770 *type = NT_unsigned;
12771 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
12772 *type = NT_integer;
12773 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
12774 *type = NT_untyped;
12775 else if ((mask & (N_P8 | N_P16)) != 0)
12777 else if ((mask & (N_F32 | N_F64)) != 0)
12785 /* Modify a bitmask of allowed types. This is only needed for type
12789 modify_types_allowed (unsigned allowed, unsigned mods)
12792 enum neon_el_type type;
12798 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
12800 if (el_type_of_type_chk (&type, &size,
12801 (enum neon_type_mask) (allowed & i)) == SUCCESS)
12803 neon_modify_type_size (mods, &type, &size);
12804 destmask |= type_chk_of_el_type (type, size);
12811 /* Check type and return type classification.
12812 The manual states (paraphrase): If one datatype is given, it indicates the
12814 - the second operand, if there is one
12815 - the operand, if there is no second operand
12816 - the result, if there are no operands.
12817 This isn't quite good enough though, so we use a concept of a "key" datatype
12818 which is set on a per-instruction basis, which is the one which matters when
12819 only one data type is written.
12820 Note: this function has side-effects (e.g. filling in missing operands). All
12821 Neon instructions should call it before performing bit encoding. */
12823 static struct neon_type_el
12824 neon_check_type (unsigned els, enum neon_shape ns, ...)
12827 unsigned i, pass, key_el = 0;
12828 unsigned types[NEON_MAX_TYPE_ELS];
12829 enum neon_el_type k_type = NT_invtype;
12830 unsigned k_size = -1u;
12831 struct neon_type_el badtype = {NT_invtype, -1};
12832 unsigned key_allowed = 0;
12834 /* Optional registers in Neon instructions are always (not) in operand 1.
12835 Fill in the missing operand here, if it was omitted. */
12836 if (els > 1 && !inst.operands[1].present)
12837 inst.operands[1] = inst.operands[0];
12839 /* Suck up all the varargs. */
12841 for (i = 0; i < els; i++)
12843 unsigned thisarg = va_arg (ap, unsigned);
12844 if (thisarg == N_IGNORE_TYPE)
12849 types[i] = thisarg;
12850 if ((thisarg & N_KEY) != 0)
12855 if (inst.vectype.elems > 0)
12856 for (i = 0; i < els; i++)
12857 if (inst.operands[i].vectype.type != NT_invtype)
12859 first_error (_("types specified in both the mnemonic and operands"));
12863 /* Duplicate inst.vectype elements here as necessary.
12864 FIXME: No idea if this is exactly the same as the ARM assembler,
12865 particularly when an insn takes one register and one non-register
12867 if (inst.vectype.elems == 1 && els > 1)
12870 inst.vectype.elems = els;
12871 inst.vectype.el[key_el] = inst.vectype.el[0];
12872 for (j = 0; j < els; j++)
12874 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
12877 else if (inst.vectype.elems == 0 && els > 0)
12880 /* No types were given after the mnemonic, so look for types specified
12881 after each operand. We allow some flexibility here; as long as the
12882 "key" operand has a type, we can infer the others. */
12883 for (j = 0; j < els; j++)
12884 if (inst.operands[j].vectype.type != NT_invtype)
12885 inst.vectype.el[j] = inst.operands[j].vectype;
12887 if (inst.operands[key_el].vectype.type != NT_invtype)
12889 for (j = 0; j < els; j++)
12890 if (inst.operands[j].vectype.type == NT_invtype)
12891 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
12896 first_error (_("operand types can't be inferred"));
12900 else if (inst.vectype.elems != els)
12902 first_error (_("type specifier has the wrong number of parts"));
12906 for (pass = 0; pass < 2; pass++)
12908 for (i = 0; i < els; i++)
12910 unsigned thisarg = types[i];
12911 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
12912 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
12913 enum neon_el_type g_type = inst.vectype.el[i].type;
12914 unsigned g_size = inst.vectype.el[i].size;
12916 /* Decay more-specific signed & unsigned types to sign-insensitive
12917 integer types if sign-specific variants are unavailable. */
12918 if ((g_type == NT_signed || g_type == NT_unsigned)
12919 && (types_allowed & N_SU_ALL) == 0)
12920 g_type = NT_integer;
12922 /* If only untyped args are allowed, decay any more specific types to
12923 them. Some instructions only care about signs for some element
12924 sizes, so handle that properly. */
12925 if ((g_size == 8 && (types_allowed & N_8) != 0)
12926 || (g_size == 16 && (types_allowed & N_16) != 0)
12927 || (g_size == 32 && (types_allowed & N_32) != 0)
12928 || (g_size == 64 && (types_allowed & N_64) != 0))
12929 g_type = NT_untyped;
12933 if ((thisarg & N_KEY) != 0)
12937 key_allowed = thisarg & ~N_KEY;
12942 if ((thisarg & N_VFP) != 0)
12944 enum neon_shape_el regshape;
12945 unsigned regwidth, match;
12947 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
12950 first_error (_("invalid instruction shape"));
12953 regshape = neon_shape_tab[ns].el[i];
12954 regwidth = neon_shape_el_size[regshape];
12956 /* In VFP mode, operands must match register widths. If we
12957 have a key operand, use its width, else use the width of
12958 the current operand. */
12964 if (regwidth != match)
12966 first_error (_("operand size must match register width"));
12971 if ((thisarg & N_EQK) == 0)
12973 unsigned given_type = type_chk_of_el_type (g_type, g_size);
12975 if ((given_type & types_allowed) == 0)
12977 first_error (_("bad type in Neon instruction"));
12983 enum neon_el_type mod_k_type = k_type;
12984 unsigned mod_k_size = k_size;
12985 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
12986 if (g_type != mod_k_type || g_size != mod_k_size)
12988 first_error (_("inconsistent types in Neon instruction"));
12996 return inst.vectype.el[key_el];
12999 /* Neon-style VFP instruction forwarding. */
13001 /* Thumb VFP instructions have 0xE in the condition field. */
13004 do_vfp_cond_or_thumb (void)
13009 inst.instruction |= 0xe0000000;
13011 inst.instruction |= inst.cond << 28;
13014 /* Look up and encode a simple mnemonic, for use as a helper function for the
13015 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13016 etc. It is assumed that operand parsing has already been done, and that the
13017 operands are in the form expected by the given opcode (this isn't necessarily
13018 the same as the form in which they were parsed, hence some massaging must
13019 take place before this function is called).
13020 Checks current arch version against that in the looked-up opcode. */
13023 do_vfp_nsyn_opcode (const char *opname)
13025 const struct asm_opcode *opcode;
13027 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13032 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13033 thumb_mode ? *opcode->tvariant : *opcode->avariant),
13040 inst.instruction = opcode->tvalue;
13041 opcode->tencode ();
13045 inst.instruction = (inst.cond << 28) | opcode->avalue;
13046 opcode->aencode ();
13051 do_vfp_nsyn_add_sub (enum neon_shape rs)
13053 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13058 do_vfp_nsyn_opcode ("fadds");
13060 do_vfp_nsyn_opcode ("fsubs");
13065 do_vfp_nsyn_opcode ("faddd");
13067 do_vfp_nsyn_opcode ("fsubd");
13071 /* Check operand types to see if this is a VFP instruction, and if so call
13075 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
13077 enum neon_shape rs;
13078 struct neon_type_el et;
13083 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13084 et = neon_check_type (2, rs,
13085 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13089 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13090 et = neon_check_type (3, rs,
13091 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13098 if (et.type != NT_invtype)
13109 do_vfp_nsyn_mla_mls (enum neon_shape rs)
13111 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13116 do_vfp_nsyn_opcode ("fmacs");
13118 do_vfp_nsyn_opcode ("fnmacs");
13123 do_vfp_nsyn_opcode ("fmacd");
13125 do_vfp_nsyn_opcode ("fnmacd");
13130 do_vfp_nsyn_fma_fms (enum neon_shape rs)
13132 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
13137 do_vfp_nsyn_opcode ("ffmas");
13139 do_vfp_nsyn_opcode ("ffnmas");
13144 do_vfp_nsyn_opcode ("ffmad");
13146 do_vfp_nsyn_opcode ("ffnmad");
13151 do_vfp_nsyn_mul (enum neon_shape rs)
13154 do_vfp_nsyn_opcode ("fmuls");
13156 do_vfp_nsyn_opcode ("fmuld");
13160 do_vfp_nsyn_abs_neg (enum neon_shape rs)
13162 int is_neg = (inst.instruction & 0x80) != 0;
13163 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
13168 do_vfp_nsyn_opcode ("fnegs");
13170 do_vfp_nsyn_opcode ("fabss");
13175 do_vfp_nsyn_opcode ("fnegd");
13177 do_vfp_nsyn_opcode ("fabsd");
13181 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
13182 insns belong to Neon, and are handled elsewhere. */
13185 do_vfp_nsyn_ldm_stm (int is_dbmode)
13187 int is_ldm = (inst.instruction & (1 << 20)) != 0;
13191 do_vfp_nsyn_opcode ("fldmdbs");
13193 do_vfp_nsyn_opcode ("fldmias");
13198 do_vfp_nsyn_opcode ("fstmdbs");
13200 do_vfp_nsyn_opcode ("fstmias");
13205 do_vfp_nsyn_sqrt (void)
13207 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13208 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13211 do_vfp_nsyn_opcode ("fsqrts");
13213 do_vfp_nsyn_opcode ("fsqrtd");
13217 do_vfp_nsyn_div (void)
13219 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13220 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13221 N_F32 | N_F64 | N_KEY | N_VFP);
13224 do_vfp_nsyn_opcode ("fdivs");
13226 do_vfp_nsyn_opcode ("fdivd");
13230 do_vfp_nsyn_nmul (void)
13232 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13233 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13234 N_F32 | N_F64 | N_KEY | N_VFP);
13238 NEON_ENCODE (SINGLE, inst);
13239 do_vfp_sp_dyadic ();
13243 NEON_ENCODE (DOUBLE, inst);
13244 do_vfp_dp_rd_rn_rm ();
13246 do_vfp_cond_or_thumb ();
13250 do_vfp_nsyn_cmp (void)
13252 if (inst.operands[1].isreg)
13254 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13255 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13259 NEON_ENCODE (SINGLE, inst);
13260 do_vfp_sp_monadic ();
13264 NEON_ENCODE (DOUBLE, inst);
13265 do_vfp_dp_rd_rm ();
13270 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
13271 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
13273 switch (inst.instruction & 0x0fffffff)
13276 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
13279 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
13287 NEON_ENCODE (SINGLE, inst);
13288 do_vfp_sp_compare_z ();
13292 NEON_ENCODE (DOUBLE, inst);
13296 do_vfp_cond_or_thumb ();
13300 nsyn_insert_sp (void)
13302 inst.operands[1] = inst.operands[0];
13303 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
13304 inst.operands[0].reg = REG_SP;
13305 inst.operands[0].isreg = 1;
13306 inst.operands[0].writeback = 1;
13307 inst.operands[0].present = 1;
13311 do_vfp_nsyn_push (void)
13314 if (inst.operands[1].issingle)
13315 do_vfp_nsyn_opcode ("fstmdbs");
13317 do_vfp_nsyn_opcode ("fstmdbd");
13321 do_vfp_nsyn_pop (void)
13324 if (inst.operands[1].issingle)
13325 do_vfp_nsyn_opcode ("fldmias");
13327 do_vfp_nsyn_opcode ("fldmiad");
13330 /* Fix up Neon data-processing instructions, ORing in the correct bits for
13331 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
13334 neon_dp_fixup (struct arm_it* insn)
13336 unsigned int i = insn->instruction;
13341 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
13352 insn->instruction = i;
13355 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
13359 neon_logbits (unsigned x)
13361 return ffs (x) - 4;
13364 #define LOW4(R) ((R) & 0xf)
13365 #define HI1(R) (((R) >> 4) & 1)
13367 /* Encode insns with bit pattern:
13369 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
13370 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
13372 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
13373 different meaning for some instruction. */
13376 neon_three_same (int isquad, int ubit, int size)
13378 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13379 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13380 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13381 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13382 inst.instruction |= LOW4 (inst.operands[2].reg);
13383 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13384 inst.instruction |= (isquad != 0) << 6;
13385 inst.instruction |= (ubit != 0) << 24;
13387 inst.instruction |= neon_logbits (size) << 20;
13389 neon_dp_fixup (&inst);
13392 /* Encode instructions of the form:
13394 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
13395 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
13397 Don't write size if SIZE == -1. */
13400 neon_two_same (int qbit, int ubit, int size)
13402 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13403 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13404 inst.instruction |= LOW4 (inst.operands[1].reg);
13405 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13406 inst.instruction |= (qbit != 0) << 6;
13407 inst.instruction |= (ubit != 0) << 24;
13410 inst.instruction |= neon_logbits (size) << 18;
13412 neon_dp_fixup (&inst);
13415 /* Neon instruction encoders, in approximate order of appearance. */
13418 do_neon_dyadic_i_su (void)
13420 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13421 struct neon_type_el et = neon_check_type (3, rs,
13422 N_EQK, N_EQK, N_SU_32 | N_KEY);
13423 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13427 do_neon_dyadic_i64_su (void)
13429 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13430 struct neon_type_el et = neon_check_type (3, rs,
13431 N_EQK, N_EQK, N_SU_ALL | N_KEY);
13432 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13436 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
13439 unsigned size = et.size >> 3;
13440 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13441 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13442 inst.instruction |= LOW4 (inst.operands[1].reg);
13443 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13444 inst.instruction |= (isquad != 0) << 6;
13445 inst.instruction |= immbits << 16;
13446 inst.instruction |= (size >> 3) << 7;
13447 inst.instruction |= (size & 0x7) << 19;
13449 inst.instruction |= (uval != 0) << 24;
13451 neon_dp_fixup (&inst);
13455 do_neon_shl_imm (void)
13457 if (!inst.operands[2].isreg)
13459 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13460 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
13461 NEON_ENCODE (IMMED, inst);
13462 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
13466 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13467 struct neon_type_el et = neon_check_type (3, rs,
13468 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13471 /* VSHL/VQSHL 3-register variants have syntax such as:
13473 whereas other 3-register operations encoded by neon_three_same have
13476 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
13478 tmp = inst.operands[2].reg;
13479 inst.operands[2].reg = inst.operands[1].reg;
13480 inst.operands[1].reg = tmp;
13481 NEON_ENCODE (INTEGER, inst);
13482 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13487 do_neon_qshl_imm (void)
13489 if (!inst.operands[2].isreg)
13491 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13492 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13494 NEON_ENCODE (IMMED, inst);
13495 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13496 inst.operands[2].imm);
13500 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13501 struct neon_type_el et = neon_check_type (3, rs,
13502 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13505 /* See note in do_neon_shl_imm. */
13506 tmp = inst.operands[2].reg;
13507 inst.operands[2].reg = inst.operands[1].reg;
13508 inst.operands[1].reg = tmp;
13509 NEON_ENCODE (INTEGER, inst);
13510 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13515 do_neon_rshl (void)
13517 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13518 struct neon_type_el et = neon_check_type (3, rs,
13519 N_EQK, N_EQK, N_SU_ALL | N_KEY);
13522 tmp = inst.operands[2].reg;
13523 inst.operands[2].reg = inst.operands[1].reg;
13524 inst.operands[1].reg = tmp;
13525 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
13529 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
13531 /* Handle .I8 pseudo-instructions. */
13534 /* Unfortunately, this will make everything apart from zero out-of-range.
13535 FIXME is this the intended semantics? There doesn't seem much point in
13536 accepting .I8 if so. */
13537 immediate |= immediate << 8;
13543 if (immediate == (immediate & 0x000000ff))
13545 *immbits = immediate;
13548 else if (immediate == (immediate & 0x0000ff00))
13550 *immbits = immediate >> 8;
13553 else if (immediate == (immediate & 0x00ff0000))
13555 *immbits = immediate >> 16;
13558 else if (immediate == (immediate & 0xff000000))
13560 *immbits = immediate >> 24;
13563 if ((immediate & 0xffff) != (immediate >> 16))
13564 goto bad_immediate;
13565 immediate &= 0xffff;
13568 if (immediate == (immediate & 0x000000ff))
13570 *immbits = immediate;
13573 else if (immediate == (immediate & 0x0000ff00))
13575 *immbits = immediate >> 8;
13580 first_error (_("immediate value out of range"));
13584 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
13588 neon_bits_same_in_bytes (unsigned imm)
13590 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
13591 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
13592 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
13593 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
13596 /* For immediate of above form, return 0bABCD. */
13599 neon_squash_bits (unsigned imm)
13601 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
13602 | ((imm & 0x01000000) >> 21);
13605 /* Compress quarter-float representation to 0b...000 abcdefgh. */
13608 neon_qfloat_bits (unsigned imm)
13610 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
13613 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
13614 the instruction. *OP is passed as the initial value of the op field, and
13615 may be set to a different value depending on the constant (i.e.
13616 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
13617 MVN). If the immediate looks like a repeated pattern then also
13618 try smaller element sizes. */
13621 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
13622 unsigned *immbits, int *op, int size,
13623 enum neon_el_type type)
13625 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
13627 if (type == NT_float && !float_p)
13630 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
13632 if (size != 32 || *op == 1)
13634 *immbits = neon_qfloat_bits (immlo);
13640 if (neon_bits_same_in_bytes (immhi)
13641 && neon_bits_same_in_bytes (immlo))
13645 *immbits = (neon_squash_bits (immhi) << 4)
13646 | neon_squash_bits (immlo);
13651 if (immhi != immlo)
13657 if (immlo == (immlo & 0x000000ff))
13662 else if (immlo == (immlo & 0x0000ff00))
13664 *immbits = immlo >> 8;
13667 else if (immlo == (immlo & 0x00ff0000))
13669 *immbits = immlo >> 16;
13672 else if (immlo == (immlo & 0xff000000))
13674 *immbits = immlo >> 24;
13677 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
13679 *immbits = (immlo >> 8) & 0xff;
13682 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
13684 *immbits = (immlo >> 16) & 0xff;
13688 if ((immlo & 0xffff) != (immlo >> 16))
13695 if (immlo == (immlo & 0x000000ff))
13700 else if (immlo == (immlo & 0x0000ff00))
13702 *immbits = immlo >> 8;
13706 if ((immlo & 0xff) != (immlo >> 8))
13711 if (immlo == (immlo & 0x000000ff))
13713 /* Don't allow MVN with 8-bit immediate. */
13723 /* Write immediate bits [7:0] to the following locations:
13725 |28/24|23 19|18 16|15 4|3 0|
13726 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
13728 This function is used by VMOV/VMVN/VORR/VBIC. */
13731 neon_write_immbits (unsigned immbits)
13733 inst.instruction |= immbits & 0xf;
13734 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
13735 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
13738 /* Invert low-order SIZE bits of XHI:XLO. */
13741 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
13743 unsigned immlo = xlo ? *xlo : 0;
13744 unsigned immhi = xhi ? *xhi : 0;
13749 immlo = (~immlo) & 0xff;
13753 immlo = (~immlo) & 0xffff;
13757 immhi = (~immhi) & 0xffffffff;
13758 /* fall through. */
13761 immlo = (~immlo) & 0xffffffff;
13776 do_neon_logic (void)
13778 if (inst.operands[2].present && inst.operands[2].isreg)
13780 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13781 neon_check_type (3, rs, N_IGNORE_TYPE);
13782 /* U bit and size field were set as part of the bitmask. */
13783 NEON_ENCODE (INTEGER, inst);
13784 neon_three_same (neon_quad (rs), 0, -1);
13788 const int three_ops_form = (inst.operands[2].present
13789 && !inst.operands[2].isreg);
13790 const int immoperand = (three_ops_form ? 2 : 1);
13791 enum neon_shape rs = (three_ops_form
13792 ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
13793 : neon_select_shape (NS_DI, NS_QI, NS_NULL));
13794 struct neon_type_el et = neon_check_type (2, rs,
13795 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
13796 enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
13800 if (et.type == NT_invtype)
13803 if (three_ops_form)
13804 constraint (inst.operands[0].reg != inst.operands[1].reg,
13805 _("first and second operands shall be the same register"));
13807 NEON_ENCODE (IMMED, inst);
13809 immbits = inst.operands[immoperand].imm;
13812 /* .i64 is a pseudo-op, so the immediate must be a repeating
13814 if (immbits != (inst.operands[immoperand].regisimm ?
13815 inst.operands[immoperand].reg : 0))
13817 /* Set immbits to an invalid constant. */
13818 immbits = 0xdeadbeef;
13825 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13829 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13833 /* Pseudo-instruction for VBIC. */
13834 neon_invert_size (&immbits, 0, et.size);
13835 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13839 /* Pseudo-instruction for VORR. */
13840 neon_invert_size (&immbits, 0, et.size);
13841 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13851 inst.instruction |= neon_quad (rs) << 6;
13852 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13853 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13854 inst.instruction |= cmode << 8;
13855 neon_write_immbits (immbits);
13857 neon_dp_fixup (&inst);
13862 do_neon_bitfield (void)
13864 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13865 neon_check_type (3, rs, N_IGNORE_TYPE);
13866 neon_three_same (neon_quad (rs), 0, -1);
13870 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
13873 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13874 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
13876 if (et.type == NT_float)
13878 NEON_ENCODE (FLOAT, inst);
13879 neon_three_same (neon_quad (rs), 0, -1);
13883 NEON_ENCODE (INTEGER, inst);
13884 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
13889 do_neon_dyadic_if_su (void)
13891 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13895 do_neon_dyadic_if_su_d (void)
13897 /* This version only allow D registers, but that constraint is enforced during
13898 operand parsing so we don't need to do anything extra here. */
13899 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13903 do_neon_dyadic_if_i_d (void)
13905 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13906 affected if we specify unsigned args. */
13907 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
13910 enum vfp_or_neon_is_neon_bits
13913 NEON_CHECK_ARCH = 2
13916 /* Call this function if an instruction which may have belonged to the VFP or
13917 Neon instruction sets, but turned out to be a Neon instruction (due to the
13918 operand types involved, etc.). We have to check and/or fix-up a couple of
13921 - Make sure the user hasn't attempted to make a Neon instruction
13923 - Alter the value in the condition code field if necessary.
13924 - Make sure that the arch supports Neon instructions.
13926 Which of these operations take place depends on bits from enum
13927 vfp_or_neon_is_neon_bits.
13929 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
13930 current instruction's condition is COND_ALWAYS, the condition field is
13931 changed to inst.uncond_value. This is necessary because instructions shared
13932 between VFP and Neon may be conditional for the VFP variants only, and the
13933 unconditional Neon version must have, e.g., 0xF in the condition field. */
13936 vfp_or_neon_is_neon (unsigned check)
13938 /* Conditions are always legal in Thumb mode (IT blocks). */
13939 if (!thumb_mode && (check & NEON_CHECK_CC))
13941 if (inst.cond != COND_ALWAYS)
13943 first_error (_(BAD_COND));
13946 if (inst.uncond_value != -1)
13947 inst.instruction |= inst.uncond_value << 28;
13950 if ((check & NEON_CHECK_ARCH)
13951 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
13953 first_error (_(BAD_FPU));
13961 do_neon_addsub_if_i (void)
13963 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
13966 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13969 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13970 affected if we specify unsigned args. */
13971 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
13974 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
13976 V<op> A,B (A is operand 0, B is operand 2)
13981 so handle that case specially. */
13984 neon_exchange_operands (void)
13986 void *scratch = alloca (sizeof (inst.operands[0]));
13987 if (inst.operands[1].present)
13989 /* Swap operands[1] and operands[2]. */
13990 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
13991 inst.operands[1] = inst.operands[2];
13992 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
13996 inst.operands[1] = inst.operands[2];
13997 inst.operands[2] = inst.operands[0];
14002 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
14004 if (inst.operands[2].isreg)
14007 neon_exchange_operands ();
14008 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
14012 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14013 struct neon_type_el et = neon_check_type (2, rs,
14014 N_EQK | N_SIZ, immtypes | N_KEY);
14016 NEON_ENCODE (IMMED, inst);
14017 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14018 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14019 inst.instruction |= LOW4 (inst.operands[1].reg);
14020 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14021 inst.instruction |= neon_quad (rs) << 6;
14022 inst.instruction |= (et.type == NT_float) << 10;
14023 inst.instruction |= neon_logbits (et.size) << 18;
14025 neon_dp_fixup (&inst);
14032 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
14036 do_neon_cmp_inv (void)
14038 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
14044 neon_compare (N_IF_32, N_IF_32, FALSE);
14047 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
14048 scalars, which are encoded in 5 bits, M : Rm.
14049 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
14050 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
14054 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
14056 unsigned regno = NEON_SCALAR_REG (scalar);
14057 unsigned elno = NEON_SCALAR_INDEX (scalar);
14062 if (regno > 7 || elno > 3)
14064 return regno | (elno << 3);
14067 if (regno > 15 || elno > 1)
14069 return regno | (elno << 4);
14073 first_error (_("scalar out of range for multiply instruction"));
14079 /* Encode multiply / multiply-accumulate scalar instructions. */
14082 neon_mul_mac (struct neon_type_el et, int ubit)
14086 /* Give a more helpful error message if we have an invalid type. */
14087 if (et.type == NT_invtype)
14090 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
14091 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14092 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14093 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14094 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14095 inst.instruction |= LOW4 (scalar);
14096 inst.instruction |= HI1 (scalar) << 5;
14097 inst.instruction |= (et.type == NT_float) << 8;
14098 inst.instruction |= neon_logbits (et.size) << 20;
14099 inst.instruction |= (ubit != 0) << 24;
14101 neon_dp_fixup (&inst);
14105 do_neon_mac_maybe_scalar (void)
14107 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
14110 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14113 if (inst.operands[2].isscalar)
14115 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14116 struct neon_type_el et = neon_check_type (3, rs,
14117 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
14118 NEON_ENCODE (SCALAR, inst);
14119 neon_mul_mac (et, neon_quad (rs));
14123 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14124 affected if we specify unsigned args. */
14125 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14130 do_neon_fmac (void)
14132 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
14135 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14138 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14144 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14145 struct neon_type_el et = neon_check_type (3, rs,
14146 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
14147 neon_three_same (neon_quad (rs), 0, et.size);
14150 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14151 same types as the MAC equivalents. The polynomial type for this instruction
14152 is encoded the same as the integer type. */
14157 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
14160 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14163 if (inst.operands[2].isscalar)
14164 do_neon_mac_maybe_scalar ();
14166 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
14170 do_neon_qdmulh (void)
14172 if (inst.operands[2].isscalar)
14174 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14175 struct neon_type_el et = neon_check_type (3, rs,
14176 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14177 NEON_ENCODE (SCALAR, inst);
14178 neon_mul_mac (et, neon_quad (rs));
14182 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14183 struct neon_type_el et = neon_check_type (3, rs,
14184 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14185 NEON_ENCODE (INTEGER, inst);
14186 /* The U bit (rounding) comes from bit mask. */
14187 neon_three_same (neon_quad (rs), 0, et.size);
14192 do_neon_fcmp_absolute (void)
14194 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14195 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14196 /* Size field comes from bit mask. */
14197 neon_three_same (neon_quad (rs), 1, -1);
14201 do_neon_fcmp_absolute_inv (void)
14203 neon_exchange_operands ();
14204 do_neon_fcmp_absolute ();
14208 do_neon_step (void)
14210 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14211 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14212 neon_three_same (neon_quad (rs), 0, -1);
14216 do_neon_abs_neg (void)
14218 enum neon_shape rs;
14219 struct neon_type_el et;
14221 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
14224 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14227 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14228 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
14230 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14231 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14232 inst.instruction |= LOW4 (inst.operands[1].reg);
14233 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14234 inst.instruction |= neon_quad (rs) << 6;
14235 inst.instruction |= (et.type == NT_float) << 10;
14236 inst.instruction |= neon_logbits (et.size) << 18;
14238 neon_dp_fixup (&inst);
14244 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14245 struct neon_type_el et = neon_check_type (2, rs,
14246 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14247 int imm = inst.operands[2].imm;
14248 constraint (imm < 0 || (unsigned)imm >= et.size,
14249 _("immediate out of range for insert"));
14250 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14256 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14257 struct neon_type_el et = neon_check_type (2, rs,
14258 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14259 int imm = inst.operands[2].imm;
14260 constraint (imm < 1 || (unsigned)imm > et.size,
14261 _("immediate out of range for insert"));
14262 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
14266 do_neon_qshlu_imm (void)
14268 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14269 struct neon_type_el et = neon_check_type (2, rs,
14270 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
14271 int imm = inst.operands[2].imm;
14272 constraint (imm < 0 || (unsigned)imm >= et.size,
14273 _("immediate out of range for shift"));
14274 /* Only encodes the 'U present' variant of the instruction.
14275 In this case, signed types have OP (bit 8) set to 0.
14276 Unsigned types have OP set to 1. */
14277 inst.instruction |= (et.type == NT_unsigned) << 8;
14278 /* The rest of the bits are the same as other immediate shifts. */
14279 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14283 do_neon_qmovn (void)
14285 struct neon_type_el et = neon_check_type (2, NS_DQ,
14286 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14287 /* Saturating move where operands can be signed or unsigned, and the
14288 destination has the same signedness. */
14289 NEON_ENCODE (INTEGER, inst);
14290 if (et.type == NT_unsigned)
14291 inst.instruction |= 0xc0;
14293 inst.instruction |= 0x80;
14294 neon_two_same (0, 1, et.size / 2);
14298 do_neon_qmovun (void)
14300 struct neon_type_el et = neon_check_type (2, NS_DQ,
14301 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14302 /* Saturating move with unsigned results. Operands must be signed. */
14303 NEON_ENCODE (INTEGER, inst);
14304 neon_two_same (0, 1, et.size / 2);
14308 do_neon_rshift_sat_narrow (void)
14310 /* FIXME: Types for narrowing. If operands are signed, results can be signed
14311 or unsigned. If operands are unsigned, results must also be unsigned. */
14312 struct neon_type_el et = neon_check_type (2, NS_DQI,
14313 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14314 int imm = inst.operands[2].imm;
14315 /* This gets the bounds check, size encoding and immediate bits calculation
14319 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
14320 VQMOVN.I<size> <Dd>, <Qm>. */
14323 inst.operands[2].present = 0;
14324 inst.instruction = N_MNEM_vqmovn;
14329 constraint (imm < 1 || (unsigned)imm > et.size,
14330 _("immediate out of range"));
14331 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
14335 do_neon_rshift_sat_narrow_u (void)
14337 /* FIXME: Types for narrowing. If operands are signed, results can be signed
14338 or unsigned. If operands are unsigned, results must also be unsigned. */
14339 struct neon_type_el et = neon_check_type (2, NS_DQI,
14340 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14341 int imm = inst.operands[2].imm;
14342 /* This gets the bounds check, size encoding and immediate bits calculation
14346 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
14347 VQMOVUN.I<size> <Dd>, <Qm>. */
14350 inst.operands[2].present = 0;
14351 inst.instruction = N_MNEM_vqmovun;
14356 constraint (imm < 1 || (unsigned)imm > et.size,
14357 _("immediate out of range"));
14358 /* FIXME: The manual is kind of unclear about what value U should have in
14359 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
14361 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
14365 do_neon_movn (void)
14367 struct neon_type_el et = neon_check_type (2, NS_DQ,
14368 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14369 NEON_ENCODE (INTEGER, inst);
14370 neon_two_same (0, 1, et.size / 2);
14374 do_neon_rshift_narrow (void)
14376 struct neon_type_el et = neon_check_type (2, NS_DQI,
14377 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14378 int imm = inst.operands[2].imm;
14379 /* This gets the bounds check, size encoding and immediate bits calculation
14383 /* If immediate is zero then we are a pseudo-instruction for
14384 VMOVN.I<size> <Dd>, <Qm> */
14387 inst.operands[2].present = 0;
14388 inst.instruction = N_MNEM_vmovn;
14393 constraint (imm < 1 || (unsigned)imm > et.size,
14394 _("immediate out of range for narrowing operation"));
14395 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
14399 do_neon_shll (void)
14401 /* FIXME: Type checking when lengthening. */
14402 struct neon_type_el et = neon_check_type (2, NS_QDI,
14403 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
14404 unsigned imm = inst.operands[2].imm;
14406 if (imm == et.size)
14408 /* Maximum shift variant. */
14409 NEON_ENCODE (INTEGER, inst);
14410 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14411 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14412 inst.instruction |= LOW4 (inst.operands[1].reg);
14413 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14414 inst.instruction |= neon_logbits (et.size) << 18;
14416 neon_dp_fixup (&inst);
14420 /* A more-specific type check for non-max versions. */
14421 et = neon_check_type (2, NS_QDI,
14422 N_EQK | N_DBL, N_SU_32 | N_KEY);
14423 NEON_ENCODE (IMMED, inst);
14424 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
14428 /* Check the various types for the VCVT instruction, and return which version
14429 the current instruction is. */
14432 neon_cvt_flavour (enum neon_shape rs)
14434 #define CVT_VAR(C,X,Y) \
14435 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
14436 if (et.type != NT_invtype) \
14438 inst.error = NULL; \
14441 struct neon_type_el et;
14442 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
14443 || rs == NS_FF) ? N_VFP : 0;
14444 /* The instruction versions which take an immediate take one register
14445 argument, which is extended to the width of the full register. Thus the
14446 "source" and "destination" registers must have the same width. Hack that
14447 here by making the size equal to the key (wider, in this case) operand. */
14448 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
14450 CVT_VAR (0, N_S32, N_F32);
14451 CVT_VAR (1, N_U32, N_F32);
14452 CVT_VAR (2, N_F32, N_S32);
14453 CVT_VAR (3, N_F32, N_U32);
14454 /* Half-precision conversions. */
14455 CVT_VAR (4, N_F32, N_F16);
14456 CVT_VAR (5, N_F16, N_F32);
14460 /* VFP instructions. */
14461 CVT_VAR (6, N_F32, N_F64);
14462 CVT_VAR (7, N_F64, N_F32);
14463 CVT_VAR (8, N_S32, N_F64 | key);
14464 CVT_VAR (9, N_U32, N_F64 | key);
14465 CVT_VAR (10, N_F64 | key, N_S32);
14466 CVT_VAR (11, N_F64 | key, N_U32);
14467 /* VFP instructions with bitshift. */
14468 CVT_VAR (12, N_F32 | key, N_S16);
14469 CVT_VAR (13, N_F32 | key, N_U16);
14470 CVT_VAR (14, N_F64 | key, N_S16);
14471 CVT_VAR (15, N_F64 | key, N_U16);
14472 CVT_VAR (16, N_S16, N_F32 | key);
14473 CVT_VAR (17, N_U16, N_F32 | key);
14474 CVT_VAR (18, N_S16, N_F64 | key);
14475 CVT_VAR (19, N_U16, N_F64 | key);
14481 /* Neon-syntax VFP conversions. */
14484 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
14486 const char *opname = 0;
14488 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
14490 /* Conversions with immediate bitshift. */
14491 const char *enc[] =
14515 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
14517 opname = enc[flavour];
14518 constraint (inst.operands[0].reg != inst.operands[1].reg,
14519 _("operands 0 and 1 must be the same register"));
14520 inst.operands[1] = inst.operands[2];
14521 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
14526 /* Conversions without bitshift. */
14527 const char *enc[] =
14543 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
14544 opname = enc[flavour];
14548 do_vfp_nsyn_opcode (opname);
14552 do_vfp_nsyn_cvtz (void)
14554 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
14555 int flavour = neon_cvt_flavour (rs);
14556 const char *enc[] =
14570 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
14571 do_vfp_nsyn_opcode (enc[flavour]);
14575 do_neon_cvt_1 (bfd_boolean round_to_zero ATTRIBUTE_UNUSED)
14577 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
14578 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
14579 int flavour = neon_cvt_flavour (rs);
14581 /* PR11109: Handle round-to-zero for VCVT conversions. */
14583 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
14584 && (flavour == 0 || flavour == 1 || flavour == 8 || flavour == 9)
14585 && (rs == NS_FD || rs == NS_FF))
14587 do_vfp_nsyn_cvtz ();
14591 /* VFP rather than Neon conversions. */
14594 do_vfp_nsyn_cvt (rs, flavour);
14604 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
14606 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14609 /* Fixed-point conversion with #0 immediate is encoded as an
14610 integer conversion. */
14611 if (inst.operands[2].present && inst.operands[2].imm == 0)
14613 immbits = 32 - inst.operands[2].imm;
14614 NEON_ENCODE (IMMED, inst);
14616 inst.instruction |= enctab[flavour];
14617 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14618 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14619 inst.instruction |= LOW4 (inst.operands[1].reg);
14620 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14621 inst.instruction |= neon_quad (rs) << 6;
14622 inst.instruction |= 1 << 21;
14623 inst.instruction |= immbits << 16;
14625 neon_dp_fixup (&inst);
14633 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
14635 NEON_ENCODE (INTEGER, inst);
14637 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14641 inst.instruction |= enctab[flavour];
14643 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14644 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14645 inst.instruction |= LOW4 (inst.operands[1].reg);
14646 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14647 inst.instruction |= neon_quad (rs) << 6;
14648 inst.instruction |= 2 << 18;
14650 neon_dp_fixup (&inst);
14654 /* Half-precision conversions for Advanced SIMD -- neon. */
14659 && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
14661 as_bad (_("operand size must match register width"));
14666 && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
14668 as_bad (_("operand size must match register width"));
14673 inst.instruction = 0x3b60600;
14675 inst.instruction = 0x3b60700;
14677 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14678 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14679 inst.instruction |= LOW4 (inst.operands[1].reg);
14680 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14681 neon_dp_fixup (&inst);
14685 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
14686 do_vfp_nsyn_cvt (rs, flavour);
14691 do_neon_cvtr (void)
14693 do_neon_cvt_1 (FALSE);
14699 do_neon_cvt_1 (TRUE);
14703 do_neon_cvtb (void)
14705 inst.instruction = 0xeb20a40;
14707 /* The sizes are attached to the mnemonic. */
14708 if (inst.vectype.el[0].type != NT_invtype
14709 && inst.vectype.el[0].size == 16)
14710 inst.instruction |= 0x00010000;
14712 /* Programmer's syntax: the sizes are attached to the operands. */
14713 else if (inst.operands[0].vectype.type != NT_invtype
14714 && inst.operands[0].vectype.size == 16)
14715 inst.instruction |= 0x00010000;
14717 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
14718 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
14719 do_vfp_cond_or_thumb ();
14724 do_neon_cvtt (void)
14727 inst.instruction |= 0x80;
14731 neon_move_immediate (void)
14733 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
14734 struct neon_type_el et = neon_check_type (2, rs,
14735 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14736 unsigned immlo, immhi = 0, immbits;
14737 int op, cmode, float_p;
14739 constraint (et.type == NT_invtype,
14740 _("operand size must be specified for immediate VMOV"));
14742 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
14743 op = (inst.instruction & (1 << 5)) != 0;
14745 immlo = inst.operands[1].imm;
14746 if (inst.operands[1].regisimm)
14747 immhi = inst.operands[1].reg;
14749 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
14750 _("immediate has bits set outside the operand size"));
14752 float_p = inst.operands[1].immisfloat;
14754 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
14755 et.size, et.type)) == FAIL)
14757 /* Invert relevant bits only. */
14758 neon_invert_size (&immlo, &immhi, et.size);
14759 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
14760 with one or the other; those cases are caught by
14761 neon_cmode_for_move_imm. */
14763 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
14764 &op, et.size, et.type)) == FAIL)
14766 first_error (_("immediate out of range"));
14771 inst.instruction &= ~(1 << 5);
14772 inst.instruction |= op << 5;
14774 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14775 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14776 inst.instruction |= neon_quad (rs) << 6;
14777 inst.instruction |= cmode << 8;
14779 neon_write_immbits (immbits);
14785 if (inst.operands[1].isreg)
14787 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14789 NEON_ENCODE (INTEGER, inst);
14790 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14791 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14792 inst.instruction |= LOW4 (inst.operands[1].reg);
14793 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14794 inst.instruction |= neon_quad (rs) << 6;
14798 NEON_ENCODE (IMMED, inst);
14799 neon_move_immediate ();
14802 neon_dp_fixup (&inst);
14805 /* Encode instructions of form:
14807 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14808 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
14811 neon_mixed_length (struct neon_type_el et, unsigned size)
14813 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14814 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14815 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14816 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14817 inst.instruction |= LOW4 (inst.operands[2].reg);
14818 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14819 inst.instruction |= (et.type == NT_unsigned) << 24;
14820 inst.instruction |= neon_logbits (size) << 20;
14822 neon_dp_fixup (&inst);
14826 do_neon_dyadic_long (void)
14828 /* FIXME: Type checking for lengthening op. */
14829 struct neon_type_el et = neon_check_type (3, NS_QDD,
14830 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
14831 neon_mixed_length (et, et.size);
14835 do_neon_abal (void)
14837 struct neon_type_el et = neon_check_type (3, NS_QDD,
14838 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
14839 neon_mixed_length (et, et.size);
14843 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
14845 if (inst.operands[2].isscalar)
14847 struct neon_type_el et = neon_check_type (3, NS_QDS,
14848 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
14849 NEON_ENCODE (SCALAR, inst);
14850 neon_mul_mac (et, et.type == NT_unsigned);
14854 struct neon_type_el et = neon_check_type (3, NS_QDD,
14855 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
14856 NEON_ENCODE (INTEGER, inst);
14857 neon_mixed_length (et, et.size);
14862 do_neon_mac_maybe_scalar_long (void)
14864 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
14868 do_neon_dyadic_wide (void)
14870 struct neon_type_el et = neon_check_type (3, NS_QQD,
14871 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
14872 neon_mixed_length (et, et.size);
14876 do_neon_dyadic_narrow (void)
14878 struct neon_type_el et = neon_check_type (3, NS_QDD,
14879 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
14880 /* Operand sign is unimportant, and the U bit is part of the opcode,
14881 so force the operand type to integer. */
14882 et.type = NT_integer;
14883 neon_mixed_length (et, et.size / 2);
14887 do_neon_mul_sat_scalar_long (void)
14889 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
14893 do_neon_vmull (void)
14895 if (inst.operands[2].isscalar)
14896 do_neon_mac_maybe_scalar_long ();
14899 struct neon_type_el et = neon_check_type (3, NS_QDD,
14900 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
14901 if (et.type == NT_poly)
14902 NEON_ENCODE (POLY, inst);
14904 NEON_ENCODE (INTEGER, inst);
14905 /* For polynomial encoding, size field must be 0b00 and the U bit must be
14906 zero. Should be OK as-is. */
14907 neon_mixed_length (et, et.size);
14914 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
14915 struct neon_type_el et = neon_check_type (3, rs,
14916 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14917 unsigned imm = (inst.operands[3].imm * et.size) / 8;
14919 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
14920 _("shift out of range"));
14921 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14922 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14923 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14924 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14925 inst.instruction |= LOW4 (inst.operands[2].reg);
14926 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14927 inst.instruction |= neon_quad (rs) << 6;
14928 inst.instruction |= imm << 8;
14930 neon_dp_fixup (&inst);
14936 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14937 struct neon_type_el et = neon_check_type (2, rs,
14938 N_EQK, N_8 | N_16 | N_32 | N_KEY);
14939 unsigned op = (inst.instruction >> 7) & 3;
14940 /* N (width of reversed regions) is encoded as part of the bitmask. We
14941 extract it here to check the elements to be reversed are smaller.
14942 Otherwise we'd get a reserved instruction. */
14943 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
14944 gas_assert (elsize != 0);
14945 constraint (et.size >= elsize,
14946 _("elements must be smaller than reversal region"));
14947 neon_two_same (neon_quad (rs), 1, et.size);
14953 if (inst.operands[1].isscalar)
14955 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
14956 struct neon_type_el et = neon_check_type (2, rs,
14957 N_EQK, N_8 | N_16 | N_32 | N_KEY);
14958 unsigned sizebits = et.size >> 3;
14959 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
14960 int logsize = neon_logbits (et.size);
14961 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
14963 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
14966 NEON_ENCODE (SCALAR, inst);
14967 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14968 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14969 inst.instruction |= LOW4 (dm);
14970 inst.instruction |= HI1 (dm) << 5;
14971 inst.instruction |= neon_quad (rs) << 6;
14972 inst.instruction |= x << 17;
14973 inst.instruction |= sizebits << 16;
14975 neon_dp_fixup (&inst);
14979 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
14980 struct neon_type_el et = neon_check_type (2, rs,
14981 N_8 | N_16 | N_32 | N_KEY, N_EQK);
14982 /* Duplicate ARM register to lanes of vector. */
14983 NEON_ENCODE (ARMREG, inst);
14986 case 8: inst.instruction |= 0x400000; break;
14987 case 16: inst.instruction |= 0x000020; break;
14988 case 32: inst.instruction |= 0x000000; break;
14991 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
14992 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
14993 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
14994 inst.instruction |= neon_quad (rs) << 21;
14995 /* The encoding for this instruction is identical for the ARM and Thumb
14996 variants, except for the condition field. */
14997 do_vfp_cond_or_thumb ();
15001 /* VMOV has particularly many variations. It can be one of:
15002 0. VMOV<c><q> <Qd>, <Qm>
15003 1. VMOV<c><q> <Dd>, <Dm>
15004 (Register operations, which are VORR with Rm = Rn.)
15005 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15006 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15008 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15009 (ARM register to scalar.)
15010 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15011 (Two ARM registers to vector.)
15012 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15013 (Scalar to ARM register.)
15014 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15015 (Vector to two ARM registers.)
15016 8. VMOV.F32 <Sd>, <Sm>
15017 9. VMOV.F64 <Dd>, <Dm>
15018 (VFP register moves.)
15019 10. VMOV.F32 <Sd>, #imm
15020 11. VMOV.F64 <Dd>, #imm
15021 (VFP float immediate load.)
15022 12. VMOV <Rd>, <Sm>
15023 (VFP single to ARM reg.)
15024 13. VMOV <Sd>, <Rm>
15025 (ARM reg to VFP single.)
15026 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15027 (Two ARM regs to two VFP singles.)
15028 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15029 (Two VFP singles to two ARM regs.)
15031 These cases can be disambiguated using neon_select_shape, except cases 1/9
15032 and 3/11 which depend on the operand type too.
15034 All the encoded bits are hardcoded by this function.
15036 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15037 Cases 5, 7 may be used with VFPv2 and above.
15039 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15040 can specify a type where it doesn't make sense to, and is ignored). */
15045 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
15046 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
15048 struct neon_type_el et;
15049 const char *ldconst = 0;
15053 case NS_DD: /* case 1/9. */
15054 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
15055 /* It is not an error here if no type is given. */
15057 if (et.type == NT_float && et.size == 64)
15059 do_vfp_nsyn_opcode ("fcpyd");
15062 /* fall through. */
15064 case NS_QQ: /* case 0/1. */
15066 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15068 /* The architecture manual I have doesn't explicitly state which
15069 value the U bit should have for register->register moves, but
15070 the equivalent VORR instruction has U = 0, so do that. */
15071 inst.instruction = 0x0200110;
15072 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15073 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15074 inst.instruction |= LOW4 (inst.operands[1].reg);
15075 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15076 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15077 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15078 inst.instruction |= neon_quad (rs) << 6;
15080 neon_dp_fixup (&inst);
15084 case NS_DI: /* case 3/11. */
15085 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
15087 if (et.type == NT_float && et.size == 64)
15089 /* case 11 (fconstd). */
15090 ldconst = "fconstd";
15091 goto encode_fconstd;
15093 /* fall through. */
15095 case NS_QI: /* case 2/3. */
15096 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15098 inst.instruction = 0x0800010;
15099 neon_move_immediate ();
15100 neon_dp_fixup (&inst);
15103 case NS_SR: /* case 4. */
15105 unsigned bcdebits = 0;
15107 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
15108 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
15110 et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
15111 logsize = neon_logbits (et.size);
15113 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
15115 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
15116 && et.size != 32, _(BAD_FPU));
15117 constraint (et.type == NT_invtype, _("bad type for scalar"));
15118 constraint (x >= 64 / et.size, _("scalar index out of range"));
15122 case 8: bcdebits = 0x8; break;
15123 case 16: bcdebits = 0x1; break;
15124 case 32: bcdebits = 0x0; break;
15128 bcdebits |= x << logsize;
15130 inst.instruction = 0xe000b10;
15131 do_vfp_cond_or_thumb ();
15132 inst.instruction |= LOW4 (dn) << 16;
15133 inst.instruction |= HI1 (dn) << 7;
15134 inst.instruction |= inst.operands[1].reg << 12;
15135 inst.instruction |= (bcdebits & 3) << 5;
15136 inst.instruction |= (bcdebits >> 2) << 21;
15140 case NS_DRR: /* case 5 (fmdrr). */
15141 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
15144 inst.instruction = 0xc400b10;
15145 do_vfp_cond_or_thumb ();
15146 inst.instruction |= LOW4 (inst.operands[0].reg);
15147 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
15148 inst.instruction |= inst.operands[1].reg << 12;
15149 inst.instruction |= inst.operands[2].reg << 16;
15152 case NS_RS: /* case 6. */
15155 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
15156 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
15157 unsigned abcdebits = 0;
15159 et = neon_check_type (2, NS_NULL,
15160 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
15161 logsize = neon_logbits (et.size);
15163 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
15165 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
15166 && et.size != 32, _(BAD_FPU));
15167 constraint (et.type == NT_invtype, _("bad type for scalar"));
15168 constraint (x >= 64 / et.size, _("scalar index out of range"));
15172 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
15173 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
15174 case 32: abcdebits = 0x00; break;
15178 abcdebits |= x << logsize;
15179 inst.instruction = 0xe100b10;
15180 do_vfp_cond_or_thumb ();
15181 inst.instruction |= LOW4 (dn) << 16;
15182 inst.instruction |= HI1 (dn) << 7;
15183 inst.instruction |= inst.operands[0].reg << 12;
15184 inst.instruction |= (abcdebits & 3) << 5;
15185 inst.instruction |= (abcdebits >> 2) << 21;
15189 case NS_RRD: /* case 7 (fmrrd). */
15190 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
15193 inst.instruction = 0xc500b10;
15194 do_vfp_cond_or_thumb ();
15195 inst.instruction |= inst.operands[0].reg << 12;
15196 inst.instruction |= inst.operands[1].reg << 16;
15197 inst.instruction |= LOW4 (inst.operands[2].reg);
15198 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15201 case NS_FF: /* case 8 (fcpys). */
15202 do_vfp_nsyn_opcode ("fcpys");
15205 case NS_FI: /* case 10 (fconsts). */
15206 ldconst = "fconsts";
15208 if (is_quarter_float (inst.operands[1].imm))
15210 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
15211 do_vfp_nsyn_opcode (ldconst);
15214 first_error (_("immediate out of range"));
15217 case NS_RF: /* case 12 (fmrs). */
15218 do_vfp_nsyn_opcode ("fmrs");
15221 case NS_FR: /* case 13 (fmsr). */
15222 do_vfp_nsyn_opcode ("fmsr");
15225 /* The encoders for the fmrrs and fmsrr instructions expect three operands
15226 (one of which is a list), but we have parsed four. Do some fiddling to
15227 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
15229 case NS_RRFF: /* case 14 (fmrrs). */
15230 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
15231 _("VFP registers must be adjacent"));
15232 inst.operands[2].imm = 2;
15233 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15234 do_vfp_nsyn_opcode ("fmrrs");
15237 case NS_FFRR: /* case 15 (fmsrr). */
15238 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
15239 _("VFP registers must be adjacent"));
15240 inst.operands[1] = inst.operands[2];
15241 inst.operands[2] = inst.operands[3];
15242 inst.operands[0].imm = 2;
15243 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15244 do_vfp_nsyn_opcode ("fmsrr");
15253 do_neon_rshift_round_imm (void)
15255 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15256 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
15257 int imm = inst.operands[2].imm;
15259 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
15262 inst.operands[2].present = 0;
15267 constraint (imm < 1 || (unsigned)imm > et.size,
15268 _("immediate out of range for shift"));
15269 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
15274 do_neon_movl (void)
15276 struct neon_type_el et = neon_check_type (2, NS_QD,
15277 N_EQK | N_DBL, N_SU_32 | N_KEY);
15278 unsigned sizebits = et.size >> 3;
15279 inst.instruction |= sizebits << 19;
15280 neon_two_same (0, et.type == NT_unsigned, -1);
15286 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15287 struct neon_type_el et = neon_check_type (2, rs,
15288 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15289 NEON_ENCODE (INTEGER, inst);
15290 neon_two_same (neon_quad (rs), 1, et.size);
15294 do_neon_zip_uzp (void)
15296 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15297 struct neon_type_el et = neon_check_type (2, rs,
15298 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15299 if (rs == NS_DD && et.size == 32)
15301 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
15302 inst.instruction = N_MNEM_vtrn;
15306 neon_two_same (neon_quad (rs), 1, et.size);
15310 do_neon_sat_abs_neg (void)
15312 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15313 struct neon_type_el et = neon_check_type (2, rs,
15314 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15315 neon_two_same (neon_quad (rs), 1, et.size);
15319 do_neon_pair_long (void)
15321 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15322 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
15323 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
15324 inst.instruction |= (et.type == NT_unsigned) << 7;
15325 neon_two_same (neon_quad (rs), 1, et.size);
15329 do_neon_recip_est (void)
15331 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15332 struct neon_type_el et = neon_check_type (2, rs,
15333 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
15334 inst.instruction |= (et.type == NT_float) << 8;
15335 neon_two_same (neon_quad (rs), 1, et.size);
15341 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15342 struct neon_type_el et = neon_check_type (2, rs,
15343 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15344 neon_two_same (neon_quad (rs), 1, et.size);
15350 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15351 struct neon_type_el et = neon_check_type (2, rs,
15352 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
15353 neon_two_same (neon_quad (rs), 1, et.size);
15359 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15360 struct neon_type_el et = neon_check_type (2, rs,
15361 N_EQK | N_INT, N_8 | N_KEY);
15362 neon_two_same (neon_quad (rs), 1, et.size);
15368 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15369 neon_two_same (neon_quad (rs), 1, -1);
15373 do_neon_tbl_tbx (void)
15375 unsigned listlenbits;
15376 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
15378 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
15380 first_error (_("bad list length for table lookup"));
15384 listlenbits = inst.operands[1].imm - 1;
15385 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15386 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15387 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15388 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15389 inst.instruction |= LOW4 (inst.operands[2].reg);
15390 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15391 inst.instruction |= listlenbits << 8;
15393 neon_dp_fixup (&inst);
15397 do_neon_ldm_stm (void)
15399 /* P, U and L bits are part of bitmask. */
15400 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
15401 unsigned offsetbits = inst.operands[1].imm * 2;
15403 if (inst.operands[1].issingle)
15405 do_vfp_nsyn_ldm_stm (is_dbmode);
15409 constraint (is_dbmode && !inst.operands[0].writeback,
15410 _("writeback (!) must be used for VLDMDB and VSTMDB"));
15412 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15413 _("register list must contain at least 1 and at most 16 "
15416 inst.instruction |= inst.operands[0].reg << 16;
15417 inst.instruction |= inst.operands[0].writeback << 21;
15418 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
15419 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
15421 inst.instruction |= offsetbits;
15423 do_vfp_cond_or_thumb ();
15427 do_neon_ldr_str (void)
15429 int is_ldr = (inst.instruction & (1 << 20)) != 0;
15431 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
15432 And is UNPREDICTABLE in thumb mode. */
15434 && inst.operands[1].reg == REG_PC
15435 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
15437 if (!thumb_mode && warn_on_deprecated)
15438 as_warn (_("Use of PC here is deprecated"));
15440 inst.error = _("Use of PC here is UNPREDICTABLE");
15443 if (inst.operands[0].issingle)
15446 do_vfp_nsyn_opcode ("flds");
15448 do_vfp_nsyn_opcode ("fsts");
15453 do_vfp_nsyn_opcode ("fldd");
15455 do_vfp_nsyn_opcode ("fstd");
15459 /* "interleave" version also handles non-interleaving register VLD1/VST1
15463 do_neon_ld_st_interleave (void)
15465 struct neon_type_el et = neon_check_type (1, NS_NULL,
15466 N_8 | N_16 | N_32 | N_64);
15467 unsigned alignbits = 0;
15469 /* The bits in this table go:
15470 0: register stride of one (0) or two (1)
15471 1,2: register list length, minus one (1, 2, 3, 4).
15472 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
15473 We use -1 for invalid entries. */
15474 const int typetable[] =
15476 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
15477 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
15478 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
15479 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
15483 if (et.type == NT_invtype)
15486 if (inst.operands[1].immisalign)
15487 switch (inst.operands[1].imm >> 8)
15489 case 64: alignbits = 1; break;
15491 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
15492 && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
15493 goto bad_alignment;
15497 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
15498 goto bad_alignment;
15503 first_error (_("bad alignment"));
15507 inst.instruction |= alignbits << 4;
15508 inst.instruction |= neon_logbits (et.size) << 6;
15510 /* Bits [4:6] of the immediate in a list specifier encode register stride
15511 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
15512 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
15513 up the right value for "type" in a table based on this value and the given
15514 list style, then stick it back. */
15515 idx = ((inst.operands[0].imm >> 4) & 7)
15516 | (((inst.instruction >> 8) & 3) << 3);
15518 typebits = typetable[idx];
15520 constraint (typebits == -1, _("bad list type for instruction"));
15522 inst.instruction &= ~0xf00;
15523 inst.instruction |= typebits << 8;
15526 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
15527 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
15528 otherwise. The variable arguments are a list of pairs of legal (size, align)
15529 values, terminated with -1. */
15532 neon_alignment_bit (int size, int align, int *do_align, ...)
15535 int result = FAIL, thissize, thisalign;
15537 if (!inst.operands[1].immisalign)
15543 va_start (ap, do_align);
15547 thissize = va_arg (ap, int);
15548 if (thissize == -1)
15550 thisalign = va_arg (ap, int);
15552 if (size == thissize && align == thisalign)
15555 while (result != SUCCESS);
15559 if (result == SUCCESS)
15562 first_error (_("unsupported alignment for instruction"));
15568 do_neon_ld_st_lane (void)
15570 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
15571 int align_good, do_align = 0;
15572 int logsize = neon_logbits (et.size);
15573 int align = inst.operands[1].imm >> 8;
15574 int n = (inst.instruction >> 8) & 3;
15575 int max_el = 64 / et.size;
15577 if (et.type == NT_invtype)
15580 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
15581 _("bad list length"));
15582 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
15583 _("scalar index out of range"));
15584 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
15586 _("stride of 2 unavailable when element size is 8"));
15590 case 0: /* VLD1 / VST1. */
15591 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
15593 if (align_good == FAIL)
15597 unsigned alignbits = 0;
15600 case 16: alignbits = 0x1; break;
15601 case 32: alignbits = 0x3; break;
15604 inst.instruction |= alignbits << 4;
15608 case 1: /* VLD2 / VST2. */
15609 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
15611 if (align_good == FAIL)
15614 inst.instruction |= 1 << 4;
15617 case 2: /* VLD3 / VST3. */
15618 constraint (inst.operands[1].immisalign,
15619 _("can't use alignment with this instruction"));
15622 case 3: /* VLD4 / VST4. */
15623 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
15624 16, 64, 32, 64, 32, 128, -1);
15625 if (align_good == FAIL)
15629 unsigned alignbits = 0;
15632 case 8: alignbits = 0x1; break;
15633 case 16: alignbits = 0x1; break;
15634 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
15637 inst.instruction |= alignbits << 4;
15644 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
15645 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15646 inst.instruction |= 1 << (4 + logsize);
15648 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
15649 inst.instruction |= logsize << 10;
15652 /* Encode single n-element structure to all lanes VLD<n> instructions. */
15655 do_neon_ld_dup (void)
15657 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
15658 int align_good, do_align = 0;
15660 if (et.type == NT_invtype)
15663 switch ((inst.instruction >> 8) & 3)
15665 case 0: /* VLD1. */
15666 gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
15667 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
15668 &do_align, 16, 16, 32, 32, -1);
15669 if (align_good == FAIL)
15671 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
15674 case 2: inst.instruction |= 1 << 5; break;
15675 default: first_error (_("bad list length")); return;
15677 inst.instruction |= neon_logbits (et.size) << 6;
15680 case 1: /* VLD2. */
15681 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
15682 &do_align, 8, 16, 16, 32, 32, 64, -1);
15683 if (align_good == FAIL)
15685 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
15686 _("bad list length"));
15687 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15688 inst.instruction |= 1 << 5;
15689 inst.instruction |= neon_logbits (et.size) << 6;
15692 case 2: /* VLD3. */
15693 constraint (inst.operands[1].immisalign,
15694 _("can't use alignment with this instruction"));
15695 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
15696 _("bad list length"));
15697 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15698 inst.instruction |= 1 << 5;
15699 inst.instruction |= neon_logbits (et.size) << 6;
15702 case 3: /* VLD4. */
15704 int align = inst.operands[1].imm >> 8;
15705 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
15706 16, 64, 32, 64, 32, 128, -1);
15707 if (align_good == FAIL)
15709 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
15710 _("bad list length"));
15711 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15712 inst.instruction |= 1 << 5;
15713 if (et.size == 32 && align == 128)
15714 inst.instruction |= 0x3 << 6;
15716 inst.instruction |= neon_logbits (et.size) << 6;
15723 inst.instruction |= do_align << 4;
15726 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
15727 apart from bits [11:4]. */
15730 do_neon_ldx_stx (void)
15732 if (inst.operands[1].isreg)
15733 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
15735 switch (NEON_LANE (inst.operands[0].imm))
15737 case NEON_INTERLEAVE_LANES:
15738 NEON_ENCODE (INTERLV, inst);
15739 do_neon_ld_st_interleave ();
15742 case NEON_ALL_LANES:
15743 NEON_ENCODE (DUP, inst);
15748 NEON_ENCODE (LANE, inst);
15749 do_neon_ld_st_lane ();
15752 /* L bit comes from bit mask. */
15753 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15754 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15755 inst.instruction |= inst.operands[1].reg << 16;
15757 if (inst.operands[1].postind)
15759 int postreg = inst.operands[1].imm & 0xf;
15760 constraint (!inst.operands[1].immisreg,
15761 _("post-index must be a register"));
15762 constraint (postreg == 0xd || postreg == 0xf,
15763 _("bad register for post-index"));
15764 inst.instruction |= postreg;
15766 else if (inst.operands[1].writeback)
15768 inst.instruction |= 0xd;
15771 inst.instruction |= 0xf;
15774 inst.instruction |= 0xf9000000;
15776 inst.instruction |= 0xf4000000;
15779 /* Overall per-instruction processing. */
15781 /* We need to be able to fix up arbitrary expressions in some statements.
15782 This is so that we can handle symbols that are an arbitrary distance from
15783 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
15784 which returns part of an address in a form which will be valid for
15785 a data instruction. We do this by pushing the expression into a symbol
15786 in the expr_section, and creating a fix for that. */
15789 fix_new_arm (fragS * frag,
15803 /* Create an absolute valued symbol, so we have something to
15804 refer to in the object file. Unfortunately for us, gas's
15805 generic expression parsing will already have folded out
15806 any use of .set foo/.type foo %function that may have
15807 been used to set type information of the target location,
15808 that's being specified symbolically. We have to presume
15809 the user knows what they are doing. */
15813 sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
15815 symbol = symbol_find_or_make (name);
15816 S_SET_SEGMENT (symbol, absolute_section);
15817 symbol_set_frag (symbol, &zero_address_frag);
15818 S_SET_VALUE (symbol, exp->X_add_number);
15819 exp->X_op = O_symbol;
15820 exp->X_add_symbol = symbol;
15821 exp->X_add_number = 0;
15827 new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
15828 (enum bfd_reloc_code_real) reloc);
15832 new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
15833 pc_rel, (enum bfd_reloc_code_real) reloc);
15837 /* Mark whether the fix is to a THUMB instruction, or an ARM
15839 new_fix->tc_fix_data = thumb_mode;
15842 /* Create a frg for an instruction requiring relaxation. */
15844 output_relax_insn (void)
15850 /* The size of the instruction is unknown, so tie the debug info to the
15851 start of the instruction. */
15852 dwarf2_emit_insn (0);
15854 switch (inst.reloc.exp.X_op)
15857 sym = inst.reloc.exp.X_add_symbol;
15858 offset = inst.reloc.exp.X_add_number;
15862 offset = inst.reloc.exp.X_add_number;
15865 sym = make_expr_symbol (&inst.reloc.exp);
15869 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
15870 inst.relax, sym, offset, NULL/*offset, opcode*/);
15871 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
15874 /* Write a 32-bit thumb instruction to buf. */
15876 put_thumb32_insn (char * buf, unsigned long insn)
15878 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
15879 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
15883 output_inst (const char * str)
15889 as_bad ("%s -- `%s'", inst.error, str);
15894 output_relax_insn ();
15897 if (inst.size == 0)
15900 to = frag_more (inst.size);
15901 /* PR 9814: Record the thumb mode into the current frag so that we know
15902 what type of NOP padding to use, if necessary. We override any previous
15903 setting so that if the mode has changed then the NOPS that we use will
15904 match the encoding of the last instruction in the frag. */
15905 frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
15907 if (thumb_mode && (inst.size > THUMB_SIZE))
15909 gas_assert (inst.size == (2 * THUMB_SIZE));
15910 put_thumb32_insn (to, inst.instruction);
15912 else if (inst.size > INSN_SIZE)
15914 gas_assert (inst.size == (2 * INSN_SIZE));
15915 md_number_to_chars (to, inst.instruction, INSN_SIZE);
15916 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
15919 md_number_to_chars (to, inst.instruction, inst.size);
15921 if (inst.reloc.type != BFD_RELOC_UNUSED)
15922 fix_new_arm (frag_now, to - frag_now->fr_literal,
15923 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
15926 dwarf2_emit_insn (inst.size);
15930 output_it_inst (int cond, int mask, char * to)
15932 unsigned long instruction = 0xbf00;
15935 instruction |= mask;
15936 instruction |= cond << 4;
15940 to = frag_more (2);
15942 dwarf2_emit_insn (2);
15946 md_number_to_chars (to, instruction, 2);
15951 /* Tag values used in struct asm_opcode's tag field. */
15954 OT_unconditional, /* Instruction cannot be conditionalized.
15955 The ARM condition field is still 0xE. */
15956 OT_unconditionalF, /* Instruction cannot be conditionalized
15957 and carries 0xF in its ARM condition field. */
15958 OT_csuffix, /* Instruction takes a conditional suffix. */
15959 OT_csuffixF, /* Some forms of the instruction take a conditional
15960 suffix, others place 0xF where the condition field
15962 OT_cinfix3, /* Instruction takes a conditional infix,
15963 beginning at character index 3. (In
15964 unified mode, it becomes a suffix.) */
15965 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
15966 tsts, cmps, cmns, and teqs. */
15967 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
15968 character index 3, even in unified mode. Used for
15969 legacy instructions where suffix and infix forms
15970 may be ambiguous. */
15971 OT_csuf_or_in3, /* Instruction takes either a conditional
15972 suffix or an infix at character index 3. */
15973 OT_odd_infix_unc, /* This is the unconditional variant of an
15974 instruction that takes a conditional infix
15975 at an unusual position. In unified mode,
15976 this variant will accept a suffix. */
15977 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
15978 are the conditional variants of instructions that
15979 take conditional infixes in unusual positions.
15980 The infix appears at character index
15981 (tag - OT_odd_infix_0). These are not accepted
15982 in unified mode. */
15985 /* Subroutine of md_assemble, responsible for looking up the primary
15986 opcode from the mnemonic the user wrote. STR points to the
15987 beginning of the mnemonic.
15989 This is not simply a hash table lookup, because of conditional
15990 variants. Most instructions have conditional variants, which are
15991 expressed with a _conditional affix_ to the mnemonic. If we were
15992 to encode each conditional variant as a literal string in the opcode
15993 table, it would have approximately 20,000 entries.
15995 Most mnemonics take this affix as a suffix, and in unified syntax,
15996 'most' is upgraded to 'all'. However, in the divided syntax, some
15997 instructions take the affix as an infix, notably the s-variants of
15998 the arithmetic instructions. Of those instructions, all but six
15999 have the infix appear after the third character of the mnemonic.
16001 Accordingly, the algorithm for looking up primary opcodes given
16004 1. Look up the identifier in the opcode table.
16005 If we find a match, go to step U.
16007 2. Look up the last two characters of the identifier in the
16008 conditions table. If we find a match, look up the first N-2
16009 characters of the identifier in the opcode table. If we
16010 find a match, go to step CE.
16012 3. Look up the fourth and fifth characters of the identifier in
16013 the conditions table. If we find a match, extract those
16014 characters from the identifier, and look up the remaining
16015 characters in the opcode table. If we find a match, go
16020 U. Examine the tag field of the opcode structure, in case this is
16021 one of the six instructions with its conditional infix in an
16022 unusual place. If it is, the tag tells us where to find the
16023 infix; look it up in the conditions table and set inst.cond
16024 accordingly. Otherwise, this is an unconditional instruction.
16025 Again set inst.cond accordingly. Return the opcode structure.
16027 CE. Examine the tag field to make sure this is an instruction that
16028 should receive a conditional suffix. If it is not, fail.
16029 Otherwise, set inst.cond from the suffix we already looked up,
16030 and return the opcode structure.
16032 CM. Examine the tag field to make sure this is an instruction that
16033 should receive a conditional infix after the third character.
16034 If it is not, fail. Otherwise, undo the edits to the current
16035 line of input and proceed as for case CE. */
16037 static const struct asm_opcode *
16038 opcode_lookup (char **str)
16042 const struct asm_opcode *opcode;
16043 const struct asm_cond *cond;
16046 /* Scan up to the end of the mnemonic, which must end in white space,
16047 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
16048 for (base = end = *str; *end != '\0'; end++)
16049 if (*end == ' ' || *end == '.')
16055 /* Handle a possible width suffix and/or Neon type suffix. */
16060 /* The .w and .n suffixes are only valid if the unified syntax is in
16062 if (unified_syntax && end[1] == 'w')
16064 else if (unified_syntax && end[1] == 'n')
16069 inst.vectype.elems = 0;
16071 *str = end + offset;
16073 if (end[offset] == '.')
16075 /* See if we have a Neon type suffix (possible in either unified or
16076 non-unified ARM syntax mode). */
16077 if (parse_neon_type (&inst.vectype, str) == FAIL)
16080 else if (end[offset] != '\0' && end[offset] != ' ')
16086 /* Look for unaffixed or special-case affixed mnemonic. */
16087 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
16092 if (opcode->tag < OT_odd_infix_0)
16094 inst.cond = COND_ALWAYS;
16098 if (warn_on_deprecated && unified_syntax)
16099 as_warn (_("conditional infixes are deprecated in unified syntax"));
16100 affix = base + (opcode->tag - OT_odd_infix_0);
16101 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
16104 inst.cond = cond->value;
16108 /* Cannot have a conditional suffix on a mnemonic of less than two
16110 if (end - base < 3)
16113 /* Look for suffixed mnemonic. */
16115 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
16116 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
16118 if (opcode && cond)
16121 switch (opcode->tag)
16123 case OT_cinfix3_legacy:
16124 /* Ignore conditional suffixes matched on infix only mnemonics. */
16128 case OT_cinfix3_deprecated:
16129 case OT_odd_infix_unc:
16130 if (!unified_syntax)
16132 /* else fall through */
16136 case OT_csuf_or_in3:
16137 inst.cond = cond->value;
16140 case OT_unconditional:
16141 case OT_unconditionalF:
16143 inst.cond = cond->value;
16146 /* Delayed diagnostic. */
16147 inst.error = BAD_COND;
16148 inst.cond = COND_ALWAYS;
16157 /* Cannot have a usual-position infix on a mnemonic of less than
16158 six characters (five would be a suffix). */
16159 if (end - base < 6)
16162 /* Look for infixed mnemonic in the usual position. */
16164 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
16168 memcpy (save, affix, 2);
16169 memmove (affix, affix + 2, (end - affix) - 2);
16170 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
16172 memmove (affix + 2, affix, (end - affix) - 2);
16173 memcpy (affix, save, 2);
16176 && (opcode->tag == OT_cinfix3
16177 || opcode->tag == OT_cinfix3_deprecated
16178 || opcode->tag == OT_csuf_or_in3
16179 || opcode->tag == OT_cinfix3_legacy))
16182 if (warn_on_deprecated && unified_syntax
16183 && (opcode->tag == OT_cinfix3
16184 || opcode->tag == OT_cinfix3_deprecated))
16185 as_warn (_("conditional infixes are deprecated in unified syntax"));
16187 inst.cond = cond->value;
16194 /* This function generates an initial IT instruction, leaving its block
16195 virtually open for the new instructions. Eventually,
16196 the mask will be updated by now_it_add_mask () each time
16197 a new instruction needs to be included in the IT block.
16198 Finally, the block is closed with close_automatic_it_block ().
16199 The block closure can be requested either from md_assemble (),
16200 a tencode (), or due to a label hook. */
16203 new_automatic_it_block (int cond)
16205 now_it.state = AUTOMATIC_IT_BLOCK;
16206 now_it.mask = 0x18;
16208 now_it.block_length = 1;
16209 mapping_state (MAP_THUMB);
16210 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
16211 now_it.warn_deprecated = FALSE;
16212 now_it.insn_cond = TRUE;
16215 /* Close an automatic IT block.
16216 See comments in new_automatic_it_block (). */
16219 close_automatic_it_block (void)
16221 now_it.mask = 0x10;
16222 now_it.block_length = 0;
16225 /* Update the mask of the current automatically-generated IT
16226 instruction. See comments in new_automatic_it_block (). */
16229 now_it_add_mask (int cond)
16231 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
16232 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
16233 | ((bitvalue) << (nbit)))
16234 const int resulting_bit = (cond & 1);
16236 now_it.mask &= 0xf;
16237 now_it.mask = SET_BIT_VALUE (now_it.mask,
16239 (5 - now_it.block_length));
16240 now_it.mask = SET_BIT_VALUE (now_it.mask,
16242 ((5 - now_it.block_length) - 1) );
16243 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
16246 #undef SET_BIT_VALUE
16249 /* The IT blocks handling machinery is accessed through the these functions:
16250 it_fsm_pre_encode () from md_assemble ()
16251 set_it_insn_type () optional, from the tencode functions
16252 set_it_insn_type_last () ditto
16253 in_it_block () ditto
16254 it_fsm_post_encode () from md_assemble ()
16255 force_automatic_it_block_close () from label habdling functions
16258 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
16259 initializing the IT insn type with a generic initial value depending
16260 on the inst.condition.
16261 2) During the tencode function, two things may happen:
16262 a) The tencode function overrides the IT insn type by
16263 calling either set_it_insn_type (type) or set_it_insn_type_last ().
16264 b) The tencode function queries the IT block state by
16265 calling in_it_block () (i.e. to determine narrow/not narrow mode).
16267 Both set_it_insn_type and in_it_block run the internal FSM state
16268 handling function (handle_it_state), because: a) setting the IT insn
16269 type may incur in an invalid state (exiting the function),
16270 and b) querying the state requires the FSM to be updated.
16271 Specifically we want to avoid creating an IT block for conditional
16272 branches, so it_fsm_pre_encode is actually a guess and we can't
16273 determine whether an IT block is required until the tencode () routine
16274 has decided what type of instruction this actually it.
16275 Because of this, if set_it_insn_type and in_it_block have to be used,
16276 set_it_insn_type has to be called first.
16278 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
16279 determines the insn IT type depending on the inst.cond code.
16280 When a tencode () routine encodes an instruction that can be
16281 either outside an IT block, or, in the case of being inside, has to be
16282 the last one, set_it_insn_type_last () will determine the proper
16283 IT instruction type based on the inst.cond code. Otherwise,
16284 set_it_insn_type can be called for overriding that logic or
16285 for covering other cases.
16287 Calling handle_it_state () may not transition the IT block state to
16288 OUTSIDE_IT_BLOCK immediatelly, since the (current) state could be
16289 still queried. Instead, if the FSM determines that the state should
16290 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
16291 after the tencode () function: that's what it_fsm_post_encode () does.
16293 Since in_it_block () calls the state handling function to get an
16294 updated state, an error may occur (due to invalid insns combination).
16295 In that case, inst.error is set.
16296 Therefore, inst.error has to be checked after the execution of
16297 the tencode () routine.
16299 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
16300 any pending state change (if any) that didn't take place in
16301 handle_it_state () as explained above. */
16304 it_fsm_pre_encode (void)
16306 if (inst.cond != COND_ALWAYS)
16307 inst.it_insn_type = INSIDE_IT_INSN;
16309 inst.it_insn_type = OUTSIDE_IT_INSN;
16311 now_it.state_handled = 0;
16314 /* IT state FSM handling function. */
16317 handle_it_state (void)
16319 now_it.state_handled = 1;
16320 now_it.insn_cond = FALSE;
16322 switch (now_it.state)
16324 case OUTSIDE_IT_BLOCK:
16325 switch (inst.it_insn_type)
16327 case OUTSIDE_IT_INSN:
16330 case INSIDE_IT_INSN:
16331 case INSIDE_IT_LAST_INSN:
16332 if (thumb_mode == 0)
16335 && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
16336 as_tsktsk (_("Warning: conditional outside an IT block"\
16341 if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
16342 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
16344 /* Automatically generate the IT instruction. */
16345 new_automatic_it_block (inst.cond);
16346 if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
16347 close_automatic_it_block ();
16351 inst.error = BAD_OUT_IT;
16357 case IF_INSIDE_IT_LAST_INSN:
16358 case NEUTRAL_IT_INSN:
16362 now_it.state = MANUAL_IT_BLOCK;
16363 now_it.block_length = 0;
16368 case AUTOMATIC_IT_BLOCK:
16369 /* Three things may happen now:
16370 a) We should increment current it block size;
16371 b) We should close current it block (closing insn or 4 insns);
16372 c) We should close current it block and start a new one (due
16373 to incompatible conditions or
16374 4 insns-length block reached). */
16376 switch (inst.it_insn_type)
16378 case OUTSIDE_IT_INSN:
16379 /* The closure of the block shall happen immediatelly,
16380 so any in_it_block () call reports the block as closed. */
16381 force_automatic_it_block_close ();
16384 case INSIDE_IT_INSN:
16385 case INSIDE_IT_LAST_INSN:
16386 case IF_INSIDE_IT_LAST_INSN:
16387 now_it.block_length++;
16389 if (now_it.block_length > 4
16390 || !now_it_compatible (inst.cond))
16392 force_automatic_it_block_close ();
16393 if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
16394 new_automatic_it_block (inst.cond);
16398 now_it.insn_cond = TRUE;
16399 now_it_add_mask (inst.cond);
16402 if (now_it.state == AUTOMATIC_IT_BLOCK
16403 && (inst.it_insn_type == INSIDE_IT_LAST_INSN
16404 || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
16405 close_automatic_it_block ();
16408 case NEUTRAL_IT_INSN:
16409 now_it.block_length++;
16410 now_it.insn_cond = TRUE;
16412 if (now_it.block_length > 4)
16413 force_automatic_it_block_close ();
16415 now_it_add_mask (now_it.cc & 1);
16419 close_automatic_it_block ();
16420 now_it.state = MANUAL_IT_BLOCK;
16425 case MANUAL_IT_BLOCK:
16427 /* Check conditional suffixes. */
16428 const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
16431 now_it.mask &= 0x1f;
16432 is_last = (now_it.mask == 0x10);
16433 now_it.insn_cond = TRUE;
16435 switch (inst.it_insn_type)
16437 case OUTSIDE_IT_INSN:
16438 inst.error = BAD_NOT_IT;
16441 case INSIDE_IT_INSN:
16442 if (cond != inst.cond)
16444 inst.error = BAD_IT_COND;
16449 case INSIDE_IT_LAST_INSN:
16450 case IF_INSIDE_IT_LAST_INSN:
16451 if (cond != inst.cond)
16453 inst.error = BAD_IT_COND;
16458 inst.error = BAD_BRANCH;
16463 case NEUTRAL_IT_INSN:
16464 /* The BKPT instruction is unconditional even in an IT block. */
16468 inst.error = BAD_IT_IT;
16478 struct depr_insn_mask
16480 unsigned long pattern;
16481 unsigned long mask;
16482 const char* description;
16485 /* List of 16-bit instruction patterns deprecated in an IT block in
16487 static const struct depr_insn_mask depr_it_insns[] = {
16488 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
16489 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
16490 { 0xa000, 0xb800, N_("ADR") },
16491 { 0x4800, 0xf800, N_("Literal loads") },
16492 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
16493 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
16498 it_fsm_post_encode (void)
16502 if (!now_it.state_handled)
16503 handle_it_state ();
16505 if (now_it.insn_cond
16506 && !now_it.warn_deprecated
16507 && warn_on_deprecated
16508 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
16510 if (inst.instruction >= 0x10000)
16512 as_warn (_("it blocks containing wide Thumb instructions are "
16513 "deprecated in ARMv8"));
16514 now_it.warn_deprecated = TRUE;
16518 const struct depr_insn_mask *p = depr_it_insns;
16520 while (p->mask != 0)
16522 if ((inst.instruction & p->mask) == p->pattern)
16524 as_warn (_("it blocks containing 16-bit Thumb intsructions "
16525 "of the following class are deprecated in ARMv8: "
16526 "%s"), p->description);
16527 now_it.warn_deprecated = TRUE;
16535 if (now_it.block_length > 1)
16537 as_warn (_("it blocks of more than one conditional instruction are "
16538 "deprecated in ARMv8"));
16539 now_it.warn_deprecated = TRUE;
16543 is_last = (now_it.mask == 0x10);
16546 now_it.state = OUTSIDE_IT_BLOCK;
16552 force_automatic_it_block_close (void)
16554 if (now_it.state == AUTOMATIC_IT_BLOCK)
16556 close_automatic_it_block ();
16557 now_it.state = OUTSIDE_IT_BLOCK;
16565 if (!now_it.state_handled)
16566 handle_it_state ();
16568 return now_it.state != OUTSIDE_IT_BLOCK;
16572 md_assemble (char *str)
16575 const struct asm_opcode * opcode;
16577 /* Align the previous label if needed. */
16578 if (last_label_seen != NULL)
16580 symbol_set_frag (last_label_seen, frag_now);
16581 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
16582 S_SET_SEGMENT (last_label_seen, now_seg);
16585 memset (&inst, '\0', sizeof (inst));
16586 inst.reloc.type = BFD_RELOC_UNUSED;
16588 opcode = opcode_lookup (&p);
16591 /* It wasn't an instruction, but it might be a register alias of
16592 the form alias .req reg, or a Neon .dn/.qn directive. */
16593 if (! create_register_alias (str, p)
16594 && ! create_neon_reg_alias (str, p))
16595 as_bad (_("bad instruction `%s'"), str);
16600 if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
16601 as_warn (_("s suffix on comparison instruction is deprecated"));
16603 /* The value which unconditional instructions should have in place of the
16604 condition field. */
16605 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
16609 arm_feature_set variant;
16611 variant = cpu_variant;
16612 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
16613 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
16614 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
16615 /* Check that this instruction is supported for this CPU. */
16616 if (!opcode->tvariant
16617 || (thumb_mode == 1
16618 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
16620 as_bad (_("selected processor does not support Thumb mode `%s'"), str);
16623 if (inst.cond != COND_ALWAYS && !unified_syntax
16624 && opcode->tencode != do_t_branch)
16626 as_bad (_("Thumb does not support conditional execution"));
16630 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
16632 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
16633 && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
16634 || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
16636 /* Two things are addressed here.
16637 1) Implicit require narrow instructions on Thumb-1.
16638 This avoids relaxation accidentally introducing Thumb-2
16640 2) Reject wide instructions in non Thumb-2 cores. */
16641 if (inst.size_req == 0)
16643 else if (inst.size_req == 4)
16645 as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
16651 inst.instruction = opcode->tvalue;
16653 if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
16655 /* Prepare the it_insn_type for those encodings that don't set
16657 it_fsm_pre_encode ();
16659 opcode->tencode ();
16661 it_fsm_post_encode ();
16664 if (!(inst.error || inst.relax))
16666 gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
16667 inst.size = (inst.instruction > 0xffff ? 4 : 2);
16668 if (inst.size_req && inst.size_req != inst.size)
16670 as_bad (_("cannot honor width suffix -- `%s'"), str);
16675 /* Something has gone badly wrong if we try to relax a fixed size
16677 gas_assert (inst.size_req == 0 || !inst.relax);
16679 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
16680 *opcode->tvariant);
16681 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
16682 set those bits when Thumb-2 32-bit instructions are seen. ie.
16683 anything other than bl/blx and v6-M instructions.
16684 This is overly pessimistic for relaxable instructions. */
16685 if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
16687 && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
16688 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
16689 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
16692 check_neon_suffixes;
16696 mapping_state (MAP_THUMB);
16699 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
16703 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
16704 is_bx = (opcode->aencode == do_bx);
16706 /* Check that this instruction is supported for this CPU. */
16707 if (!(is_bx && fix_v4bx)
16708 && !(opcode->avariant &&
16709 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
16711 as_bad (_("selected processor does not support ARM mode `%s'"), str);
16716 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
16720 inst.instruction = opcode->avalue;
16721 if (opcode->tag == OT_unconditionalF)
16722 inst.instruction |= 0xF << 28;
16724 inst.instruction |= inst.cond << 28;
16725 inst.size = INSN_SIZE;
16726 if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
16728 it_fsm_pre_encode ();
16729 opcode->aencode ();
16730 it_fsm_post_encode ();
16732 /* Arm mode bx is marked as both v4T and v5 because it's still required
16733 on a hypothetical non-thumb v5 core. */
16735 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
16737 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
16738 *opcode->avariant);
16740 check_neon_suffixes;
16744 mapping_state (MAP_ARM);
16749 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
16757 check_it_blocks_finished (void)
16762 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
16763 if (seg_info (sect)->tc_segment_info_data.current_it.state
16764 == MANUAL_IT_BLOCK)
16766 as_warn (_("section '%s' finished with an open IT block."),
16770 if (now_it.state == MANUAL_IT_BLOCK)
16771 as_warn (_("file finished with an open IT block."));
16775 /* Various frobbings of labels and their addresses. */
16778 arm_start_line_hook (void)
16780 last_label_seen = NULL;
16784 arm_frob_label (symbolS * sym)
16786 last_label_seen = sym;
16788 ARM_SET_THUMB (sym, thumb_mode);
16790 #if defined OBJ_COFF || defined OBJ_ELF
16791 ARM_SET_INTERWORK (sym, support_interwork);
16794 force_automatic_it_block_close ();
16796 /* Note - do not allow local symbols (.Lxxx) to be labelled
16797 as Thumb functions. This is because these labels, whilst
16798 they exist inside Thumb code, are not the entry points for
16799 possible ARM->Thumb calls. Also, these labels can be used
16800 as part of a computed goto or switch statement. eg gcc
16801 can generate code that looks like this:
16803 ldr r2, [pc, .Laaa]
16813 The first instruction loads the address of the jump table.
16814 The second instruction converts a table index into a byte offset.
16815 The third instruction gets the jump address out of the table.
16816 The fourth instruction performs the jump.
16818 If the address stored at .Laaa is that of a symbol which has the
16819 Thumb_Func bit set, then the linker will arrange for this address
16820 to have the bottom bit set, which in turn would mean that the
16821 address computation performed by the third instruction would end
16822 up with the bottom bit set. Since the ARM is capable of unaligned
16823 word loads, the instruction would then load the incorrect address
16824 out of the jump table, and chaos would ensue. */
16825 if (label_is_thumb_function_name
16826 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
16827 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
16829 /* When the address of a Thumb function is taken the bottom
16830 bit of that address should be set. This will allow
16831 interworking between Arm and Thumb functions to work
16834 THUMB_SET_FUNC (sym, 1);
16836 label_is_thumb_function_name = FALSE;
16839 dwarf2_emit_label (sym);
16843 arm_data_in_code (void)
16845 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
16847 *input_line_pointer = '/';
16848 input_line_pointer += 5;
16849 *input_line_pointer = 0;
16857 arm_canonicalize_symbol_name (char * name)
16861 if (thumb_mode && (len = strlen (name)) > 5
16862 && streq (name + len - 5, "/data"))
16863 *(name + len - 5) = 0;
16868 /* Table of all register names defined by default. The user can
16869 define additional names with .req. Note that all register names
16870 should appear in both upper and lowercase variants. Some registers
16871 also have mixed-case names. */
16873 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
16874 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
16875 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
16876 #define REGSET(p,t) \
16877 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
16878 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
16879 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
16880 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
16881 #define REGSETH(p,t) \
16882 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
16883 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
16884 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
16885 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
16886 #define REGSET2(p,t) \
16887 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
16888 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
16889 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
16890 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
16891 #define SPLRBANK(base,bank,t) \
16892 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
16893 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
16894 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
16895 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
16896 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
16897 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
16899 static const struct reg_entry reg_names[] =
16901 /* ARM integer registers. */
16902 REGSET(r, RN), REGSET(R, RN),
16904 /* ATPCS synonyms. */
16905 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
16906 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
16907 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
16909 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
16910 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
16911 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
16913 /* Well-known aliases. */
16914 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
16915 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
16917 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
16918 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
16920 /* Coprocessor numbers. */
16921 REGSET(p, CP), REGSET(P, CP),
16923 /* Coprocessor register numbers. The "cr" variants are for backward
16925 REGSET(c, CN), REGSET(C, CN),
16926 REGSET(cr, CN), REGSET(CR, CN),
16928 /* ARM banked registers. */
16929 REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
16930 REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
16931 REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
16932 REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
16933 REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
16934 REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
16935 REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
16937 REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
16938 REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
16939 REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
16940 REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
16941 REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
16942 REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(SP_fiq,512|(13<<16),RNB),
16943 REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
16944 REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
16946 SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
16947 SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
16948 SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
16949 SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
16950 SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
16951 REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
16952 REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
16953 REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
16954 REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
16956 /* FPA registers. */
16957 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
16958 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
16960 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
16961 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
16963 /* VFP SP registers. */
16964 REGSET(s,VFS), REGSET(S,VFS),
16965 REGSETH(s,VFS), REGSETH(S,VFS),
16967 /* VFP DP Registers. */
16968 REGSET(d,VFD), REGSET(D,VFD),
16969 /* Extra Neon DP registers. */
16970 REGSETH(d,VFD), REGSETH(D,VFD),
16972 /* Neon QP registers. */
16973 REGSET2(q,NQ), REGSET2(Q,NQ),
16975 /* VFP control registers. */
16976 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
16977 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
16978 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
16979 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
16980 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
16981 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
16983 /* Maverick DSP coprocessor registers. */
16984 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
16985 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
16987 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
16988 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
16989 REGDEF(dspsc,0,DSPSC),
16991 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
16992 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
16993 REGDEF(DSPSC,0,DSPSC),
16995 /* iWMMXt data registers - p0, c0-15. */
16996 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
16998 /* iWMMXt control registers - p1, c0-3. */
16999 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
17000 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
17001 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
17002 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
17004 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
17005 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
17006 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
17007 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
17008 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
17010 /* XScale accumulator registers. */
17011 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
17017 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
17018 within psr_required_here. */
17019 static const struct asm_psr psrs[] =
17021 /* Backward compatibility notation. Note that "all" is no longer
17022 truly all possible PSR bits. */
17023 {"all", PSR_c | PSR_f},
17027 /* Individual flags. */
17033 /* Combinations of flags. */
17034 {"fs", PSR_f | PSR_s},
17035 {"fx", PSR_f | PSR_x},
17036 {"fc", PSR_f | PSR_c},
17037 {"sf", PSR_s | PSR_f},
17038 {"sx", PSR_s | PSR_x},
17039 {"sc", PSR_s | PSR_c},
17040 {"xf", PSR_x | PSR_f},
17041 {"xs", PSR_x | PSR_s},
17042 {"xc", PSR_x | PSR_c},
17043 {"cf", PSR_c | PSR_f},
17044 {"cs", PSR_c | PSR_s},
17045 {"cx", PSR_c | PSR_x},
17046 {"fsx", PSR_f | PSR_s | PSR_x},
17047 {"fsc", PSR_f | PSR_s | PSR_c},
17048 {"fxs", PSR_f | PSR_x | PSR_s},
17049 {"fxc", PSR_f | PSR_x | PSR_c},
17050 {"fcs", PSR_f | PSR_c | PSR_s},
17051 {"fcx", PSR_f | PSR_c | PSR_x},
17052 {"sfx", PSR_s | PSR_f | PSR_x},
17053 {"sfc", PSR_s | PSR_f | PSR_c},
17054 {"sxf", PSR_s | PSR_x | PSR_f},
17055 {"sxc", PSR_s | PSR_x | PSR_c},
17056 {"scf", PSR_s | PSR_c | PSR_f},
17057 {"scx", PSR_s | PSR_c | PSR_x},
17058 {"xfs", PSR_x | PSR_f | PSR_s},
17059 {"xfc", PSR_x | PSR_f | PSR_c},
17060 {"xsf", PSR_x | PSR_s | PSR_f},
17061 {"xsc", PSR_x | PSR_s | PSR_c},
17062 {"xcf", PSR_x | PSR_c | PSR_f},
17063 {"xcs", PSR_x | PSR_c | PSR_s},
17064 {"cfs", PSR_c | PSR_f | PSR_s},
17065 {"cfx", PSR_c | PSR_f | PSR_x},
17066 {"csf", PSR_c | PSR_s | PSR_f},
17067 {"csx", PSR_c | PSR_s | PSR_x},
17068 {"cxf", PSR_c | PSR_x | PSR_f},
17069 {"cxs", PSR_c | PSR_x | PSR_s},
17070 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
17071 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
17072 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
17073 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
17074 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
17075 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
17076 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
17077 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
17078 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
17079 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
17080 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
17081 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
17082 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
17083 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
17084 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
17085 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
17086 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
17087 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
17088 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
17089 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
17090 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
17091 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
17092 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
17093 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
17096 /* Table of V7M psr names. */
17097 static const struct asm_psr v7m_psrs[] =
17099 {"apsr", 0 }, {"APSR", 0 },
17100 {"iapsr", 1 }, {"IAPSR", 1 },
17101 {"eapsr", 2 }, {"EAPSR", 2 },
17102 {"psr", 3 }, {"PSR", 3 },
17103 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
17104 {"ipsr", 5 }, {"IPSR", 5 },
17105 {"epsr", 6 }, {"EPSR", 6 },
17106 {"iepsr", 7 }, {"IEPSR", 7 },
17107 {"msp", 8 }, {"MSP", 8 },
17108 {"psp", 9 }, {"PSP", 9 },
17109 {"primask", 16}, {"PRIMASK", 16},
17110 {"basepri", 17}, {"BASEPRI", 17},
17111 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
17112 {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility. */
17113 {"faultmask", 19}, {"FAULTMASK", 19},
17114 {"control", 20}, {"CONTROL", 20}
17117 /* Table of all shift-in-operand names. */
17118 static const struct asm_shift_name shift_names [] =
17120 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
17121 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
17122 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
17123 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
17124 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
17125 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
17128 /* Table of all explicit relocation names. */
17130 static struct reloc_entry reloc_names[] =
17132 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
17133 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
17134 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
17135 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
17136 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
17137 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
17138 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
17139 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
17140 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
17141 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
17142 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
17143 { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
17144 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
17145 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
17146 { "tlscall", BFD_RELOC_ARM_TLS_CALL},
17147 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
17148 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
17149 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
17153 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
17154 static const struct asm_cond conds[] =
17158 {"cs", 0x2}, {"hs", 0x2},
17159 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
17173 static struct asm_barrier_opt barrier_opt_names[] =
17175 { "sy", 0xf }, { "SY", 0xf },
17176 { "un", 0x7 }, { "UN", 0x7 },
17177 { "st", 0xe }, { "ST", 0xe },
17178 { "unst", 0x6 }, { "UNST", 0x6 },
17179 { "ish", 0xb }, { "ISH", 0xb },
17180 { "sh", 0xb }, { "SH", 0xb },
17181 { "ishst", 0xa }, { "ISHST", 0xa },
17182 { "shst", 0xa }, { "SHST", 0xa },
17183 { "nsh", 0x7 }, { "NSH", 0x7 },
17184 { "nshst", 0x6 }, { "NSHST", 0x6 },
17185 { "osh", 0x3 }, { "OSH", 0x3 },
17186 { "oshst", 0x2 }, { "OSHST", 0x2 }
17189 /* Table of ARM-format instructions. */
17191 /* Macros for gluing together operand strings. N.B. In all cases
17192 other than OPS0, the trailing OP_stop comes from default
17193 zero-initialization of the unspecified elements of the array. */
17194 #define OPS0() { OP_stop, }
17195 #define OPS1(a) { OP_##a, }
17196 #define OPS2(a,b) { OP_##a,OP_##b, }
17197 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
17198 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
17199 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
17200 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
17202 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
17203 This is useful when mixing operands for ARM and THUMB, i.e. using the
17204 MIX_ARM_THUMB_OPERANDS macro.
17205 In order to use these macros, prefix the number of operands with _
17207 #define OPS_1(a) { a, }
17208 #define OPS_2(a,b) { a,b, }
17209 #define OPS_3(a,b,c) { a,b,c, }
17210 #define OPS_4(a,b,c,d) { a,b,c,d, }
17211 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
17212 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
17214 /* These macros abstract out the exact format of the mnemonic table and
17215 save some repeated characters. */
17217 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
17218 #define TxCE(mnem, op, top, nops, ops, ae, te) \
17219 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
17220 THUMB_VARIANT, do_##ae, do_##te }
17222 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
17223 a T_MNEM_xyz enumerator. */
17224 #define TCE(mnem, aop, top, nops, ops, ae, te) \
17225 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
17226 #define tCE(mnem, aop, top, nops, ops, ae, te) \
17227 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
17229 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
17230 infix after the third character. */
17231 #define TxC3(mnem, op, top, nops, ops, ae, te) \
17232 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
17233 THUMB_VARIANT, do_##ae, do_##te }
17234 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
17235 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
17236 THUMB_VARIANT, do_##ae, do_##te }
17237 #define TC3(mnem, aop, top, nops, ops, ae, te) \
17238 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
17239 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
17240 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
17241 #define tC3(mnem, aop, top, nops, ops, ae, te) \
17242 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
17243 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
17244 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
17246 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
17247 appear in the condition table. */
17248 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
17249 { m1 #m2 m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
17250 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
17252 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
17253 TxCM_ (m1, , m2, op, top, nops, ops, ae, te), \
17254 TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te), \
17255 TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te), \
17256 TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te), \
17257 TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te), \
17258 TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te), \
17259 TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te), \
17260 TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te), \
17261 TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te), \
17262 TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te), \
17263 TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te), \
17264 TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te), \
17265 TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te), \
17266 TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te), \
17267 TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te), \
17268 TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te), \
17269 TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te), \
17270 TxCM_ (m1, le, m2, op, top, nops, ops, ae, te), \
17271 TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)
17273 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
17274 TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
17275 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
17276 TxCM (m1,m2, aop, T_MNEM##top, nops, ops, ae, te)
17278 /* Mnemonic that cannot be conditionalized. The ARM condition-code
17279 field is still 0xE. Many of the Thumb variants can be executed
17280 conditionally, so this is checked separately. */
17281 #define TUE(mnem, op, top, nops, ops, ae, te) \
17282 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
17283 THUMB_VARIANT, do_##ae, do_##te }
17285 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
17286 condition code field. */
17287 #define TUF(mnem, op, top, nops, ops, ae, te) \
17288 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
17289 THUMB_VARIANT, do_##ae, do_##te }
17291 /* ARM-only variants of all the above. */
17292 #define CE(mnem, op, nops, ops, ae) \
17293 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17295 #define C3(mnem, op, nops, ops, ae) \
17296 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17298 /* Legacy mnemonics that always have conditional infix after the third
17300 #define CL(mnem, op, nops, ops, ae) \
17301 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
17302 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17304 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
17305 #define cCE(mnem, op, nops, ops, ae) \
17306 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17308 /* Legacy coprocessor instructions where conditional infix and conditional
17309 suffix are ambiguous. For consistency this includes all FPA instructions,
17310 not just the potentially ambiguous ones. */
17311 #define cCL(mnem, op, nops, ops, ae) \
17312 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
17313 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17315 /* Coprocessor, takes either a suffix or a position-3 infix
17316 (for an FPA corner case). */
17317 #define C3E(mnem, op, nops, ops, ae) \
17318 { mnem, OPS##nops ops, OT_csuf_or_in3, \
17319 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17321 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
17322 { m1 #m2 m3, OPS##nops ops, \
17323 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
17324 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17326 #define CM(m1, m2, op, nops, ops, ae) \
17327 xCM_ (m1, , m2, op, nops, ops, ae), \
17328 xCM_ (m1, eq, m2, op, nops, ops, ae), \
17329 xCM_ (m1, ne, m2, op, nops, ops, ae), \
17330 xCM_ (m1, cs, m2, op, nops, ops, ae), \
17331 xCM_ (m1, hs, m2, op, nops, ops, ae), \
17332 xCM_ (m1, cc, m2, op, nops, ops, ae), \
17333 xCM_ (m1, ul, m2, op, nops, ops, ae), \
17334 xCM_ (m1, lo, m2, op, nops, ops, ae), \
17335 xCM_ (m1, mi, m2, op, nops, ops, ae), \
17336 xCM_ (m1, pl, m2, op, nops, ops, ae), \
17337 xCM_ (m1, vs, m2, op, nops, ops, ae), \
17338 xCM_ (m1, vc, m2, op, nops, ops, ae), \
17339 xCM_ (m1, hi, m2, op, nops, ops, ae), \
17340 xCM_ (m1, ls, m2, op, nops, ops, ae), \
17341 xCM_ (m1, ge, m2, op, nops, ops, ae), \
17342 xCM_ (m1, lt, m2, op, nops, ops, ae), \
17343 xCM_ (m1, gt, m2, op, nops, ops, ae), \
17344 xCM_ (m1, le, m2, op, nops, ops, ae), \
17345 xCM_ (m1, al, m2, op, nops, ops, ae)
17347 #define UE(mnem, op, nops, ops, ae) \
17348 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
17350 #define UF(mnem, op, nops, ops, ae) \
17351 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
17353 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
17354 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
17355 use the same encoding function for each. */
17356 #define NUF(mnem, op, nops, ops, enc) \
17357 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
17358 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17360 /* Neon data processing, version which indirects through neon_enc_tab for
17361 the various overloaded versions of opcodes. */
17362 #define nUF(mnem, op, nops, ops, enc) \
17363 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
17364 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17366 /* Neon insn with conditional suffix for the ARM version, non-overloaded
17368 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
17369 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
17370 THUMB_VARIANT, do_##enc, do_##enc }
17372 #define NCE(mnem, op, nops, ops, enc) \
17373 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
17375 #define NCEF(mnem, op, nops, ops, enc) \
17376 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
17378 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
17379 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
17380 { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
17381 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17383 #define nCE(mnem, op, nops, ops, enc) \
17384 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
17386 #define nCEF(mnem, op, nops, ops, enc) \
17387 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
17391 static const struct asm_opcode insns[] =
17393 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
17394 #define THUMB_VARIANT &arm_ext_v4t
17395 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
17396 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
17397 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
17398 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
17399 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
17400 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
17401 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
17402 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
17403 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
17404 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
17405 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
17406 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
17407 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
17408 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
17409 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
17410 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
17412 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
17413 for setting PSR flag bits. They are obsolete in V6 and do not
17414 have Thumb equivalents. */
17415 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
17416 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
17417 CL("tstp", 110f000, 2, (RR, SH), cmp),
17418 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
17419 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
17420 CL("cmpp", 150f000, 2, (RR, SH), cmp),
17421 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
17422 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
17423 CL("cmnp", 170f000, 2, (RR, SH), cmp),
17425 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
17426 tC3("movs", 1b00000, _movs, 2, (RR, SH), mov, t_mov_cmp),
17427 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
17428 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
17430 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
17431 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
17432 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
17434 OP_ADDRGLDR),ldst, t_ldst),
17435 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
17437 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17438 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17439 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17440 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17441 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17442 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17444 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
17445 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
17446 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
17447 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
17450 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
17451 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
17452 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
17454 /* Thumb-compatibility pseudo ops. */
17455 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
17456 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
17457 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
17458 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
17459 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
17460 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
17461 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
17462 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
17463 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
17464 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
17465 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
17466 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
17468 /* These may simplify to neg. */
17469 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
17470 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
17472 #undef THUMB_VARIANT
17473 #define THUMB_VARIANT & arm_ext_v6
17475 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
17477 /* V1 instructions with no Thumb analogue prior to V6T2. */
17478 #undef THUMB_VARIANT
17479 #define THUMB_VARIANT & arm_ext_v6t2
17481 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
17482 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
17483 CL("teqp", 130f000, 2, (RR, SH), cmp),
17485 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17486 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17487 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
17488 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17490 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17491 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17493 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17494 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17496 /* V1 instructions with no Thumb analogue at all. */
17497 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
17498 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
17500 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
17501 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
17502 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
17503 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
17504 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
17505 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
17506 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
17507 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
17510 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
17511 #undef THUMB_VARIANT
17512 #define THUMB_VARIANT & arm_ext_v4t
17514 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
17515 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
17517 #undef THUMB_VARIANT
17518 #define THUMB_VARIANT & arm_ext_v6t2
17520 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
17521 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
17523 /* Generic coprocessor instructions. */
17524 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
17525 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17526 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17527 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17528 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17529 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
17530 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
17533 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
17535 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
17536 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
17539 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
17540 #undef THUMB_VARIANT
17541 #define THUMB_VARIANT & arm_ext_msr
17543 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
17544 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
17547 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
17548 #undef THUMB_VARIANT
17549 #define THUMB_VARIANT & arm_ext_v6t2
17551 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17552 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17553 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17554 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17555 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17556 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17557 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17558 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17561 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
17562 #undef THUMB_VARIANT
17563 #define THUMB_VARIANT & arm_ext_v4t
17565 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17566 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17567 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17568 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17569 tCM("ld","sh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17570 tCM("ld","sb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17573 #define ARM_VARIANT & arm_ext_v4t_5
17575 /* ARM Architecture 4T. */
17576 /* Note: bx (and blx) are required on V5, even if the processor does
17577 not support Thumb. */
17578 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
17581 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
17582 #undef THUMB_VARIANT
17583 #define THUMB_VARIANT & arm_ext_v5t
17585 /* Note: blx has 2 variants; the .value coded here is for
17586 BLX(2). Only this variant has conditional execution. */
17587 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
17588 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
17590 #undef THUMB_VARIANT
17591 #define THUMB_VARIANT & arm_ext_v6t2
17593 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
17594 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17595 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17596 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17597 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17598 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
17599 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
17600 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
17603 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
17604 #undef THUMB_VARIANT
17605 #define THUMB_VARIANT &arm_ext_v5exp
17607 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
17608 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
17609 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
17610 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
17612 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
17613 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
17615 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
17616 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
17617 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
17618 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
17620 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17621 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17622 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17623 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17625 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17626 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17628 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
17629 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
17630 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
17631 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
17634 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
17635 #undef THUMB_VARIANT
17636 #define THUMB_VARIANT &arm_ext_v6t2
17638 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
17639 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
17641 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
17642 ADDRGLDRS), ldrd, t_ldstd),
17644 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17645 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17648 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
17650 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
17653 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
17654 #undef THUMB_VARIANT
17655 #define THUMB_VARIANT & arm_ext_v6
17657 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
17658 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
17659 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
17660 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
17661 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
17662 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
17663 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
17664 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
17665 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
17666 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
17668 #undef THUMB_VARIANT
17669 #define THUMB_VARIANT & arm_ext_v6t2
17671 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
17672 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
17674 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17675 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17677 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
17678 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
17680 /* ARM V6 not included in V7M. */
17681 #undef THUMB_VARIANT
17682 #define THUMB_VARIANT & arm_ext_v6_notm
17683 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
17684 UF(rfeib, 9900a00, 1, (RRw), rfe),
17685 UF(rfeda, 8100a00, 1, (RRw), rfe),
17686 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
17687 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
17688 UF(rfefa, 9900a00, 1, (RRw), rfe),
17689 UF(rfeea, 8100a00, 1, (RRw), rfe),
17690 TUF("rfeed", 9100a00, e810c000, 1, (RRw), rfe, rfe),
17691 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
17692 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
17693 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
17694 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
17696 /* ARM V6 not included in V7M (eg. integer SIMD). */
17697 #undef THUMB_VARIANT
17698 #define THUMB_VARIANT & arm_ext_v6_dsp
17699 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
17700 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
17701 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
17702 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17703 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17704 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17705 /* Old name for QASX. */
17706 TCE("qaddsubx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17707 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17708 /* Old name for QSAX. */
17709 TCE("qsubaddx", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17710 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17711 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17712 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17713 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17714 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17715 /* Old name for SASX. */
17716 TCE("saddsubx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17717 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17718 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17719 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17720 /* Old name for SHASX. */
17721 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17722 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17723 /* Old name for SHSAX. */
17724 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17725 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17726 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17727 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17728 /* Old name for SSAX. */
17729 TCE("ssubaddx", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17730 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17731 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17732 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17733 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17734 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17735 /* Old name for UASX. */
17736 TCE("uaddsubx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17737 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17738 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17739 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17740 /* Old name for UHASX. */
17741 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17742 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17743 /* Old name for UHSAX. */
17744 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17745 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17746 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17747 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17748 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17749 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17750 /* Old name for UQASX. */
17751 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17752 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17753 /* Old name for UQSAX. */
17754 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17755 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17756 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17757 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17758 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17759 /* Old name for USAX. */
17760 TCE("usubaddx", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17761 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17762 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17763 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17764 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17765 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
17766 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17767 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17768 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17769 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
17770 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17771 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17772 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17773 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17774 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17775 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17776 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17777 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17778 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17779 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17780 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17781 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17782 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17783 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17784 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17785 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17786 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17787 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17788 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17789 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
17790 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
17791 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17792 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17793 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
17796 #define ARM_VARIANT & arm_ext_v6k
17797 #undef THUMB_VARIANT
17798 #define THUMB_VARIANT & arm_ext_v6k
17800 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
17801 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
17802 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
17803 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
17805 #undef THUMB_VARIANT
17806 #define THUMB_VARIANT & arm_ext_v6_notm
17807 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
17809 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
17810 RRnpcb), strexd, t_strexd),
17812 #undef THUMB_VARIANT
17813 #define THUMB_VARIANT & arm_ext_v6t2
17814 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
17816 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
17818 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
17820 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
17822 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
17825 #define ARM_VARIANT & arm_ext_sec
17826 #undef THUMB_VARIANT
17827 #define THUMB_VARIANT & arm_ext_sec
17829 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
17832 #define ARM_VARIANT & arm_ext_virt
17833 #undef THUMB_VARIANT
17834 #define THUMB_VARIANT & arm_ext_virt
17836 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
17837 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
17840 #define ARM_VARIANT & arm_ext_v6t2
17841 #undef THUMB_VARIANT
17842 #define THUMB_VARIANT & arm_ext_v6t2
17844 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
17845 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
17846 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
17847 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
17849 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
17850 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
17851 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
17852 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
17854 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17855 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17856 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17857 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17859 /* Thumb-only instructions. */
17861 #define ARM_VARIANT NULL
17862 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
17863 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
17865 /* ARM does not really have an IT instruction, so always allow it.
17866 The opcode is copied from Thumb in order to allow warnings in
17867 -mimplicit-it=[never | arm] modes. */
17869 #define ARM_VARIANT & arm_ext_v1
17871 TUE("it", bf08, bf08, 1, (COND), it, t_it),
17872 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
17873 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
17874 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
17875 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
17876 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
17877 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
17878 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
17879 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
17880 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
17881 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
17882 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
17883 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
17884 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
17885 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
17886 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
17887 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
17888 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
17890 /* Thumb2 only instructions. */
17892 #define ARM_VARIANT NULL
17894 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
17895 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
17896 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
17897 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
17898 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
17899 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
17901 /* Hardware division instructions. */
17903 #define ARM_VARIANT & arm_ext_adiv
17904 #undef THUMB_VARIANT
17905 #define THUMB_VARIANT & arm_ext_div
17907 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
17908 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
17910 /* ARM V6M/V7 instructions. */
17912 #define ARM_VARIANT & arm_ext_barrier
17913 #undef THUMB_VARIANT
17914 #define THUMB_VARIANT & arm_ext_barrier
17916 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, t_barrier),
17917 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, t_barrier),
17918 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, t_barrier),
17920 /* ARM V7 instructions. */
17922 #define ARM_VARIANT & arm_ext_v7
17923 #undef THUMB_VARIANT
17924 #define THUMB_VARIANT & arm_ext_v7
17926 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
17927 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
17930 #define ARM_VARIANT & arm_ext_mp
17931 #undef THUMB_VARIANT
17932 #define THUMB_VARIANT & arm_ext_mp
17934 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
17937 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
17939 cCE("wfs", e200110, 1, (RR), rd),
17940 cCE("rfs", e300110, 1, (RR), rd),
17941 cCE("wfc", e400110, 1, (RR), rd),
17942 cCE("rfc", e500110, 1, (RR), rd),
17944 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
17945 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
17946 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
17947 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
17949 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
17950 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
17951 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
17952 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
17954 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
17955 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
17956 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
17957 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
17958 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
17959 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
17960 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
17961 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
17962 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
17963 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
17964 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
17965 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
17967 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
17968 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
17969 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
17970 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
17971 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
17972 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
17973 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
17974 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
17975 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
17976 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
17977 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
17978 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
17980 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
17981 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
17982 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
17983 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
17984 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
17985 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
17986 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
17987 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
17988 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
17989 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
17990 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
17991 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
17993 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
17994 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
17995 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
17996 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
17997 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
17998 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
17999 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
18000 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
18001 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
18002 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
18003 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
18004 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
18006 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
18007 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
18008 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
18009 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
18010 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
18011 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
18012 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
18013 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
18014 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
18015 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
18016 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
18017 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
18019 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
18020 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
18021 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
18022 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
18023 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
18024 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
18025 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
18026 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
18027 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
18028 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
18029 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
18030 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
18032 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
18033 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
18034 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
18035 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
18036 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
18037 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
18038 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
18039 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
18040 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
18041 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
18042 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
18043 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
18045 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
18046 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
18047 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
18048 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
18049 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
18050 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
18051 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
18052 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
18053 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
18054 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
18055 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
18056 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
18058 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
18059 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
18060 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
18061 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
18062 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
18063 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
18064 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
18065 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
18066 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
18067 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
18068 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
18069 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
18071 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
18072 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
18073 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
18074 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
18075 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
18076 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
18077 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
18078 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
18079 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
18080 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
18081 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
18082 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
18084 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
18085 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
18086 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
18087 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
18088 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
18089 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
18090 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
18091 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
18092 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
18093 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
18094 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
18095 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
18097 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
18098 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
18099 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
18100 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
18101 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
18102 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
18103 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
18104 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
18105 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
18106 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
18107 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
18108 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
18110 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
18111 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
18112 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
18113 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
18114 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
18115 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
18116 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
18117 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
18118 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
18119 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
18120 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
18121 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
18123 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
18124 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
18125 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
18126 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
18127 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
18128 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
18129 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
18130 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
18131 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
18132 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
18133 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
18134 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
18136 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
18137 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
18138 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
18139 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
18140 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
18141 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
18142 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
18143 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
18144 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
18145 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
18146 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
18147 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
18149 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
18150 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
18151 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
18152 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
18153 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
18154 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
18155 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
18156 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
18157 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
18158 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
18159 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
18160 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
18162 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
18163 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
18164 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
18165 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
18166 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
18167 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18168 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18169 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18170 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
18171 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
18172 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
18173 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
18175 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
18176 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
18177 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
18178 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
18179 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
18180 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18181 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18182 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18183 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
18184 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
18185 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
18186 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
18188 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
18189 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
18190 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
18191 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
18192 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
18193 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18194 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18195 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18196 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
18197 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
18198 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
18199 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
18201 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
18202 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
18203 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
18204 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
18205 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
18206 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18207 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18208 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18209 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
18210 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
18211 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
18212 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
18214 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
18215 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
18216 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
18217 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
18218 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
18219 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18220 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18221 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18222 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
18223 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
18224 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
18225 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
18227 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
18228 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
18229 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
18230 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
18231 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
18232 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18233 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18234 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18235 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
18236 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
18237 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
18238 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
18240 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
18241 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
18242 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
18243 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
18244 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
18245 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18246 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18247 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18248 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
18249 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
18250 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
18251 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
18253 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
18254 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
18255 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
18256 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
18257 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
18258 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18259 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18260 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18261 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
18262 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
18263 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
18264 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
18266 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
18267 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
18268 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
18269 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
18270 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
18271 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18272 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18273 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18274 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
18275 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
18276 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
18277 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
18279 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
18280 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
18281 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
18282 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
18283 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
18284 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18285 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18286 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18287 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
18288 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
18289 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
18290 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
18292 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18293 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18294 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18295 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18296 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18297 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18298 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18299 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18300 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18301 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18302 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18303 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18305 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18306 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18307 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18308 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18309 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18310 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18311 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18312 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18313 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18314 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18315 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18316 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18318 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18319 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18320 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18321 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18322 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18323 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18324 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18325 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18326 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18327 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18328 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18329 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18331 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
18332 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
18333 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
18334 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
18336 cCL("flts", e000110, 2, (RF, RR), rn_rd),
18337 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
18338 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
18339 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
18340 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
18341 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
18342 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
18343 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
18344 cCL("flte", e080110, 2, (RF, RR), rn_rd),
18345 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
18346 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
18347 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
18349 /* The implementation of the FIX instruction is broken on some
18350 assemblers, in that it accepts a precision specifier as well as a
18351 rounding specifier, despite the fact that this is meaningless.
18352 To be more compatible, we accept it as well, though of course it
18353 does not set any bits. */
18354 cCE("fix", e100110, 2, (RR, RF), rd_rm),
18355 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
18356 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
18357 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
18358 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
18359 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
18360 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
18361 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
18362 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
18363 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
18364 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
18365 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
18366 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
18368 /* Instructions that were new with the real FPA, call them V2. */
18370 #define ARM_VARIANT & fpu_fpa_ext_v2
18372 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18373 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18374 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18375 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18376 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18377 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18380 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
18382 /* Moves and type conversions. */
18383 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
18384 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
18385 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
18386 cCE("fmstat", ef1fa10, 0, (), noargs),
18387 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
18388 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
18389 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
18390 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
18391 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
18392 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
18393 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
18394 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
18395 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
18396 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
18398 /* Memory operations. */
18399 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
18400 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
18401 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
18402 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
18403 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
18404 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
18405 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
18406 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
18407 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
18408 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
18409 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
18410 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
18411 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
18412 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
18413 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
18414 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
18415 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
18416 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
18418 /* Monadic operations. */
18419 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
18420 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
18421 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
18423 /* Dyadic operations. */
18424 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18425 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18426 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18427 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18428 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18429 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18430 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18431 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18432 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18435 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
18436 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
18437 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
18438 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
18440 /* Double precision load/store are still present on single precision
18441 implementations. */
18442 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
18443 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
18444 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
18445 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
18446 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
18447 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
18448 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
18449 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
18450 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
18451 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
18454 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
18456 /* Moves and type conversions. */
18457 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
18458 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
18459 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
18460 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
18461 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
18462 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
18463 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
18464 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
18465 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
18466 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
18467 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
18468 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
18469 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
18471 /* Monadic operations. */
18472 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
18473 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
18474 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
18476 /* Dyadic operations. */
18477 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18478 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18479 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18480 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18481 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18482 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18483 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18484 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18485 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18488 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
18489 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
18490 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
18491 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
18494 #define ARM_VARIANT & fpu_vfp_ext_v2
18496 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
18497 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
18498 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
18499 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
18501 /* Instructions which may belong to either the Neon or VFP instruction sets.
18502 Individual encoder functions perform additional architecture checks. */
18504 #define ARM_VARIANT & fpu_vfp_ext_v1xd
18505 #undef THUMB_VARIANT
18506 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
18508 /* These mnemonics are unique to VFP. */
18509 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
18510 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
18511 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18512 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18513 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18514 nCE(vcmp, _vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
18515 nCE(vcmpe, _vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
18516 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
18517 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
18518 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
18520 /* Mnemonics shared by Neon and VFP. */
18521 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
18522 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
18523 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
18525 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
18526 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
18528 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
18529 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
18531 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18532 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18533 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18534 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18535 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18536 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18537 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
18538 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
18540 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
18541 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
18542 nCEF(vcvtb, _vcvt, 2, (RVS, RVS), neon_cvtb),
18543 nCEF(vcvtt, _vcvt, 2, (RVS, RVS), neon_cvtt),
18546 /* NOTE: All VMOV encoding is special-cased! */
18547 NCE(vmov, 0, 1, (VMOV), neon_mov),
18548 NCE(vmovq, 0, 1, (VMOV), neon_mov),
18550 #undef THUMB_VARIANT
18551 #define THUMB_VARIANT & fpu_neon_ext_v1
18553 #define ARM_VARIANT & fpu_neon_ext_v1
18555 /* Data processing with three registers of the same length. */
18556 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
18557 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
18558 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
18559 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18560 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
18561 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18562 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
18563 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18564 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
18565 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
18566 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
18567 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
18568 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
18569 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
18570 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
18571 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
18572 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
18573 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
18574 /* If not immediate, fall back to neon_dyadic_i64_su.
18575 shl_imm should accept I8 I16 I32 I64,
18576 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
18577 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
18578 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
18579 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
18580 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
18581 /* Logic ops, types optional & ignored. */
18582 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18583 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
18584 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18585 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
18586 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18587 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
18588 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18589 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
18590 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
18591 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
18592 /* Bitfield ops, untyped. */
18593 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18594 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
18595 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18596 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
18597 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18598 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
18599 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
18600 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18601 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
18602 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18603 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
18604 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18605 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
18606 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
18607 back to neon_dyadic_if_su. */
18608 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
18609 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
18610 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
18611 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
18612 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
18613 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
18614 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
18615 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
18616 /* Comparison. Type I8 I16 I32 F32. */
18617 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
18618 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
18619 /* As above, D registers only. */
18620 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
18621 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
18622 /* Int and float variants, signedness unimportant. */
18623 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
18624 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
18625 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
18626 /* Add/sub take types I8 I16 I32 I64 F32. */
18627 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
18628 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
18629 /* vtst takes sizes 8, 16, 32. */
18630 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
18631 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
18632 /* VMUL takes I8 I16 I32 F32 P8. */
18633 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
18634 /* VQD{R}MULH takes S16 S32. */
18635 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
18636 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
18637 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
18638 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
18639 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
18640 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
18641 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
18642 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
18643 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
18644 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
18645 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
18646 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
18647 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
18648 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
18649 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
18650 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
18652 /* Two address, int/float. Types S8 S16 S32 F32. */
18653 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
18654 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
18656 /* Data processing with two registers and a shift amount. */
18657 /* Right shifts, and variants with rounding.
18658 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
18659 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
18660 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
18661 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
18662 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
18663 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
18664 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
18665 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
18666 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
18667 /* Shift and insert. Sizes accepted 8 16 32 64. */
18668 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
18669 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
18670 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
18671 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
18672 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
18673 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
18674 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
18675 /* Right shift immediate, saturating & narrowing, with rounding variants.
18676 Types accepted S16 S32 S64 U16 U32 U64. */
18677 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
18678 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
18679 /* As above, unsigned. Types accepted S16 S32 S64. */
18680 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
18681 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
18682 /* Right shift narrowing. Types accepted I16 I32 I64. */
18683 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
18684 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
18685 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
18686 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
18687 /* CVT with optional immediate for fixed-point variant. */
18688 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
18690 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
18691 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
18693 /* Data processing, three registers of different lengths. */
18694 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
18695 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
18696 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
18697 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
18698 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
18699 /* If not scalar, fall back to neon_dyadic_long.
18700 Vector types as above, scalar types S16 S32 U16 U32. */
18701 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
18702 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
18703 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
18704 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
18705 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
18706 /* Dyadic, narrowing insns. Types I16 I32 I64. */
18707 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
18708 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
18709 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
18710 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
18711 /* Saturating doubling multiplies. Types S16 S32. */
18712 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18713 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18714 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18715 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
18716 S16 S32 U16 U32. */
18717 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
18719 /* Extract. Size 8. */
18720 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
18721 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
18723 /* Two registers, miscellaneous. */
18724 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
18725 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
18726 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
18727 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
18728 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
18729 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
18730 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
18731 /* Vector replicate. Sizes 8 16 32. */
18732 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
18733 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
18734 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
18735 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
18736 /* VMOVN. Types I16 I32 I64. */
18737 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
18738 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
18739 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
18740 /* VQMOVUN. Types S16 S32 S64. */
18741 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
18742 /* VZIP / VUZP. Sizes 8 16 32. */
18743 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
18744 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
18745 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
18746 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
18747 /* VQABS / VQNEG. Types S8 S16 S32. */
18748 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
18749 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
18750 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
18751 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
18752 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
18753 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
18754 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
18755 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
18756 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
18757 /* Reciprocal estimates. Types U32 F32. */
18758 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
18759 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
18760 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
18761 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
18762 /* VCLS. Types S8 S16 S32. */
18763 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
18764 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
18765 /* VCLZ. Types I8 I16 I32. */
18766 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
18767 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
18768 /* VCNT. Size 8. */
18769 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
18770 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
18771 /* Two address, untyped. */
18772 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
18773 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
18774 /* VTRN. Sizes 8 16 32. */
18775 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
18776 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
18778 /* Table lookup. Size 8. */
18779 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
18780 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
18782 #undef THUMB_VARIANT
18783 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
18785 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
18787 /* Neon element/structure load/store. */
18788 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
18789 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
18790 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
18791 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
18792 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
18793 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
18794 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
18795 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
18797 #undef THUMB_VARIANT
18798 #define THUMB_VARIANT &fpu_vfp_ext_v3xd
18800 #define ARM_VARIANT &fpu_vfp_ext_v3xd
18801 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
18802 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
18803 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
18804 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
18805 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
18806 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
18807 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
18808 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
18809 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
18811 #undef THUMB_VARIANT
18812 #define THUMB_VARIANT & fpu_vfp_ext_v3
18814 #define ARM_VARIANT & fpu_vfp_ext_v3
18816 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
18817 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
18818 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
18819 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
18820 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
18821 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
18822 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
18823 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
18824 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
18827 #define ARM_VARIANT &fpu_vfp_ext_fma
18828 #undef THUMB_VARIANT
18829 #define THUMB_VARIANT &fpu_vfp_ext_fma
18830 /* Mnemonics shared by Neon and VFP. These are included in the
18831 VFP FMA variant; NEON and VFP FMA always includes the NEON
18832 FMA instructions. */
18833 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
18834 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
18835 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
18836 the v form should always be used. */
18837 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18838 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18839 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18840 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18841 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18842 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18844 #undef THUMB_VARIANT
18846 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
18848 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18849 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18850 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18851 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18852 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18853 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18854 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
18855 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
18858 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
18860 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
18861 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
18862 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
18863 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
18864 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
18865 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
18866 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
18867 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
18868 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
18869 cCE("textrmub", e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
18870 cCE("textrmuh", e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
18871 cCE("textrmuw", e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
18872 cCE("textrmsb", e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
18873 cCE("textrmsh", e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
18874 cCE("textrmsw", e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
18875 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
18876 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
18877 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
18878 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
18879 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
18880 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18881 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18882 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18883 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18884 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18885 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18886 cCE("tmovmskb", e100030, 2, (RR, RIWR), rd_rn),
18887 cCE("tmovmskh", e500030, 2, (RR, RIWR), rd_rn),
18888 cCE("tmovmskw", e900030, 2, (RR, RIWR), rd_rn),
18889 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
18890 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
18891 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
18892 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
18893 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
18894 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
18895 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
18896 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
18897 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18898 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18899 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18900 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18901 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18902 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18903 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18904 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18905 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18906 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
18907 cCE("walignr0", e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18908 cCE("walignr1", e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18909 cCE("walignr2", ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18910 cCE("walignr3", eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18911 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18912 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18913 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18914 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18915 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18916 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18917 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18918 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18919 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18920 cCE("wcmpgtub", e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18921 cCE("wcmpgtuh", e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18922 cCE("wcmpgtuw", e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18923 cCE("wcmpgtsb", e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18924 cCE("wcmpgtsh", e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18925 cCE("wcmpgtsw", eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18926 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
18927 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
18928 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
18929 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
18930 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18931 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18932 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18933 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18934 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18935 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18936 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18937 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18938 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18939 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18940 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18941 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18942 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18943 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18944 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18945 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18946 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18947 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18948 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
18949 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18950 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18951 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18952 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18953 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18954 cCE("wpackhss", e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18955 cCE("wpackhus", e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18956 cCE("wpackwss", eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18957 cCE("wpackwus", e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18958 cCE("wpackdss", ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18959 cCE("wpackdus", ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18960 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18961 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18962 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18963 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18964 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18965 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18966 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18967 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18968 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18969 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18970 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
18971 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18972 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18973 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18974 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18975 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18976 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18977 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18978 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18979 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18980 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18981 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18982 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18983 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18984 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18985 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18986 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18987 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18988 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18989 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
18990 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
18991 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
18992 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
18993 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18994 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18995 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18996 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18997 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18998 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18999 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19000 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19001 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19002 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
19003 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
19004 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
19005 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
19006 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
19007 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
19008 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19009 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19010 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19011 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
19012 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
19013 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
19014 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
19015 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
19016 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
19017 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19018 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19019 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19020 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19021 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
19024 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
19026 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
19027 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
19028 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
19029 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
19030 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
19031 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
19032 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19033 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19034 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19035 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19036 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19037 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19038 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19039 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19040 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19041 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19042 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19043 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19044 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19045 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19046 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
19047 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19048 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19049 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19050 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19051 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19052 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19053 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19054 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19055 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19056 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19057 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19058 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19059 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19060 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19061 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19062 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19063 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19064 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19065 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19066 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19067 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19068 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19069 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19070 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19071 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19072 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19073 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19074 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19075 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19076 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19077 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19078 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19079 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19080 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19081 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19082 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19085 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
19087 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
19088 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
19089 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
19090 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
19091 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
19092 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
19093 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
19094 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
19095 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
19096 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
19097 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
19098 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
19099 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
19100 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
19101 cCE("cfmv64lr", e000510, 2, (RMDX, RR), rn_rd),
19102 cCE("cfmvr64l", e100510, 2, (RR, RMDX), rd_rn),
19103 cCE("cfmv64hr", e000530, 2, (RMDX, RR), rn_rd),
19104 cCE("cfmvr64h", e100530, 2, (RR, RMDX), rd_rn),
19105 cCE("cfmval32", e200440, 2, (RMAX, RMFX), rd_rn),
19106 cCE("cfmv32al", e100440, 2, (RMFX, RMAX), rd_rn),
19107 cCE("cfmvam32", e200460, 2, (RMAX, RMFX), rd_rn),
19108 cCE("cfmv32am", e100460, 2, (RMFX, RMAX), rd_rn),
19109 cCE("cfmvah32", e200480, 2, (RMAX, RMFX), rd_rn),
19110 cCE("cfmv32ah", e100480, 2, (RMFX, RMAX), rd_rn),
19111 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
19112 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
19113 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
19114 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
19115 cCE("cfmvsc32", e2004e0, 2, (RMDS, RMDX), mav_dspsc),
19116 cCE("cfmv32sc", e1004e0, 2, (RMDX, RMDS), rd),
19117 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
19118 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
19119 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
19120 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
19121 cCE("cfcvt32s", e000480, 2, (RMF, RMFX), rd_rn),
19122 cCE("cfcvt32d", e0004a0, 2, (RMD, RMFX), rd_rn),
19123 cCE("cfcvt64s", e0004c0, 2, (RMF, RMDX), rd_rn),
19124 cCE("cfcvt64d", e0004e0, 2, (RMD, RMDX), rd_rn),
19125 cCE("cfcvts32", e100580, 2, (RMFX, RMF), rd_rn),
19126 cCE("cfcvtd32", e1005a0, 2, (RMFX, RMD), rd_rn),
19127 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
19128 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
19129 cCE("cfrshl32", e000550, 3, (RMFX, RMFX, RR), mav_triple),
19130 cCE("cfrshl64", e000570, 3, (RMDX, RMDX, RR), mav_triple),
19131 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
19132 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
19133 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
19134 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
19135 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
19136 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
19137 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
19138 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
19139 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
19140 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
19141 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
19142 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
19143 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
19144 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
19145 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
19146 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
19147 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
19148 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
19149 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
19150 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
19151 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19152 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
19153 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19154 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
19155 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19156 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
19157 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19158 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
19159 cCE("cfmadd32", e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
19160 cCE("cfmsub32", e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
19161 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
19162 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
19165 #undef THUMB_VARIANT
19192 /* MD interface: bits in the object file. */
19194 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
19195 for use in the a.out file, and stores them in the array pointed to by buf.
19196 This knows about the endian-ness of the target machine and does
19197 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
19198 2 (short) and 4 (long) Floating numbers are put out as a series of
19199 LITTLENUMS (shorts, here at least). */
19202 md_number_to_chars (char * buf, valueT val, int n)
19204 if (target_big_endian)
19205 number_to_chars_bigendian (buf, val, n);
19207 number_to_chars_littleendian (buf, val, n);
19211 md_chars_to_number (char * buf, int n)
19214 unsigned char * where = (unsigned char *) buf;
19216 if (target_big_endian)
19221 result |= (*where++ & 255);
19229 result |= (where[n] & 255);
19236 /* MD interface: Sections. */
19238 /* Calculate the maximum variable size (i.e., excluding fr_fix)
19239 that an rs_machine_dependent frag may reach. */
19242 arm_frag_max_var (fragS *fragp)
19244 /* We only use rs_machine_dependent for variable-size Thumb instructions,
19245 which are either THUMB_SIZE (2) or INSN_SIZE (4).
19247 Note that we generate relaxable instructions even for cases that don't
19248 really need it, like an immediate that's a trivial constant. So we're
19249 overestimating the instruction size for some of those cases. Rather
19250 than putting more intelligence here, it would probably be better to
19251 avoid generating a relaxation frag in the first place when it can be
19252 determined up front that a short instruction will suffice. */
19254 gas_assert (fragp->fr_type == rs_machine_dependent);
19258 /* Estimate the size of a frag before relaxing. Assume everything fits in
19262 md_estimate_size_before_relax (fragS * fragp,
19263 segT segtype ATTRIBUTE_UNUSED)
19269 /* Convert a machine dependent frag. */
19272 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
19274 unsigned long insn;
19275 unsigned long old_op;
19283 buf = fragp->fr_literal + fragp->fr_fix;
19285 old_op = bfd_get_16(abfd, buf);
19286 if (fragp->fr_symbol)
19288 exp.X_op = O_symbol;
19289 exp.X_add_symbol = fragp->fr_symbol;
19293 exp.X_op = O_constant;
19295 exp.X_add_number = fragp->fr_offset;
19296 opcode = fragp->fr_subtype;
19299 case T_MNEM_ldr_pc:
19300 case T_MNEM_ldr_pc2:
19301 case T_MNEM_ldr_sp:
19302 case T_MNEM_str_sp:
19309 if (fragp->fr_var == 4)
19311 insn = THUMB_OP32 (opcode);
19312 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
19314 insn |= (old_op & 0x700) << 4;
19318 insn |= (old_op & 7) << 12;
19319 insn |= (old_op & 0x38) << 13;
19321 insn |= 0x00000c00;
19322 put_thumb32_insn (buf, insn);
19323 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
19327 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
19329 pc_rel = (opcode == T_MNEM_ldr_pc2);
19332 if (fragp->fr_var == 4)
19334 insn = THUMB_OP32 (opcode);
19335 insn |= (old_op & 0xf0) << 4;
19336 put_thumb32_insn (buf, insn);
19337 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
19341 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
19342 exp.X_add_number -= 4;
19350 if (fragp->fr_var == 4)
19352 int r0off = (opcode == T_MNEM_mov
19353 || opcode == T_MNEM_movs) ? 0 : 8;
19354 insn = THUMB_OP32 (opcode);
19355 insn = (insn & 0xe1ffffff) | 0x10000000;
19356 insn |= (old_op & 0x700) << r0off;
19357 put_thumb32_insn (buf, insn);
19358 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
19362 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
19367 if (fragp->fr_var == 4)
19369 insn = THUMB_OP32(opcode);
19370 put_thumb32_insn (buf, insn);
19371 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
19374 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
19378 if (fragp->fr_var == 4)
19380 insn = THUMB_OP32(opcode);
19381 insn |= (old_op & 0xf00) << 14;
19382 put_thumb32_insn (buf, insn);
19383 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
19386 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
19389 case T_MNEM_add_sp:
19390 case T_MNEM_add_pc:
19391 case T_MNEM_inc_sp:
19392 case T_MNEM_dec_sp:
19393 if (fragp->fr_var == 4)
19395 /* ??? Choose between add and addw. */
19396 insn = THUMB_OP32 (opcode);
19397 insn |= (old_op & 0xf0) << 4;
19398 put_thumb32_insn (buf, insn);
19399 if (opcode == T_MNEM_add_pc)
19400 reloc_type = BFD_RELOC_ARM_T32_IMM12;
19402 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
19405 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
19413 if (fragp->fr_var == 4)
19415 insn = THUMB_OP32 (opcode);
19416 insn |= (old_op & 0xf0) << 4;
19417 insn |= (old_op & 0xf) << 16;
19418 put_thumb32_insn (buf, insn);
19419 if (insn & (1 << 20))
19420 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
19422 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
19425 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
19431 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
19432 (enum bfd_reloc_code_real) reloc_type);
19433 fixp->fx_file = fragp->fr_file;
19434 fixp->fx_line = fragp->fr_line;
19435 fragp->fr_fix += fragp->fr_var;
19438 /* Return the size of a relaxable immediate operand instruction.
19439 SHIFT and SIZE specify the form of the allowable immediate. */
19441 relax_immediate (fragS *fragp, int size, int shift)
19447 /* ??? Should be able to do better than this. */
19448 if (fragp->fr_symbol)
19451 low = (1 << shift) - 1;
19452 mask = (1 << (shift + size)) - (1 << shift);
19453 offset = fragp->fr_offset;
19454 /* Force misaligned offsets to 32-bit variant. */
19457 if (offset & ~mask)
19462 /* Get the address of a symbol during relaxation. */
19464 relaxed_symbol_addr (fragS *fragp, long stretch)
19470 sym = fragp->fr_symbol;
19471 sym_frag = symbol_get_frag (sym);
19472 know (S_GET_SEGMENT (sym) != absolute_section
19473 || sym_frag == &zero_address_frag);
19474 addr = S_GET_VALUE (sym) + fragp->fr_offset;
19476 /* If frag has yet to be reached on this pass, assume it will
19477 move by STRETCH just as we did. If this is not so, it will
19478 be because some frag between grows, and that will force
19482 && sym_frag->relax_marker != fragp->relax_marker)
19486 /* Adjust stretch for any alignment frag. Note that if have
19487 been expanding the earlier code, the symbol may be
19488 defined in what appears to be an earlier frag. FIXME:
19489 This doesn't handle the fr_subtype field, which specifies
19490 a maximum number of bytes to skip when doing an
19492 for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
19494 if (f->fr_type == rs_align || f->fr_type == rs_align_code)
19497 stretch = - ((- stretch)
19498 & ~ ((1 << (int) f->fr_offset) - 1));
19500 stretch &= ~ ((1 << (int) f->fr_offset) - 1);
19512 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
19515 relax_adr (fragS *fragp, asection *sec, long stretch)
19520 /* Assume worst case for symbols not known to be in the same section. */
19521 if (fragp->fr_symbol == NULL
19522 || !S_IS_DEFINED (fragp->fr_symbol)
19523 || sec != S_GET_SEGMENT (fragp->fr_symbol)
19524 || S_IS_WEAK (fragp->fr_symbol))
19527 val = relaxed_symbol_addr (fragp, stretch);
19528 addr = fragp->fr_address + fragp->fr_fix;
19529 addr = (addr + 4) & ~3;
19530 /* Force misaligned targets to 32-bit variant. */
19534 if (val < 0 || val > 1020)
19539 /* Return the size of a relaxable add/sub immediate instruction. */
19541 relax_addsub (fragS *fragp, asection *sec)
19546 buf = fragp->fr_literal + fragp->fr_fix;
19547 op = bfd_get_16(sec->owner, buf);
19548 if ((op & 0xf) == ((op >> 4) & 0xf))
19549 return relax_immediate (fragp, 8, 0);
19551 return relax_immediate (fragp, 3, 0);
19555 /* Return the size of a relaxable branch instruction. BITS is the
19556 size of the offset field in the narrow instruction. */
19559 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
19565 /* Assume worst case for symbols not known to be in the same section. */
19566 if (!S_IS_DEFINED (fragp->fr_symbol)
19567 || sec != S_GET_SEGMENT (fragp->fr_symbol)
19568 || S_IS_WEAK (fragp->fr_symbol))
19572 if (S_IS_DEFINED (fragp->fr_symbol)
19573 && ARM_IS_FUNC (fragp->fr_symbol))
19576 /* PR 12532. Global symbols with default visibility might
19577 be preempted, so do not relax relocations to them. */
19578 if ((ELF_ST_VISIBILITY (S_GET_OTHER (fragp->fr_symbol)) == STV_DEFAULT)
19579 && (! S_IS_LOCAL (fragp->fr_symbol)))
19583 val = relaxed_symbol_addr (fragp, stretch);
19584 addr = fragp->fr_address + fragp->fr_fix + 4;
19587 /* Offset is a signed value *2 */
19589 if (val >= limit || val < -limit)
19595 /* Relax a machine dependent frag. This returns the amount by which
19596 the current size of the frag should change. */
19599 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
19604 oldsize = fragp->fr_var;
19605 switch (fragp->fr_subtype)
19607 case T_MNEM_ldr_pc2:
19608 newsize = relax_adr (fragp, sec, stretch);
19610 case T_MNEM_ldr_pc:
19611 case T_MNEM_ldr_sp:
19612 case T_MNEM_str_sp:
19613 newsize = relax_immediate (fragp, 8, 2);
19617 newsize = relax_immediate (fragp, 5, 2);
19621 newsize = relax_immediate (fragp, 5, 1);
19625 newsize = relax_immediate (fragp, 5, 0);
19628 newsize = relax_adr (fragp, sec, stretch);
19634 newsize = relax_immediate (fragp, 8, 0);
19637 newsize = relax_branch (fragp, sec, 11, stretch);
19640 newsize = relax_branch (fragp, sec, 8, stretch);
19642 case T_MNEM_add_sp:
19643 case T_MNEM_add_pc:
19644 newsize = relax_immediate (fragp, 8, 2);
19646 case T_MNEM_inc_sp:
19647 case T_MNEM_dec_sp:
19648 newsize = relax_immediate (fragp, 7, 2);
19654 newsize = relax_addsub (fragp, sec);
19660 fragp->fr_var = newsize;
19661 /* Freeze wide instructions that are at or before the same location as
19662 in the previous pass. This avoids infinite loops.
19663 Don't freeze them unconditionally because targets may be artificially
19664 misaligned by the expansion of preceding frags. */
19665 if (stretch <= 0 && newsize > 2)
19667 md_convert_frag (sec->owner, sec, fragp);
19671 return newsize - oldsize;
19674 /* Round up a section size to the appropriate boundary. */
19677 md_section_align (segT segment ATTRIBUTE_UNUSED,
19680 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
19681 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
19683 /* For a.out, force the section size to be aligned. If we don't do
19684 this, BFD will align it for us, but it will not write out the
19685 final bytes of the section. This may be a bug in BFD, but it is
19686 easier to fix it here since that is how the other a.out targets
19690 align = bfd_get_section_alignment (stdoutput, segment);
19691 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
19698 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
19699 of an rs_align_code fragment. */
19702 arm_handle_align (fragS * fragP)
19704 static char const arm_noop[2][2][4] =
19707 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
19708 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
19711 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
19712 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
19715 static char const thumb_noop[2][2][2] =
19718 {0xc0, 0x46}, /* LE */
19719 {0x46, 0xc0}, /* BE */
19722 {0x00, 0xbf}, /* LE */
19723 {0xbf, 0x00} /* BE */
19726 static char const wide_thumb_noop[2][4] =
19727 { /* Wide Thumb-2 */
19728 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
19729 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
19732 unsigned bytes, fix, noop_size;
19735 const char *narrow_noop = NULL;
19740 if (fragP->fr_type != rs_align_code)
19743 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
19744 p = fragP->fr_literal + fragP->fr_fix;
19747 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
19748 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
19750 gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
19752 if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
19754 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
19756 narrow_noop = thumb_noop[1][target_big_endian];
19757 noop = wide_thumb_noop[target_big_endian];
19760 noop = thumb_noop[0][target_big_endian];
19768 noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
19769 [target_big_endian];
19776 fragP->fr_var = noop_size;
19778 if (bytes & (noop_size - 1))
19780 fix = bytes & (noop_size - 1);
19782 insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
19784 memset (p, 0, fix);
19791 if (bytes & noop_size)
19793 /* Insert a narrow noop. */
19794 memcpy (p, narrow_noop, noop_size);
19796 bytes -= noop_size;
19800 /* Use wide noops for the remainder */
19804 while (bytes >= noop_size)
19806 memcpy (p, noop, noop_size);
19808 bytes -= noop_size;
19812 fragP->fr_fix += fix;
19815 /* Called from md_do_align. Used to create an alignment
19816 frag in a code section. */
19819 arm_frag_align_code (int n, int max)
19823 /* We assume that there will never be a requirement
19824 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
19825 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
19830 _("alignments greater than %d bytes not supported in .text sections."),
19831 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
19832 as_fatal ("%s", err_msg);
19835 p = frag_var (rs_align_code,
19836 MAX_MEM_FOR_RS_ALIGN_CODE,
19838 (relax_substateT) max,
19845 /* Perform target specific initialisation of a frag.
19846 Note - despite the name this initialisation is not done when the frag
19847 is created, but only when its type is assigned. A frag can be created
19848 and used a long time before its type is set, so beware of assuming that
19849 this initialisationis performed first. */
19853 arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
19855 /* Record whether this frag is in an ARM or a THUMB area. */
19856 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
19859 #else /* OBJ_ELF is defined. */
19861 arm_init_frag (fragS * fragP, int max_chars)
19863 /* If the current ARM vs THUMB mode has not already
19864 been recorded into this frag then do so now. */
19865 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
19867 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
19869 /* Record a mapping symbol for alignment frags. We will delete this
19870 later if the alignment ends up empty. */
19871 switch (fragP->fr_type)
19874 case rs_align_test:
19876 mapping_state_2 (MAP_DATA, max_chars);
19878 case rs_align_code:
19879 mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
19887 /* When we change sections we need to issue a new mapping symbol. */
19890 arm_elf_change_section (void)
19892 /* Link an unlinked unwind index table section to the .text section. */
19893 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
19894 && elf_linked_to_section (now_seg) == NULL)
19895 elf_linked_to_section (now_seg) = text_section;
19899 arm_elf_section_type (const char * str, size_t len)
19901 if (len == 5 && strncmp (str, "exidx", 5) == 0)
19902 return SHT_ARM_EXIDX;
19907 /* Code to deal with unwinding tables. */
19909 static void add_unwind_adjustsp (offsetT);
19911 /* Generate any deferred unwind frame offset. */
19914 flush_pending_unwind (void)
19918 offset = unwind.pending_offset;
19919 unwind.pending_offset = 0;
19921 add_unwind_adjustsp (offset);
19924 /* Add an opcode to this list for this function. Two-byte opcodes should
19925 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
19929 add_unwind_opcode (valueT op, int length)
19931 /* Add any deferred stack adjustment. */
19932 if (unwind.pending_offset)
19933 flush_pending_unwind ();
19935 unwind.sp_restored = 0;
19937 if (unwind.opcode_count + length > unwind.opcode_alloc)
19939 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
19940 if (unwind.opcodes)
19941 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
19942 unwind.opcode_alloc);
19944 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
19949 unwind.opcodes[unwind.opcode_count] = op & 0xff;
19951 unwind.opcode_count++;
19955 /* Add unwind opcodes to adjust the stack pointer. */
19958 add_unwind_adjustsp (offsetT offset)
19962 if (offset > 0x200)
19964 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
19969 /* Long form: 0xb2, uleb128. */
19970 /* This might not fit in a word so add the individual bytes,
19971 remembering the list is built in reverse order. */
19972 o = (valueT) ((offset - 0x204) >> 2);
19974 add_unwind_opcode (0, 1);
19976 /* Calculate the uleb128 encoding of the offset. */
19980 bytes[n] = o & 0x7f;
19986 /* Add the insn. */
19988 add_unwind_opcode (bytes[n - 1], 1);
19989 add_unwind_opcode (0xb2, 1);
19991 else if (offset > 0x100)
19993 /* Two short opcodes. */
19994 add_unwind_opcode (0x3f, 1);
19995 op = (offset - 0x104) >> 2;
19996 add_unwind_opcode (op, 1);
19998 else if (offset > 0)
20000 /* Short opcode. */
20001 op = (offset - 4) >> 2;
20002 add_unwind_opcode (op, 1);
20004 else if (offset < 0)
20007 while (offset > 0x100)
20009 add_unwind_opcode (0x7f, 1);
20012 op = ((offset - 4) >> 2) | 0x40;
20013 add_unwind_opcode (op, 1);
20017 /* Finish the list of unwind opcodes for this function. */
20019 finish_unwind_opcodes (void)
20023 if (unwind.fp_used)
20025 /* Adjust sp as necessary. */
20026 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
20027 flush_pending_unwind ();
20029 /* After restoring sp from the frame pointer. */
20030 op = 0x90 | unwind.fp_reg;
20031 add_unwind_opcode (op, 1);
20034 flush_pending_unwind ();
20038 /* Start an exception table entry. If idx is nonzero this is an index table
20042 start_unwind_section (const segT text_seg, int idx)
20044 const char * text_name;
20045 const char * prefix;
20046 const char * prefix_once;
20047 const char * group_name;
20051 size_t sec_name_len;
20058 prefix = ELF_STRING_ARM_unwind;
20059 prefix_once = ELF_STRING_ARM_unwind_once;
20060 type = SHT_ARM_EXIDX;
20064 prefix = ELF_STRING_ARM_unwind_info;
20065 prefix_once = ELF_STRING_ARM_unwind_info_once;
20066 type = SHT_PROGBITS;
20069 text_name = segment_name (text_seg);
20070 if (streq (text_name, ".text"))
20073 if (strncmp (text_name, ".gnu.linkonce.t.",
20074 strlen (".gnu.linkonce.t.")) == 0)
20076 prefix = prefix_once;
20077 text_name += strlen (".gnu.linkonce.t.");
20080 prefix_len = strlen (prefix);
20081 text_len = strlen (text_name);
20082 sec_name_len = prefix_len + text_len;
20083 sec_name = (char *) xmalloc (sec_name_len + 1);
20084 memcpy (sec_name, prefix, prefix_len);
20085 memcpy (sec_name + prefix_len, text_name, text_len);
20086 sec_name[prefix_len + text_len] = '\0';
20092 /* Handle COMDAT group. */
20093 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
20095 group_name = elf_group_name (text_seg);
20096 if (group_name == NULL)
20098 as_bad (_("Group section `%s' has no group signature"),
20099 segment_name (text_seg));
20100 ignore_rest_of_line ();
20103 flags |= SHF_GROUP;
20107 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
20109 /* Set the section link for index tables. */
20111 elf_linked_to_section (now_seg) = text_seg;
20115 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
20116 personality routine data. Returns zero, or the index table value for
20117 and inline entry. */
20120 create_unwind_entry (int have_data)
20125 /* The current word of data. */
20127 /* The number of bytes left in this word. */
20130 finish_unwind_opcodes ();
20132 /* Remember the current text section. */
20133 unwind.saved_seg = now_seg;
20134 unwind.saved_subseg = now_subseg;
20136 start_unwind_section (now_seg, 0);
20138 if (unwind.personality_routine == NULL)
20140 if (unwind.personality_index == -2)
20143 as_bad (_("handlerdata in cantunwind frame"));
20144 return 1; /* EXIDX_CANTUNWIND. */
20147 /* Use a default personality routine if none is specified. */
20148 if (unwind.personality_index == -1)
20150 if (unwind.opcode_count > 3)
20151 unwind.personality_index = 1;
20153 unwind.personality_index = 0;
20156 /* Space for the personality routine entry. */
20157 if (unwind.personality_index == 0)
20159 if (unwind.opcode_count > 3)
20160 as_bad (_("too many unwind opcodes for personality routine 0"));
20164 /* All the data is inline in the index table. */
20167 while (unwind.opcode_count > 0)
20169 unwind.opcode_count--;
20170 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
20174 /* Pad with "finish" opcodes. */
20176 data = (data << 8) | 0xb0;
20183 /* We get two opcodes "free" in the first word. */
20184 size = unwind.opcode_count - 2;
20188 gas_assert (unwind.personality_index == -1);
20190 /* An extra byte is required for the opcode count. */
20191 size = unwind.opcode_count + 1;
20194 size = (size + 3) >> 2;
20196 as_bad (_("too many unwind opcodes"));
20198 frag_align (2, 0, 0);
20199 record_alignment (now_seg, 2);
20200 unwind.table_entry = expr_build_dot ();
20202 /* Allocate the table entry. */
20203 ptr = frag_more ((size << 2) + 4);
20204 /* PR 13449: Zero the table entries in case some of them are not used. */
20205 memset (ptr, 0, (size << 2) + 4);
20206 where = frag_now_fix () - ((size << 2) + 4);
20208 switch (unwind.personality_index)
20211 /* ??? Should this be a PLT generating relocation? */
20212 /* Custom personality routine. */
20213 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
20214 BFD_RELOC_ARM_PREL31);
20219 /* Set the first byte to the number of additional words. */
20220 data = size > 0 ? size - 1 : 0;
20224 /* ABI defined personality routines. */
20226 /* Three opcodes bytes are packed into the first word. */
20233 /* The size and first two opcode bytes go in the first word. */
20234 data = ((0x80 + unwind.personality_index) << 8) | size;
20239 /* Should never happen. */
20243 /* Pack the opcodes into words (MSB first), reversing the list at the same
20245 while (unwind.opcode_count > 0)
20249 md_number_to_chars (ptr, data, 4);
20254 unwind.opcode_count--;
20256 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
20259 /* Finish off the last word. */
20262 /* Pad with "finish" opcodes. */
20264 data = (data << 8) | 0xb0;
20266 md_number_to_chars (ptr, data, 4);
20271 /* Add an empty descriptor if there is no user-specified data. */
20272 ptr = frag_more (4);
20273 md_number_to_chars (ptr, 0, 4);
20280 /* Initialize the DWARF-2 unwind information for this procedure. */
20283 tc_arm_frame_initial_instructions (void)
20285 cfi_add_CFA_def_cfa (REG_SP, 0);
20287 #endif /* OBJ_ELF */
20289 /* Convert REGNAME to a DWARF-2 register number. */
20292 tc_arm_regname_to_dw2regnum (char *regname)
20294 int reg = arm_reg_parse (®name, REG_TYPE_RN);
20304 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
20308 exp.X_op = O_secrel;
20309 exp.X_add_symbol = symbol;
20310 exp.X_add_number = 0;
20311 emit_expr (&exp, size);
20315 /* MD interface: Symbol and relocation handling. */
20317 /* Return the address within the segment that a PC-relative fixup is
20318 relative to. For ARM, PC-relative fixups applied to instructions
20319 are generally relative to the location of the fixup plus 8 bytes.
20320 Thumb branches are offset by 4, and Thumb loads relative to PC
20321 require special handling. */
20324 md_pcrel_from_section (fixS * fixP, segT seg)
20326 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
20328 /* If this is pc-relative and we are going to emit a relocation
20329 then we just want to put out any pipeline compensation that the linker
20330 will need. Otherwise we want to use the calculated base.
20331 For WinCE we skip the bias for externals as well, since this
20332 is how the MS ARM-CE assembler behaves and we want to be compatible. */
20334 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
20335 || (arm_force_relocation (fixP)
20337 && !S_IS_EXTERNAL (fixP->fx_addsy)
20343 switch (fixP->fx_r_type)
20345 /* PC relative addressing on the Thumb is slightly odd as the
20346 bottom two bits of the PC are forced to zero for the
20347 calculation. This happens *after* application of the
20348 pipeline offset. However, Thumb adrl already adjusts for
20349 this, so we need not do it again. */
20350 case BFD_RELOC_ARM_THUMB_ADD:
20353 case BFD_RELOC_ARM_THUMB_OFFSET:
20354 case BFD_RELOC_ARM_T32_OFFSET_IMM:
20355 case BFD_RELOC_ARM_T32_ADD_PC12:
20356 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
20357 return (base + 4) & ~3;
20359 /* Thumb branches are simply offset by +4. */
20360 case BFD_RELOC_THUMB_PCREL_BRANCH7:
20361 case BFD_RELOC_THUMB_PCREL_BRANCH9:
20362 case BFD_RELOC_THUMB_PCREL_BRANCH12:
20363 case BFD_RELOC_THUMB_PCREL_BRANCH20:
20364 case BFD_RELOC_THUMB_PCREL_BRANCH25:
20367 case BFD_RELOC_THUMB_PCREL_BRANCH23:
20369 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20370 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20371 && ARM_IS_FUNC (fixP->fx_addsy)
20372 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20373 base = fixP->fx_where + fixP->fx_frag->fr_address;
20376 /* BLX is like branches above, but forces the low two bits of PC to
20378 case BFD_RELOC_THUMB_PCREL_BLX:
20380 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20381 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20382 && THUMB_IS_FUNC (fixP->fx_addsy)
20383 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20384 base = fixP->fx_where + fixP->fx_frag->fr_address;
20385 return (base + 4) & ~3;
20387 /* ARM mode branches are offset by +8. However, the Windows CE
20388 loader expects the relocation not to take this into account. */
20389 case BFD_RELOC_ARM_PCREL_BLX:
20391 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20392 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20393 && ARM_IS_FUNC (fixP->fx_addsy)
20394 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20395 base = fixP->fx_where + fixP->fx_frag->fr_address;
20398 case BFD_RELOC_ARM_PCREL_CALL:
20400 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20401 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20402 && THUMB_IS_FUNC (fixP->fx_addsy)
20403 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20404 base = fixP->fx_where + fixP->fx_frag->fr_address;
20407 case BFD_RELOC_ARM_PCREL_BRANCH:
20408 case BFD_RELOC_ARM_PCREL_JUMP:
20409 case BFD_RELOC_ARM_PLT32:
20411 /* When handling fixups immediately, because we have already
20412 discovered the value of a symbol, or the address of the frag involved
20413 we must account for the offset by +8, as the OS loader will never see the reloc.
20414 see fixup_segment() in write.c
20415 The S_IS_EXTERNAL test handles the case of global symbols.
20416 Those need the calculated base, not just the pipe compensation the linker will need. */
20418 && fixP->fx_addsy != NULL
20419 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20420 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
20428 /* ARM mode loads relative to PC are also offset by +8. Unlike
20429 branches, the Windows CE loader *does* expect the relocation
20430 to take this into account. */
20431 case BFD_RELOC_ARM_OFFSET_IMM:
20432 case BFD_RELOC_ARM_OFFSET_IMM8:
20433 case BFD_RELOC_ARM_HWLITERAL:
20434 case BFD_RELOC_ARM_LITERAL:
20435 case BFD_RELOC_ARM_CP_OFF_IMM:
20439 /* Other PC-relative relocations are un-offset. */
20445 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
20446 Otherwise we have no need to default values of symbols. */
20449 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
20452 if (name[0] == '_' && name[1] == 'G'
20453 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
20457 if (symbol_find (name))
20458 as_bad (_("GOT already in the symbol table"));
20460 GOT_symbol = symbol_new (name, undefined_section,
20461 (valueT) 0, & zero_address_frag);
20471 /* Subroutine of md_apply_fix. Check to see if an immediate can be
20472 computed as two separate immediate values, added together. We
20473 already know that this value cannot be computed by just one ARM
20476 static unsigned int
20477 validate_immediate_twopart (unsigned int val,
20478 unsigned int * highpart)
20483 for (i = 0; i < 32; i += 2)
20484 if (((a = rotate_left (val, i)) & 0xff) != 0)
20490 * highpart = (a >> 8) | ((i + 24) << 7);
20492 else if (a & 0xff0000)
20494 if (a & 0xff000000)
20496 * highpart = (a >> 16) | ((i + 16) << 7);
20500 gas_assert (a & 0xff000000);
20501 * highpart = (a >> 24) | ((i + 8) << 7);
20504 return (a & 0xff) | (i << 7);
20511 validate_offset_imm (unsigned int val, int hwse)
20513 if ((hwse && val > 255) || val > 4095)
20518 /* Subroutine of md_apply_fix. Do those data_ops which can take a
20519 negative immediate constant by altering the instruction. A bit of
20524 by inverting the second operand, and
20527 by negating the second operand. */
20530 negate_data_op (unsigned long * instruction,
20531 unsigned long value)
20534 unsigned long negated, inverted;
20536 negated = encode_arm_immediate (-value);
20537 inverted = encode_arm_immediate (~value);
20539 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
20542 /* First negates. */
20543 case OPCODE_SUB: /* ADD <-> SUB */
20544 new_inst = OPCODE_ADD;
20549 new_inst = OPCODE_SUB;
20553 case OPCODE_CMP: /* CMP <-> CMN */
20554 new_inst = OPCODE_CMN;
20559 new_inst = OPCODE_CMP;
20563 /* Now Inverted ops. */
20564 case OPCODE_MOV: /* MOV <-> MVN */
20565 new_inst = OPCODE_MVN;
20570 new_inst = OPCODE_MOV;
20574 case OPCODE_AND: /* AND <-> BIC */
20575 new_inst = OPCODE_BIC;
20580 new_inst = OPCODE_AND;
20584 case OPCODE_ADC: /* ADC <-> SBC */
20585 new_inst = OPCODE_SBC;
20590 new_inst = OPCODE_ADC;
20594 /* We cannot do anything. */
20599 if (value == (unsigned) FAIL)
20602 *instruction &= OPCODE_MASK;
20603 *instruction |= new_inst << DATA_OP_SHIFT;
20607 /* Like negate_data_op, but for Thumb-2. */
20609 static unsigned int
20610 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
20614 unsigned int negated, inverted;
20616 negated = encode_thumb32_immediate (-value);
20617 inverted = encode_thumb32_immediate (~value);
20619 rd = (*instruction >> 8) & 0xf;
20620 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
20623 /* ADD <-> SUB. Includes CMP <-> CMN. */
20624 case T2_OPCODE_SUB:
20625 new_inst = T2_OPCODE_ADD;
20629 case T2_OPCODE_ADD:
20630 new_inst = T2_OPCODE_SUB;
20634 /* ORR <-> ORN. Includes MOV <-> MVN. */
20635 case T2_OPCODE_ORR:
20636 new_inst = T2_OPCODE_ORN;
20640 case T2_OPCODE_ORN:
20641 new_inst = T2_OPCODE_ORR;
20645 /* AND <-> BIC. TST has no inverted equivalent. */
20646 case T2_OPCODE_AND:
20647 new_inst = T2_OPCODE_BIC;
20654 case T2_OPCODE_BIC:
20655 new_inst = T2_OPCODE_AND;
20660 case T2_OPCODE_ADC:
20661 new_inst = T2_OPCODE_SBC;
20665 case T2_OPCODE_SBC:
20666 new_inst = T2_OPCODE_ADC;
20670 /* We cannot do anything. */
20675 if (value == (unsigned int)FAIL)
20678 *instruction &= T2_OPCODE_MASK;
20679 *instruction |= new_inst << T2_DATA_OP_SHIFT;
20683 /* Read a 32-bit thumb instruction from buf. */
20684 static unsigned long
20685 get_thumb32_insn (char * buf)
20687 unsigned long insn;
20688 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
20689 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20695 /* We usually want to set the low bit on the address of thumb function
20696 symbols. In particular .word foo - . should have the low bit set.
20697 Generic code tries to fold the difference of two symbols to
20698 a constant. Prevent this and force a relocation when the first symbols
20699 is a thumb function. */
20702 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
20704 if (op == O_subtract
20705 && l->X_op == O_symbol
20706 && r->X_op == O_symbol
20707 && THUMB_IS_FUNC (l->X_add_symbol))
20709 l->X_op = O_subtract;
20710 l->X_op_symbol = r->X_add_symbol;
20711 l->X_add_number -= r->X_add_number;
20715 /* Process as normal. */
20719 /* Encode Thumb2 unconditional branches and calls. The encoding
20720 for the 2 are identical for the immediate values. */
20723 encode_thumb2_b_bl_offset (char * buf, offsetT value)
20725 #define T2I1I2MASK ((1 << 13) | (1 << 11))
20728 addressT S, I1, I2, lo, hi;
20730 S = (value >> 24) & 0x01;
20731 I1 = (value >> 23) & 0x01;
20732 I2 = (value >> 22) & 0x01;
20733 hi = (value >> 12) & 0x3ff;
20734 lo = (value >> 1) & 0x7ff;
20735 newval = md_chars_to_number (buf, THUMB_SIZE);
20736 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20737 newval |= (S << 10) | hi;
20738 newval2 &= ~T2I1I2MASK;
20739 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
20740 md_number_to_chars (buf, newval, THUMB_SIZE);
20741 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
20745 md_apply_fix (fixS * fixP,
20749 offsetT value = * valP;
20751 unsigned int newimm;
20752 unsigned long temp;
20754 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
20756 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
20758 /* Note whether this will delete the relocation. */
20760 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
20763 /* On a 64-bit host, silently truncate 'value' to 32 bits for
20764 consistency with the behaviour on 32-bit hosts. Remember value
20766 value &= 0xffffffff;
20767 value ^= 0x80000000;
20768 value -= 0x80000000;
20771 fixP->fx_addnumber = value;
20773 /* Same treatment for fixP->fx_offset. */
20774 fixP->fx_offset &= 0xffffffff;
20775 fixP->fx_offset ^= 0x80000000;
20776 fixP->fx_offset -= 0x80000000;
20778 switch (fixP->fx_r_type)
20780 case BFD_RELOC_NONE:
20781 /* This will need to go in the object file. */
20785 case BFD_RELOC_ARM_IMMEDIATE:
20786 /* We claim that this fixup has been processed here,
20787 even if in fact we generate an error because we do
20788 not have a reloc for it, so tc_gen_reloc will reject it. */
20791 if (fixP->fx_addsy)
20793 const char *msg = 0;
20795 if (! S_IS_DEFINED (fixP->fx_addsy))
20796 msg = _("undefined symbol %s used as an immediate value");
20797 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
20798 msg = _("symbol %s is in a different section");
20799 else if (S_IS_WEAK (fixP->fx_addsy))
20800 msg = _("symbol %s is weak and may be overridden later");
20804 as_bad_where (fixP->fx_file, fixP->fx_line,
20805 msg, S_GET_NAME (fixP->fx_addsy));
20810 temp = md_chars_to_number (buf, INSN_SIZE);
20812 /* If the offset is negative, we should use encoding A2 for ADR. */
20813 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
20814 newimm = negate_data_op (&temp, value);
20817 newimm = encode_arm_immediate (value);
20819 /* If the instruction will fail, see if we can fix things up by
20820 changing the opcode. */
20821 if (newimm == (unsigned int) FAIL)
20822 newimm = negate_data_op (&temp, value);
20825 if (newimm == (unsigned int) FAIL)
20827 as_bad_where (fixP->fx_file, fixP->fx_line,
20828 _("invalid constant (%lx) after fixup"),
20829 (unsigned long) value);
20833 newimm |= (temp & 0xfffff000);
20834 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
20837 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
20839 unsigned int highpart = 0;
20840 unsigned int newinsn = 0xe1a00000; /* nop. */
20842 if (fixP->fx_addsy)
20844 const char *msg = 0;
20846 if (! S_IS_DEFINED (fixP->fx_addsy))
20847 msg = _("undefined symbol %s used as an immediate value");
20848 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
20849 msg = _("symbol %s is in a different section");
20850 else if (S_IS_WEAK (fixP->fx_addsy))
20851 msg = _("symbol %s is weak and may be overridden later");
20855 as_bad_where (fixP->fx_file, fixP->fx_line,
20856 msg, S_GET_NAME (fixP->fx_addsy));
20861 newimm = encode_arm_immediate (value);
20862 temp = md_chars_to_number (buf, INSN_SIZE);
20864 /* If the instruction will fail, see if we can fix things up by
20865 changing the opcode. */
20866 if (newimm == (unsigned int) FAIL
20867 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
20869 /* No ? OK - try using two ADD instructions to generate
20871 newimm = validate_immediate_twopart (value, & highpart);
20873 /* Yes - then make sure that the second instruction is
20875 if (newimm != (unsigned int) FAIL)
20877 /* Still No ? Try using a negated value. */
20878 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
20879 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
20880 /* Otherwise - give up. */
20883 as_bad_where (fixP->fx_file, fixP->fx_line,
20884 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
20889 /* Replace the first operand in the 2nd instruction (which
20890 is the PC) with the destination register. We have
20891 already added in the PC in the first instruction and we
20892 do not want to do it again. */
20893 newinsn &= ~ 0xf0000;
20894 newinsn |= ((newinsn & 0x0f000) << 4);
20897 newimm |= (temp & 0xfffff000);
20898 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
20900 highpart |= (newinsn & 0xfffff000);
20901 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
20905 case BFD_RELOC_ARM_OFFSET_IMM:
20906 if (!fixP->fx_done && seg->use_rela_p)
20909 case BFD_RELOC_ARM_LITERAL:
20915 if (validate_offset_imm (value, 0) == FAIL)
20917 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
20918 as_bad_where (fixP->fx_file, fixP->fx_line,
20919 _("invalid literal constant: pool needs to be closer"));
20921 as_bad_where (fixP->fx_file, fixP->fx_line,
20922 _("bad immediate value for offset (%ld)"),
20927 newval = md_chars_to_number (buf, INSN_SIZE);
20929 newval &= 0xfffff000;
20932 newval &= 0xff7ff000;
20933 newval |= value | (sign ? INDEX_UP : 0);
20935 md_number_to_chars (buf, newval, INSN_SIZE);
20938 case BFD_RELOC_ARM_OFFSET_IMM8:
20939 case BFD_RELOC_ARM_HWLITERAL:
20945 if (validate_offset_imm (value, 1) == FAIL)
20947 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
20948 as_bad_where (fixP->fx_file, fixP->fx_line,
20949 _("invalid literal constant: pool needs to be closer"));
20951 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
20956 newval = md_chars_to_number (buf, INSN_SIZE);
20958 newval &= 0xfffff0f0;
20961 newval &= 0xff7ff0f0;
20962 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
20964 md_number_to_chars (buf, newval, INSN_SIZE);
20967 case BFD_RELOC_ARM_T32_OFFSET_U8:
20968 if (value < 0 || value > 1020 || value % 4 != 0)
20969 as_bad_where (fixP->fx_file, fixP->fx_line,
20970 _("bad immediate value for offset (%ld)"), (long) value);
20973 newval = md_chars_to_number (buf+2, THUMB_SIZE);
20975 md_number_to_chars (buf+2, newval, THUMB_SIZE);
20978 case BFD_RELOC_ARM_T32_OFFSET_IMM:
20979 /* This is a complicated relocation used for all varieties of Thumb32
20980 load/store instruction with immediate offset:
20982 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
20983 *4, optional writeback(W)
20984 (doubleword load/store)
20986 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
20987 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
20988 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
20989 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
20990 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
20992 Uppercase letters indicate bits that are already encoded at
20993 this point. Lowercase letters are our problem. For the
20994 second block of instructions, the secondary opcode nybble
20995 (bits 8..11) is present, and bit 23 is zero, even if this is
20996 a PC-relative operation. */
20997 newval = md_chars_to_number (buf, THUMB_SIZE);
20999 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
21001 if ((newval & 0xf0000000) == 0xe0000000)
21003 /* Doubleword load/store: 8-bit offset, scaled by 4. */
21005 newval |= (1 << 23);
21008 if (value % 4 != 0)
21010 as_bad_where (fixP->fx_file, fixP->fx_line,
21011 _("offset not a multiple of 4"));
21017 as_bad_where (fixP->fx_file, fixP->fx_line,
21018 _("offset out of range"));
21023 else if ((newval & 0x000f0000) == 0x000f0000)
21025 /* PC-relative, 12-bit offset. */
21027 newval |= (1 << 23);
21032 as_bad_where (fixP->fx_file, fixP->fx_line,
21033 _("offset out of range"));
21038 else if ((newval & 0x00000100) == 0x00000100)
21040 /* Writeback: 8-bit, +/- offset. */
21042 newval |= (1 << 9);
21047 as_bad_where (fixP->fx_file, fixP->fx_line,
21048 _("offset out of range"));
21053 else if ((newval & 0x00000f00) == 0x00000e00)
21055 /* T-instruction: positive 8-bit offset. */
21056 if (value < 0 || value > 0xff)
21058 as_bad_where (fixP->fx_file, fixP->fx_line,
21059 _("offset out of range"));
21067 /* Positive 12-bit or negative 8-bit offset. */
21071 newval |= (1 << 23);
21081 as_bad_where (fixP->fx_file, fixP->fx_line,
21082 _("offset out of range"));
21089 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
21090 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
21093 case BFD_RELOC_ARM_SHIFT_IMM:
21094 newval = md_chars_to_number (buf, INSN_SIZE);
21095 if (((unsigned long) value) > 32
21097 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
21099 as_bad_where (fixP->fx_file, fixP->fx_line,
21100 _("shift expression is too large"));
21105 /* Shifts of zero must be done as lsl. */
21107 else if (value == 32)
21109 newval &= 0xfffff07f;
21110 newval |= (value & 0x1f) << 7;
21111 md_number_to_chars (buf, newval, INSN_SIZE);
21114 case BFD_RELOC_ARM_T32_IMMEDIATE:
21115 case BFD_RELOC_ARM_T32_ADD_IMM:
21116 case BFD_RELOC_ARM_T32_IMM12:
21117 case BFD_RELOC_ARM_T32_ADD_PC12:
21118 /* We claim that this fixup has been processed here,
21119 even if in fact we generate an error because we do
21120 not have a reloc for it, so tc_gen_reloc will reject it. */
21124 && ! S_IS_DEFINED (fixP->fx_addsy))
21126 as_bad_where (fixP->fx_file, fixP->fx_line,
21127 _("undefined symbol %s used as an immediate value"),
21128 S_GET_NAME (fixP->fx_addsy));
21132 newval = md_chars_to_number (buf, THUMB_SIZE);
21134 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
21137 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
21138 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
21140 newimm = encode_thumb32_immediate (value);
21141 if (newimm == (unsigned int) FAIL)
21142 newimm = thumb32_negate_data_op (&newval, value);
21144 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
21145 && newimm == (unsigned int) FAIL)
21147 /* Turn add/sum into addw/subw. */
21148 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
21149 newval = (newval & 0xfeffffff) | 0x02000000;
21150 /* No flat 12-bit imm encoding for addsw/subsw. */
21151 if ((newval & 0x00100000) == 0)
21153 /* 12 bit immediate for addw/subw. */
21157 newval ^= 0x00a00000;
21160 newimm = (unsigned int) FAIL;
21166 if (newimm == (unsigned int)FAIL)
21168 as_bad_where (fixP->fx_file, fixP->fx_line,
21169 _("invalid constant (%lx) after fixup"),
21170 (unsigned long) value);
21174 newval |= (newimm & 0x800) << 15;
21175 newval |= (newimm & 0x700) << 4;
21176 newval |= (newimm & 0x0ff);
21178 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
21179 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
21182 case BFD_RELOC_ARM_SMC:
21183 if (((unsigned long) value) > 0xffff)
21184 as_bad_where (fixP->fx_file, fixP->fx_line,
21185 _("invalid smc expression"));
21186 newval = md_chars_to_number (buf, INSN_SIZE);
21187 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
21188 md_number_to_chars (buf, newval, INSN_SIZE);
21191 case BFD_RELOC_ARM_HVC:
21192 if (((unsigned long) value) > 0xffff)
21193 as_bad_where (fixP->fx_file, fixP->fx_line,
21194 _("invalid hvc expression"));
21195 newval = md_chars_to_number (buf, INSN_SIZE);
21196 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
21197 md_number_to_chars (buf, newval, INSN_SIZE);
21200 case BFD_RELOC_ARM_SWI:
21201 if (fixP->tc_fix_data != 0)
21203 if (((unsigned long) value) > 0xff)
21204 as_bad_where (fixP->fx_file, fixP->fx_line,
21205 _("invalid swi expression"));
21206 newval = md_chars_to_number (buf, THUMB_SIZE);
21208 md_number_to_chars (buf, newval, THUMB_SIZE);
21212 if (((unsigned long) value) > 0x00ffffff)
21213 as_bad_where (fixP->fx_file, fixP->fx_line,
21214 _("invalid swi expression"));
21215 newval = md_chars_to_number (buf, INSN_SIZE);
21217 md_number_to_chars (buf, newval, INSN_SIZE);
21221 case BFD_RELOC_ARM_MULTI:
21222 if (((unsigned long) value) > 0xffff)
21223 as_bad_where (fixP->fx_file, fixP->fx_line,
21224 _("invalid expression in load/store multiple"));
21225 newval = value | md_chars_to_number (buf, INSN_SIZE);
21226 md_number_to_chars (buf, newval, INSN_SIZE);
21230 case BFD_RELOC_ARM_PCREL_CALL:
21232 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21234 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21235 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21236 && THUMB_IS_FUNC (fixP->fx_addsy))
21237 /* Flip the bl to blx. This is a simple flip
21238 bit here because we generate PCREL_CALL for
21239 unconditional bls. */
21241 newval = md_chars_to_number (buf, INSN_SIZE);
21242 newval = newval | 0x10000000;
21243 md_number_to_chars (buf, newval, INSN_SIZE);
21249 goto arm_branch_common;
21251 case BFD_RELOC_ARM_PCREL_JUMP:
21252 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21254 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21255 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21256 && THUMB_IS_FUNC (fixP->fx_addsy))
21258 /* This would map to a bl<cond>, b<cond>,
21259 b<always> to a Thumb function. We
21260 need to force a relocation for this particular
21262 newval = md_chars_to_number (buf, INSN_SIZE);
21266 case BFD_RELOC_ARM_PLT32:
21268 case BFD_RELOC_ARM_PCREL_BRANCH:
21270 goto arm_branch_common;
21272 case BFD_RELOC_ARM_PCREL_BLX:
21275 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21277 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21278 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21279 && ARM_IS_FUNC (fixP->fx_addsy))
21281 /* Flip the blx to a bl and warn. */
21282 const char *name = S_GET_NAME (fixP->fx_addsy);
21283 newval = 0xeb000000;
21284 as_warn_where (fixP->fx_file, fixP->fx_line,
21285 _("blx to '%s' an ARM ISA state function changed to bl"),
21287 md_number_to_chars (buf, newval, INSN_SIZE);
21293 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
21294 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
21298 /* We are going to store value (shifted right by two) in the
21299 instruction, in a 24 bit, signed field. Bits 26 through 32 either
21300 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
21301 also be be clear. */
21303 as_bad_where (fixP->fx_file, fixP->fx_line,
21304 _("misaligned branch destination"));
21305 if ((value & (offsetT)0xfe000000) != (offsetT)0
21306 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
21307 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21309 if (fixP->fx_done || !seg->use_rela_p)
21311 newval = md_chars_to_number (buf, INSN_SIZE);
21312 newval |= (value >> 2) & 0x00ffffff;
21313 /* Set the H bit on BLX instructions. */
21317 newval |= 0x01000000;
21319 newval &= ~0x01000000;
21321 md_number_to_chars (buf, newval, INSN_SIZE);
21325 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
21326 /* CBZ can only branch forward. */
21328 /* Attempts to use CBZ to branch to the next instruction
21329 (which, strictly speaking, are prohibited) will be turned into
21332 FIXME: It may be better to remove the instruction completely and
21333 perform relaxation. */
21336 newval = md_chars_to_number (buf, THUMB_SIZE);
21337 newval = 0xbf00; /* NOP encoding T1 */
21338 md_number_to_chars (buf, newval, THUMB_SIZE);
21343 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21345 if (fixP->fx_done || !seg->use_rela_p)
21347 newval = md_chars_to_number (buf, THUMB_SIZE);
21348 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
21349 md_number_to_chars (buf, newval, THUMB_SIZE);
21354 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
21355 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
21356 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21358 if (fixP->fx_done || !seg->use_rela_p)
21360 newval = md_chars_to_number (buf, THUMB_SIZE);
21361 newval |= (value & 0x1ff) >> 1;
21362 md_number_to_chars (buf, newval, THUMB_SIZE);
21366 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
21367 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
21368 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21370 if (fixP->fx_done || !seg->use_rela_p)
21372 newval = md_chars_to_number (buf, THUMB_SIZE);
21373 newval |= (value & 0xfff) >> 1;
21374 md_number_to_chars (buf, newval, THUMB_SIZE);
21378 case BFD_RELOC_THUMB_PCREL_BRANCH20:
21380 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21381 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21382 && ARM_IS_FUNC (fixP->fx_addsy)
21383 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21385 /* Force a relocation for a branch 20 bits wide. */
21388 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
21389 as_bad_where (fixP->fx_file, fixP->fx_line,
21390 _("conditional branch out of range"));
21392 if (fixP->fx_done || !seg->use_rela_p)
21395 addressT S, J1, J2, lo, hi;
21397 S = (value & 0x00100000) >> 20;
21398 J2 = (value & 0x00080000) >> 19;
21399 J1 = (value & 0x00040000) >> 18;
21400 hi = (value & 0x0003f000) >> 12;
21401 lo = (value & 0x00000ffe) >> 1;
21403 newval = md_chars_to_number (buf, THUMB_SIZE);
21404 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21405 newval |= (S << 10) | hi;
21406 newval2 |= (J1 << 13) | (J2 << 11) | lo;
21407 md_number_to_chars (buf, newval, THUMB_SIZE);
21408 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
21412 case BFD_RELOC_THUMB_PCREL_BLX:
21413 /* If there is a blx from a thumb state function to
21414 another thumb function flip this to a bl and warn
21418 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21419 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21420 && THUMB_IS_FUNC (fixP->fx_addsy))
21422 const char *name = S_GET_NAME (fixP->fx_addsy);
21423 as_warn_where (fixP->fx_file, fixP->fx_line,
21424 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
21426 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21427 newval = newval | 0x1000;
21428 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
21429 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
21434 goto thumb_bl_common;
21436 case BFD_RELOC_THUMB_PCREL_BRANCH23:
21437 /* A bl from Thumb state ISA to an internal ARM state function
21438 is converted to a blx. */
21440 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21441 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21442 && ARM_IS_FUNC (fixP->fx_addsy)
21443 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21445 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21446 newval = newval & ~0x1000;
21447 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
21448 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
21455 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
21456 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
21457 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
21460 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
21461 /* For a BLX instruction, make sure that the relocation is rounded up
21462 to a word boundary. This follows the semantics of the instruction
21463 which specifies that bit 1 of the target address will come from bit
21464 1 of the base address. */
21465 value = (value + 1) & ~ 1;
21467 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
21469 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
21470 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21471 else if ((value & ~0x1ffffff)
21472 && ((value & ~0x1ffffff) != ~0x1ffffff))
21473 as_bad_where (fixP->fx_file, fixP->fx_line,
21474 _("Thumb2 branch out of range"));
21477 if (fixP->fx_done || !seg->use_rela_p)
21478 encode_thumb2_b_bl_offset (buf, value);
21482 case BFD_RELOC_THUMB_PCREL_BRANCH25:
21483 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
21484 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21486 if (fixP->fx_done || !seg->use_rela_p)
21487 encode_thumb2_b_bl_offset (buf, value);
21492 if (fixP->fx_done || !seg->use_rela_p)
21493 md_number_to_chars (buf, value, 1);
21497 if (fixP->fx_done || !seg->use_rela_p)
21498 md_number_to_chars (buf, value, 2);
21502 case BFD_RELOC_ARM_TLS_CALL:
21503 case BFD_RELOC_ARM_THM_TLS_CALL:
21504 case BFD_RELOC_ARM_TLS_DESCSEQ:
21505 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
21506 S_SET_THREAD_LOCAL (fixP->fx_addsy);
21509 case BFD_RELOC_ARM_TLS_GOTDESC:
21510 case BFD_RELOC_ARM_TLS_GD32:
21511 case BFD_RELOC_ARM_TLS_LE32:
21512 case BFD_RELOC_ARM_TLS_IE32:
21513 case BFD_RELOC_ARM_TLS_LDM32:
21514 case BFD_RELOC_ARM_TLS_LDO32:
21515 S_SET_THREAD_LOCAL (fixP->fx_addsy);
21518 case BFD_RELOC_ARM_GOT32:
21519 case BFD_RELOC_ARM_GOTOFF:
21520 if (fixP->fx_done || !seg->use_rela_p)
21521 md_number_to_chars (buf, 0, 4);
21524 case BFD_RELOC_ARM_GOT_PREL:
21525 if (fixP->fx_done || !seg->use_rela_p)
21526 md_number_to_chars (buf, value, 4);
21529 case BFD_RELOC_ARM_TARGET2:
21530 /* TARGET2 is not partial-inplace, so we need to write the
21531 addend here for REL targets, because it won't be written out
21532 during reloc processing later. */
21533 if (fixP->fx_done || !seg->use_rela_p)
21534 md_number_to_chars (buf, fixP->fx_offset, 4);
21538 case BFD_RELOC_RVA:
21540 case BFD_RELOC_ARM_TARGET1:
21541 case BFD_RELOC_ARM_ROSEGREL32:
21542 case BFD_RELOC_ARM_SBREL32:
21543 case BFD_RELOC_32_PCREL:
21545 case BFD_RELOC_32_SECREL:
21547 if (fixP->fx_done || !seg->use_rela_p)
21549 /* For WinCE we only do this for pcrel fixups. */
21550 if (fixP->fx_done || fixP->fx_pcrel)
21552 md_number_to_chars (buf, value, 4);
21556 case BFD_RELOC_ARM_PREL31:
21557 if (fixP->fx_done || !seg->use_rela_p)
21559 newval = md_chars_to_number (buf, 4) & 0x80000000;
21560 if ((value ^ (value >> 1)) & 0x40000000)
21562 as_bad_where (fixP->fx_file, fixP->fx_line,
21563 _("rel31 relocation overflow"));
21565 newval |= value & 0x7fffffff;
21566 md_number_to_chars (buf, newval, 4);
21571 case BFD_RELOC_ARM_CP_OFF_IMM:
21572 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
21573 if (value < -1023 || value > 1023 || (value & 3))
21574 as_bad_where (fixP->fx_file, fixP->fx_line,
21575 _("co-processor offset out of range"));
21580 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
21581 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
21582 newval = md_chars_to_number (buf, INSN_SIZE);
21584 newval = get_thumb32_insn (buf);
21586 newval &= 0xffffff00;
21589 newval &= 0xff7fff00;
21590 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
21592 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
21593 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
21594 md_number_to_chars (buf, newval, INSN_SIZE);
21596 put_thumb32_insn (buf, newval);
21599 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
21600 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
21601 if (value < -255 || value > 255)
21602 as_bad_where (fixP->fx_file, fixP->fx_line,
21603 _("co-processor offset out of range"));
21605 goto cp_off_common;
21607 case BFD_RELOC_ARM_THUMB_OFFSET:
21608 newval = md_chars_to_number (buf, THUMB_SIZE);
21609 /* Exactly what ranges, and where the offset is inserted depends
21610 on the type of instruction, we can establish this from the
21612 switch (newval >> 12)
21614 case 4: /* PC load. */
21615 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
21616 forced to zero for these loads; md_pcrel_from has already
21617 compensated for this. */
21619 as_bad_where (fixP->fx_file, fixP->fx_line,
21620 _("invalid offset, target not word aligned (0x%08lX)"),
21621 (((unsigned long) fixP->fx_frag->fr_address
21622 + (unsigned long) fixP->fx_where) & ~3)
21623 + (unsigned long) value);
21625 if (value & ~0x3fc)
21626 as_bad_where (fixP->fx_file, fixP->fx_line,
21627 _("invalid offset, value too big (0x%08lX)"),
21630 newval |= value >> 2;
21633 case 9: /* SP load/store. */
21634 if (value & ~0x3fc)
21635 as_bad_where (fixP->fx_file, fixP->fx_line,
21636 _("invalid offset, value too big (0x%08lX)"),
21638 newval |= value >> 2;
21641 case 6: /* Word load/store. */
21643 as_bad_where (fixP->fx_file, fixP->fx_line,
21644 _("invalid offset, value too big (0x%08lX)"),
21646 newval |= value << 4; /* 6 - 2. */
21649 case 7: /* Byte load/store. */
21651 as_bad_where (fixP->fx_file, fixP->fx_line,
21652 _("invalid offset, value too big (0x%08lX)"),
21654 newval |= value << 6;
21657 case 8: /* Halfword load/store. */
21659 as_bad_where (fixP->fx_file, fixP->fx_line,
21660 _("invalid offset, value too big (0x%08lX)"),
21662 newval |= value << 5; /* 6 - 1. */
21666 as_bad_where (fixP->fx_file, fixP->fx_line,
21667 "Unable to process relocation for thumb opcode: %lx",
21668 (unsigned long) newval);
21671 md_number_to_chars (buf, newval, THUMB_SIZE);
21674 case BFD_RELOC_ARM_THUMB_ADD:
21675 /* This is a complicated relocation, since we use it for all of
21676 the following immediate relocations:
21680 9bit ADD/SUB SP word-aligned
21681 10bit ADD PC/SP word-aligned
21683 The type of instruction being processed is encoded in the
21690 newval = md_chars_to_number (buf, THUMB_SIZE);
21692 int rd = (newval >> 4) & 0xf;
21693 int rs = newval & 0xf;
21694 int subtract = !!(newval & 0x8000);
21696 /* Check for HI regs, only very restricted cases allowed:
21697 Adjusting SP, and using PC or SP to get an address. */
21698 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
21699 || (rs > 7 && rs != REG_SP && rs != REG_PC))
21700 as_bad_where (fixP->fx_file, fixP->fx_line,
21701 _("invalid Hi register with immediate"));
21703 /* If value is negative, choose the opposite instruction. */
21707 subtract = !subtract;
21709 as_bad_where (fixP->fx_file, fixP->fx_line,
21710 _("immediate value out of range"));
21715 if (value & ~0x1fc)
21716 as_bad_where (fixP->fx_file, fixP->fx_line,
21717 _("invalid immediate for stack address calculation"));
21718 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
21719 newval |= value >> 2;
21721 else if (rs == REG_PC || rs == REG_SP)
21723 if (subtract || value & ~0x3fc)
21724 as_bad_where (fixP->fx_file, fixP->fx_line,
21725 _("invalid immediate for address calculation (value = 0x%08lX)"),
21726 (unsigned long) value);
21727 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
21729 newval |= value >> 2;
21734 as_bad_where (fixP->fx_file, fixP->fx_line,
21735 _("immediate value out of range"));
21736 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
21737 newval |= (rd << 8) | value;
21742 as_bad_where (fixP->fx_file, fixP->fx_line,
21743 _("immediate value out of range"));
21744 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
21745 newval |= rd | (rs << 3) | (value << 6);
21748 md_number_to_chars (buf, newval, THUMB_SIZE);
21751 case BFD_RELOC_ARM_THUMB_IMM:
21752 newval = md_chars_to_number (buf, THUMB_SIZE);
21753 if (value < 0 || value > 255)
21754 as_bad_where (fixP->fx_file, fixP->fx_line,
21755 _("invalid immediate: %ld is out of range"),
21758 md_number_to_chars (buf, newval, THUMB_SIZE);
21761 case BFD_RELOC_ARM_THUMB_SHIFT:
21762 /* 5bit shift value (0..32). LSL cannot take 32. */
21763 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
21764 temp = newval & 0xf800;
21765 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
21766 as_bad_where (fixP->fx_file, fixP->fx_line,
21767 _("invalid shift value: %ld"), (long) value);
21768 /* Shifts of zero must be encoded as LSL. */
21770 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
21771 /* Shifts of 32 are encoded as zero. */
21772 else if (value == 32)
21774 newval |= value << 6;
21775 md_number_to_chars (buf, newval, THUMB_SIZE);
21778 case BFD_RELOC_VTABLE_INHERIT:
21779 case BFD_RELOC_VTABLE_ENTRY:
21783 case BFD_RELOC_ARM_MOVW:
21784 case BFD_RELOC_ARM_MOVT:
21785 case BFD_RELOC_ARM_THUMB_MOVW:
21786 case BFD_RELOC_ARM_THUMB_MOVT:
21787 if (fixP->fx_done || !seg->use_rela_p)
21789 /* REL format relocations are limited to a 16-bit addend. */
21790 if (!fixP->fx_done)
21792 if (value < -0x8000 || value > 0x7fff)
21793 as_bad_where (fixP->fx_file, fixP->fx_line,
21794 _("offset out of range"));
21796 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
21797 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
21802 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
21803 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
21805 newval = get_thumb32_insn (buf);
21806 newval &= 0xfbf08f00;
21807 newval |= (value & 0xf000) << 4;
21808 newval |= (value & 0x0800) << 15;
21809 newval |= (value & 0x0700) << 4;
21810 newval |= (value & 0x00ff);
21811 put_thumb32_insn (buf, newval);
21815 newval = md_chars_to_number (buf, 4);
21816 newval &= 0xfff0f000;
21817 newval |= value & 0x0fff;
21818 newval |= (value & 0xf000) << 4;
21819 md_number_to_chars (buf, newval, 4);
21824 case BFD_RELOC_ARM_ALU_PC_G0_NC:
21825 case BFD_RELOC_ARM_ALU_PC_G0:
21826 case BFD_RELOC_ARM_ALU_PC_G1_NC:
21827 case BFD_RELOC_ARM_ALU_PC_G1:
21828 case BFD_RELOC_ARM_ALU_PC_G2:
21829 case BFD_RELOC_ARM_ALU_SB_G0_NC:
21830 case BFD_RELOC_ARM_ALU_SB_G0:
21831 case BFD_RELOC_ARM_ALU_SB_G1_NC:
21832 case BFD_RELOC_ARM_ALU_SB_G1:
21833 case BFD_RELOC_ARM_ALU_SB_G2:
21834 gas_assert (!fixP->fx_done);
21835 if (!seg->use_rela_p)
21838 bfd_vma encoded_addend;
21839 bfd_vma addend_abs = abs (value);
21841 /* Check that the absolute value of the addend can be
21842 expressed as an 8-bit constant plus a rotation. */
21843 encoded_addend = encode_arm_immediate (addend_abs);
21844 if (encoded_addend == (unsigned int) FAIL)
21845 as_bad_where (fixP->fx_file, fixP->fx_line,
21846 _("the offset 0x%08lX is not representable"),
21847 (unsigned long) addend_abs);
21849 /* Extract the instruction. */
21850 insn = md_chars_to_number (buf, INSN_SIZE);
21852 /* If the addend is positive, use an ADD instruction.
21853 Otherwise use a SUB. Take care not to destroy the S bit. */
21854 insn &= 0xff1fffff;
21860 /* Place the encoded addend into the first 12 bits of the
21862 insn &= 0xfffff000;
21863 insn |= encoded_addend;
21865 /* Update the instruction. */
21866 md_number_to_chars (buf, insn, INSN_SIZE);
21870 case BFD_RELOC_ARM_LDR_PC_G0:
21871 case BFD_RELOC_ARM_LDR_PC_G1:
21872 case BFD_RELOC_ARM_LDR_PC_G2:
21873 case BFD_RELOC_ARM_LDR_SB_G0:
21874 case BFD_RELOC_ARM_LDR_SB_G1:
21875 case BFD_RELOC_ARM_LDR_SB_G2:
21876 gas_assert (!fixP->fx_done);
21877 if (!seg->use_rela_p)
21880 bfd_vma addend_abs = abs (value);
21882 /* Check that the absolute value of the addend can be
21883 encoded in 12 bits. */
21884 if (addend_abs >= 0x1000)
21885 as_bad_where (fixP->fx_file, fixP->fx_line,
21886 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
21887 (unsigned long) addend_abs);
21889 /* Extract the instruction. */
21890 insn = md_chars_to_number (buf, INSN_SIZE);
21892 /* If the addend is negative, clear bit 23 of the instruction.
21893 Otherwise set it. */
21895 insn &= ~(1 << 23);
21899 /* Place the absolute value of the addend into the first 12 bits
21900 of the instruction. */
21901 insn &= 0xfffff000;
21902 insn |= addend_abs;
21904 /* Update the instruction. */
21905 md_number_to_chars (buf, insn, INSN_SIZE);
21909 case BFD_RELOC_ARM_LDRS_PC_G0:
21910 case BFD_RELOC_ARM_LDRS_PC_G1:
21911 case BFD_RELOC_ARM_LDRS_PC_G2:
21912 case BFD_RELOC_ARM_LDRS_SB_G0:
21913 case BFD_RELOC_ARM_LDRS_SB_G1:
21914 case BFD_RELOC_ARM_LDRS_SB_G2:
21915 gas_assert (!fixP->fx_done);
21916 if (!seg->use_rela_p)
21919 bfd_vma addend_abs = abs (value);
21921 /* Check that the absolute value of the addend can be
21922 encoded in 8 bits. */
21923 if (addend_abs >= 0x100)
21924 as_bad_where (fixP->fx_file, fixP->fx_line,
21925 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
21926 (unsigned long) addend_abs);
21928 /* Extract the instruction. */
21929 insn = md_chars_to_number (buf, INSN_SIZE);
21931 /* If the addend is negative, clear bit 23 of the instruction.
21932 Otherwise set it. */
21934 insn &= ~(1 << 23);
21938 /* Place the first four bits of the absolute value of the addend
21939 into the first 4 bits of the instruction, and the remaining
21940 four into bits 8 .. 11. */
21941 insn &= 0xfffff0f0;
21942 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
21944 /* Update the instruction. */
21945 md_number_to_chars (buf, insn, INSN_SIZE);
21949 case BFD_RELOC_ARM_LDC_PC_G0:
21950 case BFD_RELOC_ARM_LDC_PC_G1:
21951 case BFD_RELOC_ARM_LDC_PC_G2:
21952 case BFD_RELOC_ARM_LDC_SB_G0:
21953 case BFD_RELOC_ARM_LDC_SB_G1:
21954 case BFD_RELOC_ARM_LDC_SB_G2:
21955 gas_assert (!fixP->fx_done);
21956 if (!seg->use_rela_p)
21959 bfd_vma addend_abs = abs (value);
21961 /* Check that the absolute value of the addend is a multiple of
21962 four and, when divided by four, fits in 8 bits. */
21963 if (addend_abs & 0x3)
21964 as_bad_where (fixP->fx_file, fixP->fx_line,
21965 _("bad offset 0x%08lX (must be word-aligned)"),
21966 (unsigned long) addend_abs);
21968 if ((addend_abs >> 2) > 0xff)
21969 as_bad_where (fixP->fx_file, fixP->fx_line,
21970 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
21971 (unsigned long) addend_abs);
21973 /* Extract the instruction. */
21974 insn = md_chars_to_number (buf, INSN_SIZE);
21976 /* If the addend is negative, clear bit 23 of the instruction.
21977 Otherwise set it. */
21979 insn &= ~(1 << 23);
21983 /* Place the addend (divided by four) into the first eight
21984 bits of the instruction. */
21985 insn &= 0xfffffff0;
21986 insn |= addend_abs >> 2;
21988 /* Update the instruction. */
21989 md_number_to_chars (buf, insn, INSN_SIZE);
21993 case BFD_RELOC_ARM_V4BX:
21994 /* This will need to go in the object file. */
21998 case BFD_RELOC_UNUSED:
22000 as_bad_where (fixP->fx_file, fixP->fx_line,
22001 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
22005 /* Translate internal representation of relocation info to BFD target
22009 tc_gen_reloc (asection *section, fixS *fixp)
22012 bfd_reloc_code_real_type code;
22014 reloc = (arelent *) xmalloc (sizeof (arelent));
22016 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
22017 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
22018 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
22020 if (fixp->fx_pcrel)
22022 if (section->use_rela_p)
22023 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
22025 fixp->fx_offset = reloc->address;
22027 reloc->addend = fixp->fx_offset;
22029 switch (fixp->fx_r_type)
22032 if (fixp->fx_pcrel)
22034 code = BFD_RELOC_8_PCREL;
22039 if (fixp->fx_pcrel)
22041 code = BFD_RELOC_16_PCREL;
22046 if (fixp->fx_pcrel)
22048 code = BFD_RELOC_32_PCREL;
22052 case BFD_RELOC_ARM_MOVW:
22053 if (fixp->fx_pcrel)
22055 code = BFD_RELOC_ARM_MOVW_PCREL;
22059 case BFD_RELOC_ARM_MOVT:
22060 if (fixp->fx_pcrel)
22062 code = BFD_RELOC_ARM_MOVT_PCREL;
22066 case BFD_RELOC_ARM_THUMB_MOVW:
22067 if (fixp->fx_pcrel)
22069 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
22073 case BFD_RELOC_ARM_THUMB_MOVT:
22074 if (fixp->fx_pcrel)
22076 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
22080 case BFD_RELOC_NONE:
22081 case BFD_RELOC_ARM_PCREL_BRANCH:
22082 case BFD_RELOC_ARM_PCREL_BLX:
22083 case BFD_RELOC_RVA:
22084 case BFD_RELOC_THUMB_PCREL_BRANCH7:
22085 case BFD_RELOC_THUMB_PCREL_BRANCH9:
22086 case BFD_RELOC_THUMB_PCREL_BRANCH12:
22087 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22088 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22089 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22090 case BFD_RELOC_VTABLE_ENTRY:
22091 case BFD_RELOC_VTABLE_INHERIT:
22093 case BFD_RELOC_32_SECREL:
22095 code = fixp->fx_r_type;
22098 case BFD_RELOC_THUMB_PCREL_BLX:
22100 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
22101 code = BFD_RELOC_THUMB_PCREL_BRANCH23;
22104 code = BFD_RELOC_THUMB_PCREL_BLX;
22107 case BFD_RELOC_ARM_LITERAL:
22108 case BFD_RELOC_ARM_HWLITERAL:
22109 /* If this is called then the a literal has
22110 been referenced across a section boundary. */
22111 as_bad_where (fixp->fx_file, fixp->fx_line,
22112 _("literal referenced across section boundary"));
22116 case BFD_RELOC_ARM_TLS_CALL:
22117 case BFD_RELOC_ARM_THM_TLS_CALL:
22118 case BFD_RELOC_ARM_TLS_DESCSEQ:
22119 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
22120 case BFD_RELOC_ARM_GOT32:
22121 case BFD_RELOC_ARM_GOTOFF:
22122 case BFD_RELOC_ARM_GOT_PREL:
22123 case BFD_RELOC_ARM_PLT32:
22124 case BFD_RELOC_ARM_TARGET1:
22125 case BFD_RELOC_ARM_ROSEGREL32:
22126 case BFD_RELOC_ARM_SBREL32:
22127 case BFD_RELOC_ARM_PREL31:
22128 case BFD_RELOC_ARM_TARGET2:
22129 case BFD_RELOC_ARM_TLS_LE32:
22130 case BFD_RELOC_ARM_TLS_LDO32:
22131 case BFD_RELOC_ARM_PCREL_CALL:
22132 case BFD_RELOC_ARM_PCREL_JUMP:
22133 case BFD_RELOC_ARM_ALU_PC_G0_NC:
22134 case BFD_RELOC_ARM_ALU_PC_G0:
22135 case BFD_RELOC_ARM_ALU_PC_G1_NC:
22136 case BFD_RELOC_ARM_ALU_PC_G1:
22137 case BFD_RELOC_ARM_ALU_PC_G2:
22138 case BFD_RELOC_ARM_LDR_PC_G0:
22139 case BFD_RELOC_ARM_LDR_PC_G1:
22140 case BFD_RELOC_ARM_LDR_PC_G2:
22141 case BFD_RELOC_ARM_LDRS_PC_G0:
22142 case BFD_RELOC_ARM_LDRS_PC_G1:
22143 case BFD_RELOC_ARM_LDRS_PC_G2:
22144 case BFD_RELOC_ARM_LDC_PC_G0:
22145 case BFD_RELOC_ARM_LDC_PC_G1:
22146 case BFD_RELOC_ARM_LDC_PC_G2:
22147 case BFD_RELOC_ARM_ALU_SB_G0_NC:
22148 case BFD_RELOC_ARM_ALU_SB_G0:
22149 case BFD_RELOC_ARM_ALU_SB_G1_NC:
22150 case BFD_RELOC_ARM_ALU_SB_G1:
22151 case BFD_RELOC_ARM_ALU_SB_G2:
22152 case BFD_RELOC_ARM_LDR_SB_G0:
22153 case BFD_RELOC_ARM_LDR_SB_G1:
22154 case BFD_RELOC_ARM_LDR_SB_G2:
22155 case BFD_RELOC_ARM_LDRS_SB_G0:
22156 case BFD_RELOC_ARM_LDRS_SB_G1:
22157 case BFD_RELOC_ARM_LDRS_SB_G2:
22158 case BFD_RELOC_ARM_LDC_SB_G0:
22159 case BFD_RELOC_ARM_LDC_SB_G1:
22160 case BFD_RELOC_ARM_LDC_SB_G2:
22161 case BFD_RELOC_ARM_V4BX:
22162 code = fixp->fx_r_type;
22165 case BFD_RELOC_ARM_TLS_GOTDESC:
22166 case BFD_RELOC_ARM_TLS_GD32:
22167 case BFD_RELOC_ARM_TLS_IE32:
22168 case BFD_RELOC_ARM_TLS_LDM32:
22169 /* BFD will include the symbol's address in the addend.
22170 But we don't want that, so subtract it out again here. */
22171 if (!S_IS_COMMON (fixp->fx_addsy))
22172 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
22173 code = fixp->fx_r_type;
22177 case BFD_RELOC_ARM_IMMEDIATE:
22178 as_bad_where (fixp->fx_file, fixp->fx_line,
22179 _("internal relocation (type: IMMEDIATE) not fixed up"));
22182 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22183 as_bad_where (fixp->fx_file, fixp->fx_line,
22184 _("ADRL used for a symbol not defined in the same file"));
22187 case BFD_RELOC_ARM_OFFSET_IMM:
22188 if (section->use_rela_p)
22190 code = fixp->fx_r_type;
22194 if (fixp->fx_addsy != NULL
22195 && !S_IS_DEFINED (fixp->fx_addsy)
22196 && S_IS_LOCAL (fixp->fx_addsy))
22198 as_bad_where (fixp->fx_file, fixp->fx_line,
22199 _("undefined local label `%s'"),
22200 S_GET_NAME (fixp->fx_addsy));
22204 as_bad_where (fixp->fx_file, fixp->fx_line,
22205 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
22212 switch (fixp->fx_r_type)
22214 case BFD_RELOC_NONE: type = "NONE"; break;
22215 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
22216 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
22217 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
22218 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
22219 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
22220 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
22221 case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
22222 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
22223 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
22224 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
22225 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
22226 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
22227 default: type = _("<unknown>"); break;
22229 as_bad_where (fixp->fx_file, fixp->fx_line,
22230 _("cannot represent %s relocation in this object file format"),
22237 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
22239 && fixp->fx_addsy == GOT_symbol)
22241 code = BFD_RELOC_ARM_GOTPC;
22242 reloc->addend = fixp->fx_offset = reloc->address;
22246 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
22248 if (reloc->howto == NULL)
22250 as_bad_where (fixp->fx_file, fixp->fx_line,
22251 _("cannot represent %s relocation in this object file format"),
22252 bfd_get_reloc_code_name (code));
22256 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
22257 vtable entry to be used in the relocation's section offset. */
22258 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
22259 reloc->address = fixp->fx_offset;
22264 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
22267 cons_fix_new_arm (fragS * frag,
22272 bfd_reloc_code_real_type type;
22276 FIXME: @@ Should look at CPU word size. */
22280 type = BFD_RELOC_8;
22283 type = BFD_RELOC_16;
22287 type = BFD_RELOC_32;
22290 type = BFD_RELOC_64;
22295 if (exp->X_op == O_secrel)
22297 exp->X_op = O_symbol;
22298 type = BFD_RELOC_32_SECREL;
22302 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
22305 #if defined (OBJ_COFF)
22307 arm_validate_fix (fixS * fixP)
22309 /* If the destination of the branch is a defined symbol which does not have
22310 the THUMB_FUNC attribute, then we must be calling a function which has
22311 the (interfacearm) attribute. We look for the Thumb entry point to that
22312 function and change the branch to refer to that function instead. */
22313 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
22314 && fixP->fx_addsy != NULL
22315 && S_IS_DEFINED (fixP->fx_addsy)
22316 && ! THUMB_IS_FUNC (fixP->fx_addsy))
22318 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
22325 arm_force_relocation (struct fix * fixp)
22327 #if defined (OBJ_COFF) && defined (TE_PE)
22328 if (fixp->fx_r_type == BFD_RELOC_RVA)
22332 /* In case we have a call or a branch to a function in ARM ISA mode from
22333 a thumb function or vice-versa force the relocation. These relocations
22334 are cleared off for some cores that might have blx and simple transformations
22338 switch (fixp->fx_r_type)
22340 case BFD_RELOC_ARM_PCREL_JUMP:
22341 case BFD_RELOC_ARM_PCREL_CALL:
22342 case BFD_RELOC_THUMB_PCREL_BLX:
22343 if (THUMB_IS_FUNC (fixp->fx_addsy))
22347 case BFD_RELOC_ARM_PCREL_BLX:
22348 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22349 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22350 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22351 if (ARM_IS_FUNC (fixp->fx_addsy))
22360 /* Resolve these relocations even if the symbol is extern or weak.
22361 Technically this is probably wrong due to symbol preemption.
22362 In practice these relocations do not have enough range to be useful
22363 at dynamic link time, and some code (e.g. in the Linux kernel)
22364 expects these references to be resolved. */
22365 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
22366 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
22367 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
22368 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
22369 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22370 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
22371 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
22372 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
22373 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22374 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
22375 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
22376 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
22377 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
22378 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
22381 /* Always leave these relocations for the linker. */
22382 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
22383 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
22384 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
22387 /* Always generate relocations against function symbols. */
22388 if (fixp->fx_r_type == BFD_RELOC_32
22390 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
22393 return generic_force_reloc (fixp);
22396 #if defined (OBJ_ELF) || defined (OBJ_COFF)
22397 /* Relocations against function names must be left unadjusted,
22398 so that the linker can use this information to generate interworking
22399 stubs. The MIPS version of this function
22400 also prevents relocations that are mips-16 specific, but I do not
22401 know why it does this.
22404 There is one other problem that ought to be addressed here, but
22405 which currently is not: Taking the address of a label (rather
22406 than a function) and then later jumping to that address. Such
22407 addresses also ought to have their bottom bit set (assuming that
22408 they reside in Thumb code), but at the moment they will not. */
22411 arm_fix_adjustable (fixS * fixP)
22413 if (fixP->fx_addsy == NULL)
22416 /* Preserve relocations against symbols with function type. */
22417 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
22420 if (THUMB_IS_FUNC (fixP->fx_addsy)
22421 && fixP->fx_subsy == NULL)
22424 /* We need the symbol name for the VTABLE entries. */
22425 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
22426 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
22429 /* Don't allow symbols to be discarded on GOT related relocs. */
22430 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
22431 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
22432 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
22433 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
22434 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
22435 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
22436 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
22437 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
22438 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
22439 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
22440 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
22441 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
22442 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
22443 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
22446 /* Similarly for group relocations. */
22447 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
22448 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
22449 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
22452 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
22453 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
22454 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
22455 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
22456 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
22457 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
22458 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
22459 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
22460 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
22465 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
22470 elf32_arm_target_format (void)
22473 return (target_big_endian
22474 ? "elf32-bigarm-symbian"
22475 : "elf32-littlearm-symbian");
22476 #elif defined (TE_VXWORKS)
22477 return (target_big_endian
22478 ? "elf32-bigarm-vxworks"
22479 : "elf32-littlearm-vxworks");
22480 #elif defined (TE_NACL)
22481 return (target_big_endian
22482 ? "elf32-bigarm-nacl"
22483 : "elf32-littlearm-nacl");
22485 if (target_big_endian)
22486 return "elf32-bigarm";
22488 return "elf32-littlearm";
22493 armelf_frob_symbol (symbolS * symp,
22496 elf_frob_symbol (symp, puntp);
22500 /* MD interface: Finalization. */
22505 literal_pool * pool;
22507 /* Ensure that all the IT blocks are properly closed. */
22508 check_it_blocks_finished ();
22510 for (pool = list_of_pools; pool; pool = pool->next)
22512 /* Put it at the end of the relevant section. */
22513 subseg_set (pool->section, pool->sub_section);
22515 arm_elf_change_section ();
22522 /* Remove any excess mapping symbols generated for alignment frags in
22523 SEC. We may have created a mapping symbol before a zero byte
22524 alignment; remove it if there's a mapping symbol after the
22527 check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
22528 void *dummy ATTRIBUTE_UNUSED)
22530 segment_info_type *seginfo = seg_info (sec);
22533 if (seginfo == NULL || seginfo->frchainP == NULL)
22536 for (fragp = seginfo->frchainP->frch_root;
22538 fragp = fragp->fr_next)
22540 symbolS *sym = fragp->tc_frag_data.last_map;
22541 fragS *next = fragp->fr_next;
22543 /* Variable-sized frags have been converted to fixed size by
22544 this point. But if this was variable-sized to start with,
22545 there will be a fixed-size frag after it. So don't handle
22547 if (sym == NULL || next == NULL)
22550 if (S_GET_VALUE (sym) < next->fr_address)
22551 /* Not at the end of this frag. */
22553 know (S_GET_VALUE (sym) == next->fr_address);
22557 if (next->tc_frag_data.first_map != NULL)
22559 /* Next frag starts with a mapping symbol. Discard this
22561 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
22565 if (next->fr_next == NULL)
22567 /* This mapping symbol is at the end of the section. Discard
22569 know (next->fr_fix == 0 && next->fr_var == 0);
22570 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
22574 /* As long as we have empty frags without any mapping symbols,
22576 /* If the next frag is non-empty and does not start with a
22577 mapping symbol, then this mapping symbol is required. */
22578 if (next->fr_address != next->fr_next->fr_address)
22581 next = next->fr_next;
22583 while (next != NULL);
22588 /* Adjust the symbol table. This marks Thumb symbols as distinct from
22592 arm_adjust_symtab (void)
22597 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
22599 if (ARM_IS_THUMB (sym))
22601 if (THUMB_IS_FUNC (sym))
22603 /* Mark the symbol as a Thumb function. */
22604 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
22605 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
22606 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
22608 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
22609 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
22611 as_bad (_("%s: unexpected function type: %d"),
22612 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
22614 else switch (S_GET_STORAGE_CLASS (sym))
22617 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
22620 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
22623 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
22631 if (ARM_IS_INTERWORK (sym))
22632 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
22639 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
22641 if (ARM_IS_THUMB (sym))
22643 elf_symbol_type * elf_sym;
22645 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
22646 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
22648 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
22649 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
22651 /* If it's a .thumb_func, declare it as so,
22652 otherwise tag label as .code 16. */
22653 if (THUMB_IS_FUNC (sym))
22654 elf_sym->internal_elf_sym.st_target_internal
22655 = ST_BRANCH_TO_THUMB;
22656 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
22657 elf_sym->internal_elf_sym.st_info =
22658 ELF_ST_INFO (bind, STT_ARM_16BIT);
22663 /* Remove any overlapping mapping symbols generated by alignment frags. */
22664 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
22665 /* Now do generic ELF adjustments. */
22666 elf_adjust_symtab ();
22670 /* MD interface: Initialization. */
22673 set_constant_flonums (void)
22677 for (i = 0; i < NUM_FLOAT_VALS; i++)
22678 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
22682 /* Auto-select Thumb mode if it's the only available instruction set for the
22683 given architecture. */
22686 autoselect_thumb_from_cpu_variant (void)
22688 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
22689 opcode_select (16);
22698 if ( (arm_ops_hsh = hash_new ()) == NULL
22699 || (arm_cond_hsh = hash_new ()) == NULL
22700 || (arm_shift_hsh = hash_new ()) == NULL
22701 || (arm_psr_hsh = hash_new ()) == NULL
22702 || (arm_v7m_psr_hsh = hash_new ()) == NULL
22703 || (arm_reg_hsh = hash_new ()) == NULL
22704 || (arm_reloc_hsh = hash_new ()) == NULL
22705 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
22706 as_fatal (_("virtual memory exhausted"));
22708 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
22709 hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
22710 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
22711 hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
22712 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
22713 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
22714 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
22715 hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
22716 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
22717 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
22718 (void *) (v7m_psrs + i));
22719 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
22720 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
22722 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
22724 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
22725 (void *) (barrier_opt_names + i));
22727 for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
22729 struct reloc_entry * entry = reloc_names + i;
22731 if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
22732 /* This makes encode_branch() use the EABI versions of this relocation. */
22733 entry->reloc = BFD_RELOC_UNUSED;
22735 hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
22739 set_constant_flonums ();
22741 /* Set the cpu variant based on the command-line options. We prefer
22742 -mcpu= over -march= if both are set (as for GCC); and we prefer
22743 -mfpu= over any other way of setting the floating point unit.
22744 Use of legacy options with new options are faulted. */
22747 if (mcpu_cpu_opt || march_cpu_opt)
22748 as_bad (_("use of old and new-style options to set CPU type"));
22750 mcpu_cpu_opt = legacy_cpu;
22752 else if (!mcpu_cpu_opt)
22753 mcpu_cpu_opt = march_cpu_opt;
22758 as_bad (_("use of old and new-style options to set FPU type"));
22760 mfpu_opt = legacy_fpu;
22762 else if (!mfpu_opt)
22764 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
22765 || defined (TE_NetBSD) || defined (TE_VXWORKS))
22766 /* Some environments specify a default FPU. If they don't, infer it
22767 from the processor. */
22769 mfpu_opt = mcpu_fpu_opt;
22771 mfpu_opt = march_fpu_opt;
22773 mfpu_opt = &fpu_default;
22779 if (mcpu_cpu_opt != NULL)
22780 mfpu_opt = &fpu_default;
22781 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
22782 mfpu_opt = &fpu_arch_vfp_v2;
22784 mfpu_opt = &fpu_arch_fpa;
22790 mcpu_cpu_opt = &cpu_default;
22791 selected_cpu = cpu_default;
22795 selected_cpu = *mcpu_cpu_opt;
22797 mcpu_cpu_opt = &arm_arch_any;
22800 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22802 autoselect_thumb_from_cpu_variant ();
22804 arm_arch_used = thumb_arch_used = arm_arch_none;
22806 #if defined OBJ_COFF || defined OBJ_ELF
22808 unsigned int flags = 0;
22810 #if defined OBJ_ELF
22811 flags = meabi_flags;
22813 switch (meabi_flags)
22815 case EF_ARM_EABI_UNKNOWN:
22817 /* Set the flags in the private structure. */
22818 if (uses_apcs_26) flags |= F_APCS26;
22819 if (support_interwork) flags |= F_INTERWORK;
22820 if (uses_apcs_float) flags |= F_APCS_FLOAT;
22821 if (pic_code) flags |= F_PIC;
22822 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
22823 flags |= F_SOFT_FLOAT;
22825 switch (mfloat_abi_opt)
22827 case ARM_FLOAT_ABI_SOFT:
22828 case ARM_FLOAT_ABI_SOFTFP:
22829 flags |= F_SOFT_FLOAT;
22832 case ARM_FLOAT_ABI_HARD:
22833 if (flags & F_SOFT_FLOAT)
22834 as_bad (_("hard-float conflicts with specified fpu"));
22838 /* Using pure-endian doubles (even if soft-float). */
22839 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
22840 flags |= F_VFP_FLOAT;
22842 #if defined OBJ_ELF
22843 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
22844 flags |= EF_ARM_MAVERICK_FLOAT;
22847 case EF_ARM_EABI_VER4:
22848 case EF_ARM_EABI_VER5:
22849 /* No additional flags to set. */
22856 bfd_set_private_flags (stdoutput, flags);
22858 /* We have run out flags in the COFF header to encode the
22859 status of ATPCS support, so instead we create a dummy,
22860 empty, debug section called .arm.atpcs. */
22865 sec = bfd_make_section (stdoutput, ".arm.atpcs");
22869 bfd_set_section_flags
22870 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
22871 bfd_set_section_size (stdoutput, sec, 0);
22872 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
22878 /* Record the CPU type as well. */
22879 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
22880 mach = bfd_mach_arm_iWMMXt2;
22881 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
22882 mach = bfd_mach_arm_iWMMXt;
22883 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
22884 mach = bfd_mach_arm_XScale;
22885 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
22886 mach = bfd_mach_arm_ep9312;
22887 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
22888 mach = bfd_mach_arm_5TE;
22889 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
22891 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
22892 mach = bfd_mach_arm_5T;
22894 mach = bfd_mach_arm_5;
22896 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
22898 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
22899 mach = bfd_mach_arm_4T;
22901 mach = bfd_mach_arm_4;
22903 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
22904 mach = bfd_mach_arm_3M;
22905 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
22906 mach = bfd_mach_arm_3;
22907 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
22908 mach = bfd_mach_arm_2a;
22909 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
22910 mach = bfd_mach_arm_2;
22912 mach = bfd_mach_arm_unknown;
22914 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
22917 /* Command line processing. */
22920 Invocation line includes a switch not recognized by the base assembler.
22921 See if it's a processor-specific option.
22923 This routine is somewhat complicated by the need for backwards
22924 compatibility (since older releases of gcc can't be changed).
22925 The new options try to make the interface as compatible as
22928 New options (supported) are:
22930 -mcpu=<cpu name> Assemble for selected processor
22931 -march=<architecture name> Assemble for selected architecture
22932 -mfpu=<fpu architecture> Assemble for selected FPU.
22933 -EB/-mbig-endian Big-endian
22934 -EL/-mlittle-endian Little-endian
22935 -k Generate PIC code
22936 -mthumb Start in Thumb mode
22937 -mthumb-interwork Code supports ARM/Thumb interworking
22939 -m[no-]warn-deprecated Warn about deprecated features
22941 For now we will also provide support for:
22943 -mapcs-32 32-bit Program counter
22944 -mapcs-26 26-bit Program counter
22945 -macps-float Floats passed in FP registers
22946 -mapcs-reentrant Reentrant code
22948 (sometime these will probably be replaced with -mapcs=<list of options>
22949 and -matpcs=<list of options>)
22951 The remaining options are only supported for back-wards compatibility.
22952 Cpu variants, the arm part is optional:
22953 -m[arm]1 Currently not supported.
22954 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
22955 -m[arm]3 Arm 3 processor
22956 -m[arm]6[xx], Arm 6 processors
22957 -m[arm]7[xx][t][[d]m] Arm 7 processors
22958 -m[arm]8[10] Arm 8 processors
22959 -m[arm]9[20][tdmi] Arm 9 processors
22960 -mstrongarm[110[0]] StrongARM processors
22961 -mxscale XScale processors
22962 -m[arm]v[2345[t[e]]] Arm architectures
22963 -mall All (except the ARM1)
22965 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
22966 -mfpe-old (No float load/store multiples)
22967 -mvfpxd VFP Single precision
22969 -mno-fpu Disable all floating point instructions
22971 The following CPU names are recognized:
22972 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
22973 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
22974 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
22975 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
22976 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
22977 arm10t arm10e, arm1020t, arm1020e, arm10200e,
22978 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
22982 const char * md_shortopts = "m:k";
22984 #ifdef ARM_BI_ENDIAN
22985 #define OPTION_EB (OPTION_MD_BASE + 0)
22986 #define OPTION_EL (OPTION_MD_BASE + 1)
22988 #if TARGET_BYTES_BIG_ENDIAN
22989 #define OPTION_EB (OPTION_MD_BASE + 0)
22991 #define OPTION_EL (OPTION_MD_BASE + 1)
22994 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
22996 struct option md_longopts[] =
22999 {"EB", no_argument, NULL, OPTION_EB},
23002 {"EL", no_argument, NULL, OPTION_EL},
23004 {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
23005 {NULL, no_argument, NULL, 0}
23008 size_t md_longopts_size = sizeof (md_longopts);
23010 struct arm_option_table
23012 char *option; /* Option name to match. */
23013 char *help; /* Help information. */
23014 int *var; /* Variable to change. */
23015 int value; /* What to change it to. */
23016 char *deprecated; /* If non-null, print this message. */
23019 struct arm_option_table arm_opts[] =
23021 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
23022 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
23023 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
23024 &support_interwork, 1, NULL},
23025 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
23026 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
23027 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
23029 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
23030 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
23031 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
23032 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
23035 /* These are recognized by the assembler, but have no affect on code. */
23036 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
23037 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
23039 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
23040 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
23041 &warn_on_deprecated, 0, NULL},
23042 {NULL, NULL, NULL, 0, NULL}
23045 struct arm_legacy_option_table
23047 char *option; /* Option name to match. */
23048 const arm_feature_set **var; /* Variable to change. */
23049 const arm_feature_set value; /* What to change it to. */
23050 char *deprecated; /* If non-null, print this message. */
23053 const struct arm_legacy_option_table arm_legacy_opts[] =
23055 /* DON'T add any new processors to this list -- we want the whole list
23056 to go away... Add them to the processors table instead. */
23057 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
23058 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
23059 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
23060 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
23061 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
23062 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
23063 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
23064 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
23065 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
23066 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
23067 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
23068 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
23069 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
23070 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
23071 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
23072 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
23073 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
23074 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
23075 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
23076 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
23077 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
23078 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
23079 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
23080 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
23081 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
23082 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
23083 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
23084 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
23085 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
23086 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
23087 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
23088 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
23089 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
23090 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
23091 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
23092 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
23093 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
23094 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
23095 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
23096 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
23097 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
23098 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
23099 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
23100 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
23101 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
23102 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
23103 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
23104 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
23105 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
23106 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
23107 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
23108 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
23109 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
23110 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
23111 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
23112 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
23113 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
23114 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
23115 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
23116 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
23117 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
23118 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
23119 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
23120 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
23121 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
23122 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
23123 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
23124 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
23125 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
23126 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
23127 N_("use -mcpu=strongarm110")},
23128 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
23129 N_("use -mcpu=strongarm1100")},
23130 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
23131 N_("use -mcpu=strongarm1110")},
23132 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
23133 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
23134 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
23136 /* Architecture variants -- don't add any more to this list either. */
23137 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
23138 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
23139 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
23140 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
23141 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
23142 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
23143 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
23144 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
23145 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
23146 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
23147 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
23148 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
23149 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
23150 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
23151 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
23152 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
23153 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
23154 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
23156 /* Floating point variants -- don't add any more to this list either. */
23157 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
23158 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
23159 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
23160 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
23161 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
23163 {NULL, NULL, ARM_ARCH_NONE, NULL}
23166 struct arm_cpu_option_table
23170 const arm_feature_set value;
23171 /* For some CPUs we assume an FPU unless the user explicitly sets
23173 const arm_feature_set default_fpu;
23174 /* The canonical name of the CPU, or NULL to use NAME converted to upper
23176 const char *canonical_name;
23179 /* This list should, at a minimum, contain all the cpu names
23180 recognized by GCC. */
23181 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
23182 static const struct arm_cpu_option_table arm_cpus[] =
23184 ARM_CPU_OPT ("all", ARM_ANY, FPU_ARCH_FPA, NULL),
23185 ARM_CPU_OPT ("arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL),
23186 ARM_CPU_OPT ("arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL),
23187 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
23188 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
23189 ARM_CPU_OPT ("arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23190 ARM_CPU_OPT ("arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23191 ARM_CPU_OPT ("arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23192 ARM_CPU_OPT ("arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23193 ARM_CPU_OPT ("arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23194 ARM_CPU_OPT ("arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23195 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
23196 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23197 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
23198 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23199 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
23200 ARM_CPU_OPT ("arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23201 ARM_CPU_OPT ("arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23202 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23203 ARM_CPU_OPT ("arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23204 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23205 ARM_CPU_OPT ("arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23206 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23207 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23208 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23209 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23210 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23211 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23212 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23213 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23214 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23215 ARM_CPU_OPT ("arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23216 ARM_CPU_OPT ("arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23217 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23218 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23219 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23220 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23221 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23222 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23223 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"),
23224 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23225 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23226 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23227 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23228 ARM_CPU_OPT ("fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23229 ARM_CPU_OPT ("fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23230 /* For V5 or later processors we default to using VFP; but the user
23231 should really set the FPU type explicitly. */
23232 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
23233 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23234 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
23235 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
23236 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
23237 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
23238 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"),
23239 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23240 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
23241 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"),
23242 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23243 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23244 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
23245 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
23246 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23247 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"),
23248 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
23249 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23250 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23251 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2,
23253 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
23254 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23255 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23256 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23257 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23258 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23259 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"),
23260 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL),
23261 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2,
23263 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL),
23264 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, "MPCore"),
23265 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, "MPCore"),
23266 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL),
23267 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL),
23268 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL),
23269 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL),
23270 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC,
23271 FPU_NONE, "Cortex-A5"),
23272 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
23273 FPU_ARCH_NEON_VFP_V4,
23275 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC,
23276 ARM_FEATURE (0, FPU_VFP_V3
23277 | FPU_NEON_EXT_V1),
23279 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC,
23280 ARM_FEATURE (0, FPU_VFP_V3
23281 | FPU_NEON_EXT_V1),
23283 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
23284 FPU_ARCH_NEON_VFP_V4,
23286 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R, FPU_NONE, "Cortex-R4"),
23287 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R, FPU_ARCH_VFP_V3D16,
23289 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV,
23290 FPU_NONE, "Cortex-R5"),
23291 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M4"),
23292 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M, FPU_NONE, "Cortex-M3"),
23293 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M1"),
23294 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0"),
23295 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0+"),
23296 /* ??? XSCALE is really an architecture. */
23297 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
23298 /* ??? iwmmxt is not a processor. */
23299 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
23300 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
23301 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
23303 ARM_CPU_OPT ("ep9312", ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
23306 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
23310 struct arm_arch_option_table
23314 const arm_feature_set value;
23315 const arm_feature_set default_fpu;
23318 /* This list should, at a minimum, contain all the architecture names
23319 recognized by GCC. */
23320 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
23321 static const struct arm_arch_option_table arm_archs[] =
23323 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
23324 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
23325 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
23326 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
23327 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
23328 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
23329 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
23330 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
23331 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
23332 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
23333 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
23334 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
23335 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
23336 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
23337 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP),
23338 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP),
23339 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP),
23340 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP),
23341 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP),
23342 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP),
23343 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP),
23344 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP),
23345 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP),
23346 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP),
23347 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP),
23348 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP),
23349 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
23350 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
23351 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP),
23352 /* The official spelling of the ARMv7 profile variants is the dashed form.
23353 Accept the non-dashed form for compatibility with old toolchains. */
23354 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP),
23355 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP),
23356 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
23357 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP),
23358 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP),
23359 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
23360 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP),
23361 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP),
23362 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
23363 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
23364 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
23365 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
23367 #undef ARM_ARCH_OPT
23369 /* ISA extensions in the co-processor and main instruction set space. */
23370 struct arm_option_extension_value_table
23374 const arm_feature_set value;
23375 const arm_feature_set allowed_archs;
23378 /* The following table must be in alphabetical order with a NULL last entry.
23380 #define ARM_EXT_OPT(N, V, AA) { N, sizeof (N) - 1, V, AA }
23381 static const struct arm_option_extension_value_table arm_extensions[] =
23383 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
23384 ARM_FEATURE (ARM_EXT_V8, 0)),
23385 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8,
23386 ARM_FEATURE (ARM_EXT_V8, 0)),
23387 ARM_EXT_OPT ("idiv", ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
23388 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
23389 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE (0, ARM_CEXT_IWMMXT), ARM_ANY),
23390 ARM_EXT_OPT ("iwmmxt2",
23391 ARM_FEATURE (0, ARM_CEXT_IWMMXT2), ARM_ANY),
23392 ARM_EXT_OPT ("maverick",
23393 ARM_FEATURE (0, ARM_CEXT_MAVERICK), ARM_ANY),
23394 ARM_EXT_OPT ("mp", ARM_FEATURE (ARM_EXT_MP, 0),
23395 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
23396 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
23397 ARM_FEATURE (ARM_EXT_V8, 0)),
23398 ARM_EXT_OPT ("os", ARM_FEATURE (ARM_EXT_OS, 0),
23399 ARM_FEATURE (ARM_EXT_V6M, 0)),
23400 ARM_EXT_OPT ("sec", ARM_FEATURE (ARM_EXT_SEC, 0),
23401 ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)),
23402 ARM_EXT_OPT ("virt", ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV
23404 ARM_FEATURE (ARM_EXT_V7A, 0)),
23405 ARM_EXT_OPT ("xscale",ARM_FEATURE (0, ARM_CEXT_XSCALE), ARM_ANY),
23406 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
23410 /* ISA floating-point and Advanced SIMD extensions. */
23411 struct arm_option_fpu_value_table
23414 const arm_feature_set value;
23417 /* This list should, at a minimum, contain all the fpu names
23418 recognized by GCC. */
23419 static const struct arm_option_fpu_value_table arm_fpus[] =
23421 {"softfpa", FPU_NONE},
23422 {"fpe", FPU_ARCH_FPE},
23423 {"fpe2", FPU_ARCH_FPE},
23424 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
23425 {"fpa", FPU_ARCH_FPA},
23426 {"fpa10", FPU_ARCH_FPA},
23427 {"fpa11", FPU_ARCH_FPA},
23428 {"arm7500fe", FPU_ARCH_FPA},
23429 {"softvfp", FPU_ARCH_VFP},
23430 {"softvfp+vfp", FPU_ARCH_VFP_V2},
23431 {"vfp", FPU_ARCH_VFP_V2},
23432 {"vfp9", FPU_ARCH_VFP_V2},
23433 {"vfp3", FPU_ARCH_VFP_V3}, /* For backwards compatbility. */
23434 {"vfp10", FPU_ARCH_VFP_V2},
23435 {"vfp10-r0", FPU_ARCH_VFP_V1},
23436 {"vfpxd", FPU_ARCH_VFP_V1xD},
23437 {"vfpv2", FPU_ARCH_VFP_V2},
23438 {"vfpv3", FPU_ARCH_VFP_V3},
23439 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16},
23440 {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
23441 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16},
23442 {"vfpv3xd", FPU_ARCH_VFP_V3xD},
23443 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16},
23444 {"arm1020t", FPU_ARCH_VFP_V1},
23445 {"arm1020e", FPU_ARCH_VFP_V2},
23446 {"arm1136jfs", FPU_ARCH_VFP_V2},
23447 {"arm1136jf-s", FPU_ARCH_VFP_V2},
23448 {"maverick", FPU_ARCH_MAVERICK},
23449 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
23450 {"neon-fp16", FPU_ARCH_NEON_FP16},
23451 {"vfpv4", FPU_ARCH_VFP_V4},
23452 {"vfpv4-d16", FPU_ARCH_VFP_V4D16},
23453 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16},
23454 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4},
23455 {"fp-armv8", FPU_ARCH_VFP_ARMV8},
23456 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8},
23457 {"crypto-neon-fp-armv8",
23458 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
23459 {NULL, ARM_ARCH_NONE}
23462 struct arm_option_value_table
23468 static const struct arm_option_value_table arm_float_abis[] =
23470 {"hard", ARM_FLOAT_ABI_HARD},
23471 {"softfp", ARM_FLOAT_ABI_SOFTFP},
23472 {"soft", ARM_FLOAT_ABI_SOFT},
23477 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
23478 static const struct arm_option_value_table arm_eabis[] =
23480 {"gnu", EF_ARM_EABI_UNKNOWN},
23481 {"4", EF_ARM_EABI_VER4},
23482 {"5", EF_ARM_EABI_VER5},
23487 struct arm_long_option_table
23489 char * option; /* Substring to match. */
23490 char * help; /* Help information. */
23491 int (* func) (char * subopt); /* Function to decode sub-option. */
23492 char * deprecated; /* If non-null, print this message. */
23496 arm_parse_extension (char *str, const arm_feature_set **opt_p)
23498 arm_feature_set *ext_set = (arm_feature_set *)
23499 xmalloc (sizeof (arm_feature_set));
23501 /* We insist on extensions being specified in alphabetical order, and with
23502 extensions being added before being removed. We achieve this by having
23503 the global ARM_EXTENSIONS table in alphabetical order, and using the
23504 ADDING_VALUE variable to indicate whether we are adding an extension (1)
23505 or removing it (0) and only allowing it to change in the order
23507 const struct arm_option_extension_value_table * opt = NULL;
23508 int adding_value = -1;
23510 /* Copy the feature set, so that we can modify it. */
23511 *ext_set = **opt_p;
23514 while (str != NULL && *str != 0)
23521 as_bad (_("invalid architectural extension"));
23526 ext = strchr (str, '+');
23531 len = strlen (str);
23533 if (len >= 2 && strncmp (str, "no", 2) == 0)
23535 if (adding_value != 0)
23538 opt = arm_extensions;
23546 if (adding_value == -1)
23549 opt = arm_extensions;
23551 else if (adding_value != 1)
23553 as_bad (_("must specify extensions to add before specifying "
23554 "those to remove"));
23561 as_bad (_("missing architectural extension"));
23565 gas_assert (adding_value != -1);
23566 gas_assert (opt != NULL);
23568 /* Scan over the options table trying to find an exact match. */
23569 for (; opt->name != NULL; opt++)
23570 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
23572 /* Check we can apply the extension to this architecture. */
23573 if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
23575 as_bad (_("extension does not apply to the base architecture"));
23579 /* Add or remove the extension. */
23581 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
23583 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
23588 if (opt->name == NULL)
23590 /* Did we fail to find an extension because it wasn't specified in
23591 alphabetical order, or because it does not exist? */
23593 for (opt = arm_extensions; opt->name != NULL; opt++)
23594 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
23597 if (opt->name == NULL)
23598 as_bad (_("unknown architectural extension `%s'"), str);
23600 as_bad (_("architectural extensions must be specified in "
23601 "alphabetical order"));
23607 /* We should skip the extension we've just matched the next time
23619 arm_parse_cpu (char *str)
23621 const struct arm_cpu_option_table *opt;
23622 char *ext = strchr (str, '+');
23628 len = strlen (str);
23632 as_bad (_("missing cpu name `%s'"), str);
23636 for (opt = arm_cpus; opt->name != NULL; opt++)
23637 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
23639 mcpu_cpu_opt = &opt->value;
23640 mcpu_fpu_opt = &opt->default_fpu;
23641 if (opt->canonical_name)
23642 strcpy (selected_cpu_name, opt->canonical_name);
23647 for (i = 0; i < len; i++)
23648 selected_cpu_name[i] = TOUPPER (opt->name[i]);
23649 selected_cpu_name[i] = 0;
23653 return arm_parse_extension (ext, &mcpu_cpu_opt);
23658 as_bad (_("unknown cpu `%s'"), str);
23663 arm_parse_arch (char *str)
23665 const struct arm_arch_option_table *opt;
23666 char *ext = strchr (str, '+');
23672 len = strlen (str);
23676 as_bad (_("missing architecture name `%s'"), str);
23680 for (opt = arm_archs; opt->name != NULL; opt++)
23681 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
23683 march_cpu_opt = &opt->value;
23684 march_fpu_opt = &opt->default_fpu;
23685 strcpy (selected_cpu_name, opt->name);
23688 return arm_parse_extension (ext, &march_cpu_opt);
23693 as_bad (_("unknown architecture `%s'\n"), str);
23698 arm_parse_fpu (char * str)
23700 const struct arm_option_fpu_value_table * opt;
23702 for (opt = arm_fpus; opt->name != NULL; opt++)
23703 if (streq (opt->name, str))
23705 mfpu_opt = &opt->value;
23709 as_bad (_("unknown floating point format `%s'\n"), str);
23714 arm_parse_float_abi (char * str)
23716 const struct arm_option_value_table * opt;
23718 for (opt = arm_float_abis; opt->name != NULL; opt++)
23719 if (streq (opt->name, str))
23721 mfloat_abi_opt = opt->value;
23725 as_bad (_("unknown floating point abi `%s'\n"), str);
23731 arm_parse_eabi (char * str)
23733 const struct arm_option_value_table *opt;
23735 for (opt = arm_eabis; opt->name != NULL; opt++)
23736 if (streq (opt->name, str))
23738 meabi_flags = opt->value;
23741 as_bad (_("unknown EABI `%s'\n"), str);
23747 arm_parse_it_mode (char * str)
23749 bfd_boolean ret = TRUE;
23751 if (streq ("arm", str))
23752 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
23753 else if (streq ("thumb", str))
23754 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
23755 else if (streq ("always", str))
23756 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
23757 else if (streq ("never", str))
23758 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
23761 as_bad (_("unknown implicit IT mode `%s', should be "\
23762 "arm, thumb, always, or never."), str);
23769 struct arm_long_option_table arm_long_opts[] =
23771 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
23772 arm_parse_cpu, NULL},
23773 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
23774 arm_parse_arch, NULL},
23775 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
23776 arm_parse_fpu, NULL},
23777 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
23778 arm_parse_float_abi, NULL},
23780 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
23781 arm_parse_eabi, NULL},
23783 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
23784 arm_parse_it_mode, NULL},
23785 {NULL, NULL, 0, NULL}
23789 md_parse_option (int c, char * arg)
23791 struct arm_option_table *opt;
23792 const struct arm_legacy_option_table *fopt;
23793 struct arm_long_option_table *lopt;
23799 target_big_endian = 1;
23805 target_big_endian = 0;
23809 case OPTION_FIX_V4BX:
23814 /* Listing option. Just ignore these, we don't support additional
23819 for (opt = arm_opts; opt->option != NULL; opt++)
23821 if (c == opt->option[0]
23822 && ((arg == NULL && opt->option[1] == 0)
23823 || streq (arg, opt->option + 1)))
23825 /* If the option is deprecated, tell the user. */
23826 if (warn_on_deprecated && opt->deprecated != NULL)
23827 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
23828 arg ? arg : "", _(opt->deprecated));
23830 if (opt->var != NULL)
23831 *opt->var = opt->value;
23837 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
23839 if (c == fopt->option[0]
23840 && ((arg == NULL && fopt->option[1] == 0)
23841 || streq (arg, fopt->option + 1)))
23843 /* If the option is deprecated, tell the user. */
23844 if (warn_on_deprecated && fopt->deprecated != NULL)
23845 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
23846 arg ? arg : "", _(fopt->deprecated));
23848 if (fopt->var != NULL)
23849 *fopt->var = &fopt->value;
23855 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
23857 /* These options are expected to have an argument. */
23858 if (c == lopt->option[0]
23860 && strncmp (arg, lopt->option + 1,
23861 strlen (lopt->option + 1)) == 0)
23863 /* If the option is deprecated, tell the user. */
23864 if (warn_on_deprecated && lopt->deprecated != NULL)
23865 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
23866 _(lopt->deprecated));
23868 /* Call the sup-option parser. */
23869 return lopt->func (arg + strlen (lopt->option) - 1);
23880 md_show_usage (FILE * fp)
23882 struct arm_option_table *opt;
23883 struct arm_long_option_table *lopt;
23885 fprintf (fp, _(" ARM-specific assembler options:\n"));
23887 for (opt = arm_opts; opt->option != NULL; opt++)
23888 if (opt->help != NULL)
23889 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
23891 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
23892 if (lopt->help != NULL)
23893 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
23897 -EB assemble code for a big-endian cpu\n"));
23902 -EL assemble code for a little-endian cpu\n"));
23906 --fix-v4bx Allow BX in ARMv4 code\n"));
23914 arm_feature_set flags;
23915 } cpu_arch_ver_table;
23917 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
23918 least features first. */
23919 static const cpu_arch_ver_table cpu_arch_ver[] =
23925 {4, ARM_ARCH_V5TE},
23926 {5, ARM_ARCH_V5TEJ},
23930 {11, ARM_ARCH_V6M},
23931 {12, ARM_ARCH_V6SM},
23932 {8, ARM_ARCH_V6T2},
23933 {10, ARM_ARCH_V7A_IDIV_MP_SEC_VIRT},
23934 {10, ARM_ARCH_V7R},
23935 {10, ARM_ARCH_V7M},
23936 {14, ARM_ARCH_V8A},
23940 /* Set an attribute if it has not already been set by the user. */
23942 aeabi_set_attribute_int (int tag, int value)
23945 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
23946 || !attributes_set_explicitly[tag])
23947 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
23951 aeabi_set_attribute_string (int tag, const char *value)
23954 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
23955 || !attributes_set_explicitly[tag])
23956 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
23959 /* Set the public EABI object attributes. */
23961 aeabi_set_public_attributes (void)
23966 int fp16_optional = 0;
23967 arm_feature_set flags;
23968 arm_feature_set tmp;
23969 const cpu_arch_ver_table *p;
23971 /* Choose the architecture based on the capabilities of the requested cpu
23972 (if any) and/or the instructions actually used. */
23973 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
23974 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
23975 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
23977 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
23978 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
23980 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
23981 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
23983 /* Allow the user to override the reported architecture. */
23986 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
23987 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
23990 /* We need to make sure that the attributes do not identify us as v6S-M
23991 when the only v6S-M feature in use is the Operating System Extensions. */
23992 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
23993 if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
23994 ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
23998 for (p = cpu_arch_ver; p->val; p++)
24000 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
24003 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
24007 /* The table lookup above finds the last architecture to contribute
24008 a new feature. Unfortunately, Tag13 is a subset of the union of
24009 v6T2 and v7-M, so it is never seen as contributing a new feature.
24010 We can not search for the last entry which is entirely used,
24011 because if no CPU is specified we build up only those flags
24012 actually used. Perhaps we should separate out the specified
24013 and implicit cases. Avoid taking this path for -march=all by
24014 checking for contradictory v7-A / v7-M features. */
24016 && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
24017 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
24018 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
24021 /* Tag_CPU_name. */
24022 if (selected_cpu_name[0])
24026 q = selected_cpu_name;
24027 if (strncmp (q, "armv", 4) == 0)
24032 for (i = 0; q[i]; i++)
24033 q[i] = TOUPPER (q[i]);
24035 aeabi_set_attribute_string (Tag_CPU_name, q);
24038 /* Tag_CPU_arch. */
24039 aeabi_set_attribute_int (Tag_CPU_arch, arch);
24041 /* Tag_CPU_arch_profile. */
24042 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
24044 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
24046 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
24051 if (profile != '\0')
24052 aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
24054 /* Tag_ARM_ISA_use. */
24055 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
24057 aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
24059 /* Tag_THUMB_ISA_use. */
24060 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
24062 aeabi_set_attribute_int (Tag_THUMB_ISA_use,
24063 ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
24065 /* Tag_VFP_arch. */
24066 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8))
24067 aeabi_set_attribute_int (Tag_VFP_arch, 7);
24068 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
24069 aeabi_set_attribute_int (Tag_VFP_arch,
24070 ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
24072 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
24075 aeabi_set_attribute_int (Tag_VFP_arch, 3);
24077 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
24079 aeabi_set_attribute_int (Tag_VFP_arch, 4);
24082 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
24083 aeabi_set_attribute_int (Tag_VFP_arch, 2);
24084 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
24085 || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
24086 aeabi_set_attribute_int (Tag_VFP_arch, 1);
24088 /* Tag_ABI_HardFP_use. */
24089 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
24090 && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
24091 aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
24093 /* Tag_WMMX_arch. */
24094 if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
24095 aeabi_set_attribute_int (Tag_WMMX_arch, 2);
24096 else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
24097 aeabi_set_attribute_int (Tag_WMMX_arch, 1);
24099 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
24100 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
24101 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
24102 else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
24104 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
24106 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
24110 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
24115 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
24116 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
24117 aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
24121 We set Tag_DIV_use to two when integer divide instructions have been used
24122 in ARM state, or when Thumb integer divide instructions have been used,
24123 but we have no architecture profile set, nor have we any ARM instructions.
24125 For ARMv8 we set the tag to 0 as integer divide is implied by the base
24128 For new architectures we will have to check these tests. */
24129 gas_assert (arch <= TAG_CPU_ARCH_V8);
24130 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8))
24131 aeabi_set_attribute_int (Tag_DIV_use, 0);
24132 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
24133 || (profile == '\0'
24134 && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
24135 && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
24136 aeabi_set_attribute_int (Tag_DIV_use, 2);
24138 /* Tag_MP_extension_use. */
24139 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
24140 aeabi_set_attribute_int (Tag_MPextension_use, 1);
24142 /* Tag Virtualization_use. */
24143 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
24145 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
24148 aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
24151 /* Add the default contents for the .ARM.attributes section. */
24155 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
24158 aeabi_set_public_attributes ();
24160 #endif /* OBJ_ELF */
24163 /* Parse a .cpu directive. */
24166 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
24168 const struct arm_cpu_option_table *opt;
24172 name = input_line_pointer;
24173 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24174 input_line_pointer++;
24175 saved_char = *input_line_pointer;
24176 *input_line_pointer = 0;
24178 /* Skip the first "all" entry. */
24179 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
24180 if (streq (opt->name, name))
24182 mcpu_cpu_opt = &opt->value;
24183 selected_cpu = opt->value;
24184 if (opt->canonical_name)
24185 strcpy (selected_cpu_name, opt->canonical_name);
24189 for (i = 0; opt->name[i]; i++)
24190 selected_cpu_name[i] = TOUPPER (opt->name[i]);
24192 selected_cpu_name[i] = 0;
24194 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24195 *input_line_pointer = saved_char;
24196 demand_empty_rest_of_line ();
24199 as_bad (_("unknown cpu `%s'"), name);
24200 *input_line_pointer = saved_char;
24201 ignore_rest_of_line ();
24205 /* Parse a .arch directive. */
24208 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
24210 const struct arm_arch_option_table *opt;
24214 name = input_line_pointer;
24215 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24216 input_line_pointer++;
24217 saved_char = *input_line_pointer;
24218 *input_line_pointer = 0;
24220 /* Skip the first "all" entry. */
24221 for (opt = arm_archs + 1; opt->name != NULL; opt++)
24222 if (streq (opt->name, name))
24224 mcpu_cpu_opt = &opt->value;
24225 selected_cpu = opt->value;
24226 strcpy (selected_cpu_name, opt->name);
24227 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24228 *input_line_pointer = saved_char;
24229 demand_empty_rest_of_line ();
24233 as_bad (_("unknown architecture `%s'\n"), name);
24234 *input_line_pointer = saved_char;
24235 ignore_rest_of_line ();
24239 /* Parse a .object_arch directive. */
24242 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
24244 const struct arm_arch_option_table *opt;
24248 name = input_line_pointer;
24249 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24250 input_line_pointer++;
24251 saved_char = *input_line_pointer;
24252 *input_line_pointer = 0;
24254 /* Skip the first "all" entry. */
24255 for (opt = arm_archs + 1; opt->name != NULL; opt++)
24256 if (streq (opt->name, name))
24258 object_arch = &opt->value;
24259 *input_line_pointer = saved_char;
24260 demand_empty_rest_of_line ();
24264 as_bad (_("unknown architecture `%s'\n"), name);
24265 *input_line_pointer = saved_char;
24266 ignore_rest_of_line ();
24269 /* Parse a .arch_extension directive. */
24272 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
24274 const struct arm_option_extension_value_table *opt;
24277 int adding_value = 1;
24279 name = input_line_pointer;
24280 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24281 input_line_pointer++;
24282 saved_char = *input_line_pointer;
24283 *input_line_pointer = 0;
24285 if (strlen (name) >= 2
24286 && strncmp (name, "no", 2) == 0)
24292 for (opt = arm_extensions; opt->name != NULL; opt++)
24293 if (streq (opt->name, name))
24295 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
24297 as_bad (_("architectural extension `%s' is not allowed for the "
24298 "current base architecture"), name);
24303 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
24305 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
24307 mcpu_cpu_opt = &selected_cpu;
24308 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24309 *input_line_pointer = saved_char;
24310 demand_empty_rest_of_line ();
24314 if (opt->name == NULL)
24315 as_bad (_("unknown architecture `%s'\n"), name);
24317 *input_line_pointer = saved_char;
24318 ignore_rest_of_line ();
24321 /* Parse a .fpu directive. */
24324 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
24326 const struct arm_option_fpu_value_table *opt;
24330 name = input_line_pointer;
24331 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24332 input_line_pointer++;
24333 saved_char = *input_line_pointer;
24334 *input_line_pointer = 0;
24336 for (opt = arm_fpus; opt->name != NULL; opt++)
24337 if (streq (opt->name, name))
24339 mfpu_opt = &opt->value;
24340 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24341 *input_line_pointer = saved_char;
24342 demand_empty_rest_of_line ();
24346 as_bad (_("unknown floating point format `%s'\n"), name);
24347 *input_line_pointer = saved_char;
24348 ignore_rest_of_line ();
24351 /* Copy symbol information. */
24354 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
24356 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
24360 /* Given a symbolic attribute NAME, return the proper integer value.
24361 Returns -1 if the attribute is not known. */
24364 arm_convert_symbolic_attribute (const char *name)
24366 static const struct
24371 attribute_table[] =
24373 /* When you modify this table you should
24374 also modify the list in doc/c-arm.texi. */
24375 #define T(tag) {#tag, tag}
24376 T (Tag_CPU_raw_name),
24379 T (Tag_CPU_arch_profile),
24380 T (Tag_ARM_ISA_use),
24381 T (Tag_THUMB_ISA_use),
24385 T (Tag_Advanced_SIMD_arch),
24386 T (Tag_PCS_config),
24387 T (Tag_ABI_PCS_R9_use),
24388 T (Tag_ABI_PCS_RW_data),
24389 T (Tag_ABI_PCS_RO_data),
24390 T (Tag_ABI_PCS_GOT_use),
24391 T (Tag_ABI_PCS_wchar_t),
24392 T (Tag_ABI_FP_rounding),
24393 T (Tag_ABI_FP_denormal),
24394 T (Tag_ABI_FP_exceptions),
24395 T (Tag_ABI_FP_user_exceptions),
24396 T (Tag_ABI_FP_number_model),
24397 T (Tag_ABI_align_needed),
24398 T (Tag_ABI_align8_needed),
24399 T (Tag_ABI_align_preserved),
24400 T (Tag_ABI_align8_preserved),
24401 T (Tag_ABI_enum_size),
24402 T (Tag_ABI_HardFP_use),
24403 T (Tag_ABI_VFP_args),
24404 T (Tag_ABI_WMMX_args),
24405 T (Tag_ABI_optimization_goals),
24406 T (Tag_ABI_FP_optimization_goals),
24407 T (Tag_compatibility),
24408 T (Tag_CPU_unaligned_access),
24409 T (Tag_FP_HP_extension),
24410 T (Tag_VFP_HP_extension),
24411 T (Tag_ABI_FP_16bit_format),
24412 T (Tag_MPextension_use),
24414 T (Tag_nodefaults),
24415 T (Tag_also_compatible_with),
24416 T (Tag_conformance),
24418 T (Tag_Virtualization_use),
24419 /* We deliberately do not include Tag_MPextension_use_legacy. */
24427 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
24428 if (streq (name, attribute_table[i].name))
24429 return attribute_table[i].tag;
24435 /* Apply sym value for relocations only in the case that
24436 they are for local symbols and you have the respective
24437 architectural feature for blx and simple switches. */
24439 arm_apply_sym_value (struct fix * fixP)
24442 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
24443 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
24445 switch (fixP->fx_r_type)
24447 case BFD_RELOC_ARM_PCREL_BLX:
24448 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24449 if (ARM_IS_FUNC (fixP->fx_addsy))
24453 case BFD_RELOC_ARM_PCREL_CALL:
24454 case BFD_RELOC_THUMB_PCREL_BLX:
24455 if (THUMB_IS_FUNC (fixP->fx_addsy))
24466 #endif /* OBJ_ELF */