1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
3 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 3, or (at your option)
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
32 #include "safe-ctype.h"
35 #include "libiberty.h"
36 #include "opcode/arm.h"
40 #include "dw2gencfi.h"
43 #include "dwarf2dbg.h"
/* Must be at least the size of the largest unwind opcode (currently two). */
#define ARM_OPCODE_CHUNK_SIZE 8

/* This structure holds the unwinding state.  */
/* NOTE(review): the struct header and several members are elided from this
   view; the fields below are the per-function unwind state.  */
symbolS * table_entry;          /* Symbol for the unwind index table entry.  */
symbolS * personality_routine;  /* Personality routine symbol, if any.  */
int personality_index;          /* Index of a pre-defined personality.  */
/* The segment containing the function. */
/* Opcodes generated from this function. */
unsigned char * opcodes;        /* Buffer of emitted unwind opcodes.  */
/* The number of bytes pushed to the stack. */
/* We don't add stack adjustment opcodes immediately so that we can merge
multiple adjustments.  We can also omit the final adjustment
when using a frame pointer.  */
offsetT pending_offset;
/* These two fields are set by both unwind_movsp and unwind_setfp.  They
hold the reg+offset to use when restoring sp from a frame pointer.  */
/* Nonzero if an unwind_setfp directive has been seen. */
/* Nonzero if the last opcode restores sp from fp_reg. */
unsigned sp_restored:1;

/* Results from operand parsing worker functions.  */
/* NOTE(review): enum header elided; SUCCESS/FAIL variants precede these.  */
PARSE_OPERAND_SUCCESS,
PARSE_OPERAND_FAIL_NO_BACKTRACK
} parse_operand_result;
/* Types of processor to assemble for.  */
/* The code that was here used to select a default CPU depending on compiler
pre-defines which were only present when doing native builds, thus
changing gas' default behaviour depending upon the build host.

If you have a target that requires a default CPU option then the you
should define CPU_DEFAULT here.  */
/* NOTE(review): the opening #ifndef FPU_DEFAULT and the per-target #if tests
   are elided from this view; each branch picks the default FPU arch.  */
# define FPU_DEFAULT FPU_ARCH_FPA
# elif defined (TE_NetBSD)
# define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order.  */
/* Legacy a.out format.  */
# define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order.  */
# elif defined (TE_VXWORKS)
# define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order.  */
/* For backwards compatibility, default to FPA.  */
# define FPU_DEFAULT FPU_ARCH_FPA
#endif /* ifndef FPU_DEFAULT */
#define streq(a, b) (strcmp (a, b) == 0)

/* Feature sets: the variant being assembled for, and the features actually
   used by the ARM and Thumb code seen so far.  */
static arm_feature_set cpu_variant;
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26 = FALSE;
static int atpcs = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float = FALSE;
static int pic_code = FALSE;
static int fix_v4bx = FALSE;
/* Warn on using deprecated features.  */
static int warn_on_deprecated = TRUE;

/* Variables that we set while parsing command-line options.  Once all
options have been read we re-process these values to set the real
assembly flags.  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

static const arm_feature_set *mcpu_cpu_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
static const arm_feature_set *march_cpu_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
static const arm_feature_set *mfpu_opt = NULL;
static const arm_feature_set *object_arch = NULL;

/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

/* NOTE(review): presumably guarded by #ifdef CPU_DEFAULT — guard elided.  */
static const arm_feature_set cpu_default = CPU_DEFAULT;
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE (ARM_EXT_V6M, 0);
189 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
190 static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
191 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
192 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
193 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
194 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
195 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
196 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
197 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
198 static const arm_feature_set arm_ext_v8 = ARM_FEATURE (ARM_EXT_V8, 0);
199 static const arm_feature_set arm_ext_m =
200 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, 0);
201 static const arm_feature_set arm_ext_mp = ARM_FEATURE (ARM_EXT_MP, 0);
202 static const arm_feature_set arm_ext_sec = ARM_FEATURE (ARM_EXT_SEC, 0);
203 static const arm_feature_set arm_ext_os = ARM_FEATURE (ARM_EXT_OS, 0);
204 static const arm_feature_set arm_ext_adiv = ARM_FEATURE (ARM_EXT_ADIV, 0);
205 static const arm_feature_set arm_ext_virt = ARM_FEATURE (ARM_EXT_VIRT, 0);
/* Whole-architecture convenience sets.  */
static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;

/* Coprocessor extension sets: iWMMXt, XScale and Cirrus Maverick.  */
static const arm_feature_set arm_cext_iwmmxt2 =
ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
static const arm_feature_set arm_cext_iwmmxt =
ARM_FEATURE (0, ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
ARM_FEATURE (0, ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
ARM_FEATURE (0, ARM_CEXT_MAVERICK);
/* Floating-point extension sets: FPA, VFP (v1xD..v3, D32), Neon,
   half-precision, fused multiply-add and the ARMv8 FP/SIMD/crypto units.  */
static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_ext_d32 =
ARM_FEATURE (0, FPU_VFP_EXT_D32);
static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
static const arm_feature_set fpu_vfp_ext_armv8 =
ARM_FEATURE (0, FPU_VFP_EXT_ARMV8);
static const arm_feature_set fpu_neon_ext_armv8 =
ARM_FEATURE (0, FPU_NEON_EXT_ARMV8);
static const arm_feature_set fpu_crypto_ext_armv8 =
ARM_FEATURE (0, FPU_CRYPTO_EXT_ARMV8);
/* -mfloat-abi= selection; -1 means "not given on the command line".  */
static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[16];

/* Return if no cpu was selected on command-line.  */
/* NOTE(review): return type and braces elided from this view.  */
no_cpu_selected (void)
return selected_cpu.core == arm_arch_none.core
&& selected_cpu.coproc == arm_arch_none.coproc;

/* NOTE(review): the two meabi_flags definitions below are presumably in
   alternative preprocessor branches (OBJ_ELF vs. not) — guards elided.  */
static int meabi_flags = EABI_DEFAULT;
static int meabi_flags = EF_ARM_EABI_UNKNOWN;

/* One entry per build attribute; nonzero if set explicitly by the user.  */
static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];

/* NOTE(review): enclosing function header elided (EABI-version test).  */
return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);

/* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
symbolS * GOT_symbol;
/* 0: assemble for ARM,
1: assemble for Thumb,
2: assemble for Thumb even though target CPU does not support thumb
   instructions.  */
static int thumb_mode = 0;
/* A value distinct from the possible values for thumb_mode that we
can use to record whether thumb_mode has been copied into the
tc_frag_data field of a frag.  */
#define MODE_RECORDED (1 << 4)

/* Specifies the intrinsic IT insn behavior mode.  */
/* NOTE(review): enum opening brace elided from this view.  */
enum implicit_it_mode
IMPLICIT_IT_MODE_NEVER = 0x00,
IMPLICIT_IT_MODE_ARM = 0x01,
IMPLICIT_IT_MODE_THUMB = 0x02,
IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;

/* If unified_syntax is true, we are processing the new unified
ARM/Thumb syntax.  Important differences from the old ARM mode:

- Immediate operands do not require a # prefix.
- Conditional affixes always appear at the end of the
  instruction.  (For backward compatibility, those instructions
  that formerly had them in the middle, continue to accept them
  there.)
- The IT instruction may appear, and if it does is validated
  against subsequent conditional affixes.  It does not generate
  machine code.

Important differences from the old Thumb mode:

- Immediate operands do not require a # prefix.
- Most of the V6T2 instructions are only available in unified mode.
- The .N and .W suffixes are recognized and honored (it is an error
  if they cannot be honored).
- All instructions set the flags if and only if they have an 's' affix.
- Conditional affixes may be used.  They are validated against
  preceding IT instructions.  Unlike ARM mode, you cannot use a
  conditional affix except in the scope of an IT instruction.  */
static bfd_boolean unified_syntax = FALSE;
/* NOTE(review): struct neon_type header elided — element type of a parsed
   Neon type suffix.  */
enum neon_el_type type;
/* Maximum number of dot-separated type elements in a Neon type suffix.  */
#define NEON_MAX_TYPE_ELS 4
struct neon_type_el el[NEON_MAX_TYPE_ELS];

/* Classification of an instruction with respect to IT blocks.  */
enum it_instruction_type
IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
			   if inside, should be the last one.  */
NEUTRAL_IT_INSN,        /* This could be either inside or outside,
			   i.e. BKPT and NOP.  */
IT_INSN                 /* The IT insn has been parsed.  */

/* The maximum number of operands we need.  */
#define ARM_IT_MAX_OPERANDS 6

/* NOTE(review): struct arm_it header and several members elided — this is
   the per-instruction parse/encode scratch state.  */
unsigned long instruction;      /* Encoded instruction bits.  */
/* "uncond_value" is set to the value in place of the conditional field in
unconditional versions of the instruction, or -1 if nothing is
appropriate.  */
struct neon_type vectype;
/* This does not indicate an actual NEON instruction, only that
the mnemonic accepts neon-style type suffixes.  */
/* Set to the opcode if the instruction needs relaxation.
Zero if the instruction is not relaxed.  */
bfd_reloc_code_real_type type;  /* Relocation type, if any.  */
enum it_instruction_type it_insn_type;

/* NOTE(review): per-operand record; header elided.  */
struct neon_type_el vectype;
unsigned present : 1;     /* Operand present.  */
unsigned isreg : 1;       /* Operand was a register.  */
unsigned immisreg : 1;    /* .imm field is a second register.  */
unsigned isscalar : 1;    /* Operand is a (Neon) scalar.  */
unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
/* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
instructions.  This allows us to disambiguate ARM <-> vector insns.  */
unsigned regisimm : 1;    /* 64-bit immediate, reg forms high 32 bits.  */
unsigned isvec : 1;       /* Is a single, double or quad VFP/Neon reg.  */
unsigned isquad : 1;      /* Operand is Neon quad-precision register.  */
unsigned issingle : 1;    /* Operand is VFP single-precision register.  */
unsigned hasreloc : 1;    /* Operand has relocation suffix.  */
unsigned writeback : 1;   /* Operand has trailing !  */
unsigned preind : 1;      /* Preindexed address.  */
unsigned postind : 1;     /* Postindexed address.  */
unsigned negative : 1;    /* Index register was negated.  */
unsigned shifted : 1;     /* Shift applied to operation.  */
unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
} operands[ARM_IT_MAX_OPERANDS];

static struct arm_it inst;

/* Number of entries in fp_const (excluding the terminating 0).  */
#define NUM_FLOAT_VALS 8

/* The FPA immediate constants, in the order the hardware encodes them.  */
const char * fp_const[] =
"0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0

/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
/* Coprocessor transfer bits in FPA instructions.  */
#define CP_T_X 0x00008000
#define CP_T_Y 0x00400000

#define CONDS_BIT 0x00100000   /* 'S' flag bit in data-processing insns.  */
#define LOAD_BIT 0x00100000    /* L bit: load (1) vs. store (0).  */

#define DOUBLE_LOAD_FLAG 0x00000001

/* NOTE(review): the three template_name members below belong to separate
   small structs (condition, PSR, barrier-option tables); headers elided.  */
const char * template_name;

#define COND_ALWAYS 0xE        /* The AL condition code value.  */

const char * template_name;

struct asm_barrier_opt
const char * template_name;

/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c (1 << 16)
#define PSR_x (1 << 17)
#define PSR_s (1 << 18)
#define PSR_f (1 << 19)
/* NOTE(review): enclosing struct (parsed relocation info) elided.  */
bfd_reloc_code_real_type reloc;

/* Positions a VFP register can occupy in an instruction encoding.  */
VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn

/* VFP multiple load/store addressing modes.  */
VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX

/* Bits for DEFINED field in neon_typed_alias.  */
#define NTA_HASTYPE  1
#define NTA_HASINDEX 2

/* Extra type/index info attached to a .dn/.qn register alias.  */
struct neon_typed_alias
unsigned char defined;          /* Mask of NTA_* bits above.  */
struct neon_type_el eltype;

/* ARM register categories.  This includes coprocessor numbers and various
architecture extensions' registers.  */

/* Structure for a hash table entry for a register.
If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
information which states whether a vector type or index is specified (for a
register alias created with .dn or .qn).  Otherwise NEON should be NULL.  */
unsigned char builtin;          /* Nonzero for built-in (non-alias) regs.  */
struct neon_typed_alias * neon;

/* Diagnostics used when we don't get a register of the expected type.  */
const char * const reg_expected_msgs[] =
N_("ARM register expected"),
N_("bad or missing co-processor number"),
N_("co-processor register expected"),
N_("FPA register expected"),
N_("VFP single precision register expected"),
N_("VFP/Neon double precision register expected"),
N_("Neon quad precision register expected"),
N_("VFP single or double precision register expected"),
N_("Neon double or quad precision register expected"),
N_("VFP single, double or Neon quad precision register expected"),
N_("VFP system register expected"),
N_("Maverick MVF register expected"),
N_("Maverick MVD register expected"),
N_("Maverick MVFX register expected"),
N_("Maverick MVDX register expected"),
N_("Maverick MVAX register expected"),
N_("Maverick DSPSC register expected"),
N_("iWMMXt data register expected"),
N_("iWMMXt control register expected"),
N_("iWMMXt scalar register expected"),
N_("XScale accumulator register expected"),

/* Some well known registers that we refer to directly elsewhere.  */

/* ARM instructions take 4bytes in the object file, Thumb instructions
   2.  */

/* NOTE(review): struct asm_opcode header elided — one entry per mnemonic.  */
/* Basic string to match.  */
const char * template_name;

/* Parameters to instruction.  */
unsigned int operands[8];

/* Conditional tag - see opcode_lookup.  */
unsigned int tag : 4;

/* Basic instruction code.  */
unsigned int avalue : 28;

/* Thumb-format instruction code.  */

/* Which architecture variant provides this instruction.  */
const arm_feature_set * avariant;
const arm_feature_set * tvariant;

/* Function to call to encode instruction in ARM format.  */
void (* aencode) (void);

/* Function to call to encode instruction in Thumb format.  */
void (* tencode) (void);
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE	0x02000000
#define OFFSET_REG	0x02000000
#define HWOFFSET_IMM	0x00400000
#define SHIFT_BY_REG	0x00000010
#define PRE_INDEX	0x01000000
#define INDEX_UP	0x00800000
#define WRITE_BACK	0x00200000
#define LDM_TYPE_2_OR_3	0x00400000
#define CPSI_MMOD	0x00020000

#define LITERAL_MASK	0xf000f000
#define OPCODE_MASK	0xfe1fffff
#define V4_STR_BIT	0x00000020

/* Thumb-2 "SUBS pc, lr, #const" base encoding (exception return).  */
#define T2_SUBS_PC_LR	0xf3de8f00

/* Shift of the 4-bit data-processing opcode field.  */
#define DATA_OP_SHIFT	21

#define T2_OPCODE_MASK	0xfe1fffff
#define T2_DATA_OP_SHIFT 21

#define A_COND_MASK         0xf0000000
#define A_PUSH_POP_OP_MASK  0x0fff0000

/* Opcodes for pushing/poping registers to/from the stack.  */
#define A1_OPCODE_PUSH    0x092d0000
#define A2_OPCODE_PUSH    0x052d0004
#define A2_OPCODE_POP     0x049d0004

/* Codes to distinguish the arithmetic instructions.  */
/* NOTE(review): OPCODE_AND..OPCODE_TST (values 0-9) elided from this view.  */
#define OPCODE_CMP	10
#define OPCODE_CMN	11
#define OPCODE_ORR	12
#define OPCODE_MOV	13
#define OPCODE_BIC	14
#define OPCODE_MVN	15

/* Thumb-2 data-processing opcode values.  */
#define T2_OPCODE_AND	0
#define T2_OPCODE_BIC	1
#define T2_OPCODE_ORR	2
#define T2_OPCODE_ORN	3
#define T2_OPCODE_EOR	4
#define T2_OPCODE_ADD	8
#define T2_OPCODE_ADC	10
#define T2_OPCODE_SBC	11
#define T2_OPCODE_SUB	13
#define T2_OPCODE_RSB	14

/* 16-bit Thumb instruction base encodings.  */
#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3	0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R	0x4100
#define T_OPCODE_LSL_R	0x4080
#define T_OPCODE_LSR_R	0x40c0
#define T_OPCODE_ROR_R	0x41c0
#define T_OPCODE_ASR_I	0x1000
#define T_OPCODE_LSL_I	0x0000
#define T_OPCODE_LSR_I	0x0800

#define T_OPCODE_MOV_I8	0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH	0xb400
#define T_OPCODE_POP	0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE	2	/* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000
/* Common diagnostic messages, translated once at use sites via _().  */
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
/* FIX: removed a stray trailing semicolon that made this macro expand
   differently from all of its siblings.  */
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
/* Hash tables for mnemonics, condition codes, shifts, PSR names, register
   names, relocation suffixes and barrier options.  */
static struct hash_control * arm_ops_hsh;
static struct hash_control * arm_cond_hsh;
static struct hash_control * arm_shift_hsh;
static struct hash_control * arm_psr_hsh;
static struct hash_control * arm_v7m_psr_hsh;
static struct hash_control * arm_reg_hsh;
static struct hash_control * arm_reloc_hsh;
static struct hash_control * arm_barrier_opt_hsh;

/* Stuff needed to resolve the label ambiguity
   between a label on its own line and a label followed by an insn.  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = FALSE;

/* Literal pool structure.  Held on a per-section
and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
expressionS    literals [MAX_LITERAL_POOL_SIZE];
unsigned int   next_free_entry;
struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
struct literal_pool * next;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

/* IT-block state is kept per-segment.  */
#   define now_it seg_info (now_seg)->tc_segment_info_data.current_it
static struct current_it now_it;
/* NOTE(review): return types, braces and the enclosing "static inline"
   qualifiers are elided from this view for the two helpers below.  */

now_it_compatible (int cond)
/* Two condition codes are IT-compatible when they agree in all but the
   least significant bit, i.e. one is the inverse of the other.  */
return (cond & ~1) == (now_it.cc & ~1);

conditional_insn (void)
return inst.cond != COND_ALWAYS;

/* IT-block state machine entry points (defined later in the file).  */
static int in_it_block (void);
static int handle_it_state (void);
static void force_automatic_it_block_close (void);
static void it_fsm_post_encode (void);

/* Record this insn's IT classification and run the IT state machine,
   returning from the caller on failure.  */
#define set_it_insn_type(type)			\
inst.it_insn_type = type;			\
if (handle_it_state () == FAIL)		\

/* As above, but return FAILRET from a value-returning caller.  */
#define set_it_insn_type_nonvoid(type, failret) \
inst.it_insn_type = type;			\
if (handle_it_state () == FAIL)		\

/* Mark this insn as (potentially) the last one of an IT block.  */
#define set_it_insn_type_last ()		\
if (inst.cond == COND_ALWAYS)			\
set_it_insn_type (IF_INSIDE_IT_LAST_INSN);	\
set_it_insn_type (INSIDE_IT_LAST_INSN);	\
/* This array holds the chars that always start a comment.  If the
pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
a line.  If the line seems to have the form '# 123 filename'
.line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
first line of the input file.  This is because the compiler outputs
#NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456, or 0d1.2345e12.  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)

/* NOTE(review): body elided; presumably advances *STR past C when it is
   the next character — confirm against the full source.  */
skip_past_char (char ** str, char c)

#define skip_past_comma(str) skip_past_char (str, ',')
/* Arithmetic expressions (possibly involving symbols).  */

/* Return TRUE if anything in the expression is a bignum.  */
/* NOTE(review): return type and braces elided from this view.  */
walk_no_bignums (symbolS * sp)
if (symbol_get_value_expression (sp)->X_op == O_big)

if (symbol_get_value_expression (sp)->X_add_symbol)
/* Recurse into both operand symbols of the expression tree.  */
return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
	|| (symbol_get_value_expression (sp)->X_op_symbol
	    && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));

/* Nonzero while my_get_expression is active; lets md_operand know the
   context of a bad operand.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack.  Use an optional prefix, and also allow big (64-bit)
immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
/* Parse an expression at *STR according to PREFIX_MODE, leaving the result
   in *EP and advancing *STR.  Sets inst.error and fails on bad input.
   NOTE(review): return type, braces and some statements are elided from
   this view.  */
my_get_expression (expressionS * ep, char ** str, int prefix_mode)

/* In unified syntax, all prefixes are optional.  */
prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode

case GE_NO_PREFIX: break;
/* GE_IMM_PREFIX: the '#' (or '$') is mandatory.  */
if (!is_immediate_prefix (**str))

inst.error = _("immediate expression requires a # prefix");

case GE_OPT_PREFIX_BIG:
/* Optional prefix: consume it if present.  */
if (is_immediate_prefix (**str))

memset (ep, 0, sizeof (expressionS));

/* Hand the string to the generic expression parser, remembering where
   input_line_pointer was so it can be restored afterwards.  */
save_in = input_line_pointer;
input_line_pointer = *str;
in_my_get_expression = 1;
seg = expression (ep);
in_my_get_expression = 0;

if (ep->X_op == O_illegal || ep->X_op == O_absent)

/* We found a bad or missing expression in md_operand().  */
*str = input_line_pointer;
input_line_pointer = save_in;
if (inst.error == NULL)
inst.error = (ep->X_op == O_absent
	      ? _("missing expression") :_("bad expression"));

/* Only a fixed set of segments is acceptable for an operand.  */
if (seg != absolute_section
    && seg != text_section
    && seg != data_section
    && seg != bss_section
    && seg != undefined_section)

inst.error = _("bad segment");
*str = input_line_pointer;
input_line_pointer = save_in;

/* Get rid of any bignums now, so that we don't generate an error for which
we can't establish a line number later on.  Big numbers are never valid
in instructions, which is where this routine is always called.  */
if (prefix_mode != GE_OPT_PREFIX_BIG
    && (ep->X_op == O_big
	&& (walk_no_bignums (ep->X_add_symbol)
	    && walk_no_bignums (ep->X_op_symbol))))))

inst.error = _("invalid constant");
*str = input_line_pointer;
input_line_pointer = save_in;

*str = input_line_pointer;
input_line_pointer = save_in;
/* Turn a string in input_line_pointer into a floating point constant
of type TYPE, and store the appropriate bytes in *LITP.  The number
of LITTLENUMS emitted is stored in *SIZEP.  An error message is
returned, or NULL on OK.

Note that fp constants aren't represent in the normal way on the ARM.
In big endian mode, things are as expected.  However, in little endian
mode fp constants are big-endian word-wise, and little-endian byte-wise
within the words.  For example, (double) 1.1 in big endian mode is
the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
the byte sequence 99 99 f1 3f 9a 99 99 99.

??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */

md_atof (int type, char * litP, int * sizeP)

LITTLENUM_TYPE words[MAX_LITTLENUMS];

/* NOTE(review): the switch on TYPE that sets `prec' is elided from this
   view; unrecognised types reach the error return below.  */
return _("Unrecognized or unsupported floating point constant");

t = atof_ieee (input_line_pointer, type, words);

input_line_pointer = t;
*sizeP = prec * sizeof (LITTLENUM_TYPE);

if (target_big_endian)

/* Big endian: emit littlenums most-significant first.  */
for (i = 0; i < prec; i++)

md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
litP += sizeof (LITTLENUM_TYPE);

/* Pure-endian FPUs store words in the natural order too.  */
if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
for (i = prec - 1; i >= 0; i--)

md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
litP += sizeof (LITTLENUM_TYPE);

/* For a 4 byte float the order of elements in `words' is 1 0.
For an 8 byte float the order is 1 0 3 2.  */
for (i = 0; i < prec; i += 2)

md_number_to_chars (litP, (valueT) words[i + 1],
		    sizeof (LITTLENUM_TYPE));
md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
		    (valueT) words[i], sizeof (LITTLENUM_TYPE));
litP += 2 * sizeof (LITTLENUM_TYPE);
/* We handle all bad expressions here, so that we can report the faulty
instruction in the error message.  */
md_operand (expressionS * exp)

/* Only flag O_illegal when called from within my_get_expression.  */
if (in_my_get_expression)
exp->X_op = O_illegal;

/* Immediate values.  */

/* Generic immediate-value read function for use in directives.
Accepts anything that 'expression' can fold to a constant.
*val receives the number.  */
immediate_for_directive (int *val)

exp.X_op = O_illegal;

/* Skip an optional '#'/'$' immediate prefix.  */
if (is_immediate_prefix (*input_line_pointer))

input_line_pointer++;

if (exp.X_op != O_constant)

as_bad (_("expected #constant"));
ignore_rest_of_line ();

*val = exp.X_add_number;
1137 /* Register parsing. */
1139 /* Generic register parser. CCP points to what should be the
1140 beginning of a register name. If it is indeed a valid register
1141 name, advance CCP over it and return the reg_entry structure;
1142 otherwise return NULL. Does not issue diagnostics. */
1144 static struct reg_entry *
1145 arm_reg_parse_multi (char **ccp)
1149 struct reg_entry *reg;
1151 #ifdef REGISTER_PREFIX
1152 if (*start != REGISTER_PREFIX)
1156 #ifdef OPTIONAL_REGISTER_PREFIX
1157 if (*start == OPTIONAL_REGISTER_PREFIX)
1162 if (!ISALPHA (*p) || !is_name_beginner (*p))
1167 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1169 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1179 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1180 enum arm_reg_type type)
1182 /* Alternative syntaxes are accepted for a few register classes. */
1189 /* Generic coprocessor register names are allowed for these. */
1190 if (reg && reg->type == REG_TYPE_CN)
1195 /* For backward compatibility, a bare number is valid here. */
1197 unsigned long processor = strtoul (start, ccp, 10);
1198 if (*ccp != start && processor <= 15)
1202 case REG_TYPE_MMXWC:
1203 /* WC includes WCG. ??? I'm not sure this is true for all
1204 instructions that take WC registers. */
1205 if (reg && reg->type == REG_TYPE_MMXWCG)
1216 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1217 return value is the register number or FAIL. */
/* As arm_reg_parse_multi, but the register must be of type TYPE
   exactly (or match an alternative syntax for TYPE); the register
   number is returned, or FAIL.  A scalar (register plus index) is
   explicitly rejected here.  */
1220 arm_reg_parse (char **ccp, enum arm_reg_type type)
1223 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1226 /* Do not allow a scalar (reg+index) to parse as a register. */
1227 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1230 if (reg && reg->type == type)
1233 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1240 /* Parse a Neon type specifier. *STR should point at the leading '.'
1241 character. Does no verification at this stage that the type fits the opcode
1248 Can all be legally parsed by this function.
1250 Fills in neon_type struct pointer with parsed information, and updates STR
1251 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1252 type, FAIL if not. */
/* Parse a Neon type specifier such as ".s32.u16".  Each element is a
   one-letter type code (i/f/p/s/u, or bare size meaning untyped, with
   'f' defaulting to float) followed by a bit size.  Fills TYPE with up
   to NEON_MAX_TYPE_ELS elements and advances *STR past the specifier.
   Per the header comment, returns SUCCESS or FAIL.  */
1255 parse_neon_type (struct neon_type *type, char **str)
1262 while (type->elems < NEON_MAX_TYPE_ELS)
1264 enum neon_el_type thistype = NT_untyped;
1265 unsigned thissize = -1u;
1272 /* Just a size without an explicit type. */
1276 switch (TOLOWER (*ptr))
1278 case 'i': thistype = NT_integer; break;
1279 case 'f': thistype = NT_float; break;
1280 case 'p': thistype = NT_poly; break;
1281 case 's': thistype = NT_signed; break;
1282 case 'u': thistype = NT_unsigned; break;
1284 thistype = NT_float;
1289 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1295 /* .f is an abbreviation for .f32. */
1296 if (thistype == NT_float && !ISDIGIT (*ptr))
1301 thissize = strtoul (ptr, &ptr, 10);
/* Only 8/16/32 (and, presumably, 64 on the elided line 1304) are
   legal element sizes -- TODO confirm against the full source.  */
1303 if (thissize != 8 && thissize != 16 && thissize != 32
1306 as_bad (_("bad size %d in type specifier"), thissize);
1314 type->el[type->elems].type = thistype;
1315 type->el[type->elems].size = thissize;
1320 /* Empty/missing type is not a successful parse. */
1321 if (type->elems == 0)
1329 /* Errors may be set multiple times during parsing or bit encoding
1330 (particularly in the Neon bits), but usually the earliest error which is set
1331 will be the most meaningful. Avoid overwriting it with later (cascading)
1332 errors by calling this function. */
/* Record ERR as the instruction error only if no earlier (and thus
   more meaningful) error has already been set -- see the rationale in
   the comment above.  Body elided in this fragment.  */
1335 first_error (const char *err)
1341 /* Parse a single type, e.g. ".s32", leading period included. */
/* Wraps parse_neon_type, insisting on exactly one element; the single
   parsed element is copied into *VECTYPE.  Multiple elements or a
   failed parse are reported through first_error.  */
1343 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1346 struct neon_type optype;
1350 if (parse_neon_type (&optype, &str) == SUCCESS)
1352 if (optype.elems == 1)
1353 *vectype = optype.el[0];
1356 first_error (_("only one type should be specified for operand"));
1362 first_error (_("vector type expected"));
1374 /* Special meanings for indices (which have a range of 0-7), which will fit into
1377 #define NEON_ALL_LANES 15
1378 #define NEON_INTERLEAVE_LANES 14
1380 /* Parse either a register or a scalar, with an optional type. Return the
1381 register number, and optionally fill in the actual type of the register
1382 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1383 type/index information in *TYPEINFO. */
/* Parse a register or scalar with optional ".type" suffix and optional
   "[index]" (where "[]" means NEON_ALL_LANES).  On entry TYPE names the
   acceptable class; polymorphic classes (NDQ/VFSD/NSDQ/MMXWC) are
   narrowed to the concrete class actually seen.  *RTYPE and *TYPEINFO
   are filled per the header comment.  Only D registers may take an
   index.  (Fragment: returns and some error paths are elided.)  */
1386 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1387 enum arm_reg_type *rtype,
1388 struct neon_typed_alias *typeinfo)
1391 struct reg_entry *reg = arm_reg_parse_multi (&str);
1392 struct neon_typed_alias atype;
1393 struct neon_type_el parsetype;
/* Start with "no type, no index" so later checks on atype.defined
   behave; size -1 marks the element type as unset.  */
1397 atype.eltype.type = NT_invtype;
1398 atype.eltype.size = -1;
1400 /* Try alternate syntax for some types of register. Note these are mutually
1401 exclusive with the Neon syntax extensions. */
1404 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1412 /* Undo polymorphism when a set of register types may be accepted. */
1413 if ((type == REG_TYPE_NDQ
1414 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1415 || (type == REG_TYPE_VFSD
1416 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1417 || (type == REG_TYPE_NSDQ
1418 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1419 || reg->type == REG_TYPE_NQ))
1420 || (type == REG_TYPE_MMXWC
1421 && (reg->type == REG_TYPE_MMXWCG)))
1422 type = (enum arm_reg_type) reg->type;
1424 if (type != reg->type)
1430 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1432 if ((atype.defined & NTA_HASTYPE) != 0)
1434 first_error (_("can't redefine type for operand"));
1437 atype.defined |= NTA_HASTYPE;
1438 atype.eltype = parsetype;
1441 if (skip_past_char (&str, '[') == SUCCESS)
1443 if (type != REG_TYPE_VFD)
1445 first_error (_("only D registers may be indexed"));
1449 if ((atype.defined & NTA_HASINDEX) != 0)
1451 first_error (_("can't change index for operand"));
1455 atype.defined |= NTA_HASINDEX;
/* "[]" (immediately closed bracket) selects all lanes.  */
1457 if (skip_past_char (&str, ']') == SUCCESS)
1458 atype.index = NEON_ALL_LANES;
1463 my_get_expression (&exp, &str, GE_NO_PREFIX);
1465 if (exp.X_op != O_constant)
1467 first_error (_("constant expression required"));
1471 if (skip_past_char (&str, ']') == FAIL)
1474 atype.index = exp.X_add_number;
1489 /* Like arm_reg_parse, but allow allow the following extra features:
1490 - If RTYPE is non-zero, return the (possibly restricted) type of the
1491 register (e.g. Neon double or quad reg when either has been requested).
1492 - If this is a Neon vector type with additional type information, fill
1493 in the struct pointed to by VECTYPE (if non-NULL).
1494 This function will fault on encountering a scalar. */
/* Like arm_reg_parse but with Neon extensions (see header comment):
   optionally returns the restricted type in *RTYPE and the element
   type in *VECTYPE.  Faults on a scalar (indexed register).  */
1497 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1498 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1500 struct neon_typed_alias atype;
1502 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1507 /* Do not allow regname(... to parse as a register. */
1511 /* Do not allow a scalar (reg+index) to parse as a register. */
1512 if ((atype.defined & NTA_HASINDEX) != 0)
1514 first_error (_("register operand expected, but got scalar"));
1519 *vectype = atype.eltype;
1526 #define NEON_SCALAR_REG(X) ((X) >> 4)
1527 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1529 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1530 have enough information to be able to do a good job bounds-checking. So, we
1531 just do easy checks here, and do further checks later. */
/* Parse a Neon scalar (Dn[x]).  Only easy range checks are done here
   (index must be present and fit 64/ELSIZE lanes); the rest is
   deferred, per the comment above.  Returns reg*16 + index, matching
   the NEON_SCALAR_REG/NEON_SCALAR_INDEX accessors defined above.  */
1534 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1538 struct neon_typed_alias atype;
1540 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1542 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1545 if (atype.index == NEON_ALL_LANES)
1547 first_error (_("scalar must have an index"));
1550 else if (atype.index >= 64 / elsize)
1552 first_error (_("scalar index out of range"));
1557 *type = atype.eltype;
1561 return reg * 16 + atype.index;
1564 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
/* Parse an ARM core-register list, e.g. {r0-r3, r5 | r7}.  Returns the
   bitmask or FAIL (per the comment above).  Accepts '-' ranges inside
   braces, '+'/'|' between ranges, and a bare expression folding to a
   16-bit mask; a too-complex expression is recorded as a
   BFD_RELOC_ARM_MULTI relocation instead.  Duplicates and descending
   order draw warnings, not errors.  (Fragment: several control-flow
   lines are elided.)  */
1567 parse_reg_list (char ** strp)
1569 char * str = * strp;
1573 /* We come back here if we get ranges concatenated by '+' or '|'. */
1588 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1590 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1600 first_error (_("bad range in register list"));
1604 for (i = cur_reg + 1; i < reg; i++)
1606 if (range & (1 << i))
1608 (_("Warning: duplicated register (r%d) in register list"),
1616 if (range & (1 << reg))
1617 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1619 else if (reg <= cur_reg)
1620 as_tsktsk (_("Warning: register range not in ascending order"));
1625 while (skip_past_comma (&str) != FAIL
1626 || (in_range = 1, *str++ == '-'));
1631 first_error (_("missing `}'"));
/* No brace: the list may instead be an expression yielding a mask.  */
1639 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1642 if (exp.X_op == O_constant)
1644 if (exp.X_add_number
1645 != (exp.X_add_number & 0x0000ffff))
1647 inst.error = _("invalid register mask");
1651 if ((range & exp.X_add_number) != 0)
1653 int regno = range & exp.X_add_number;
1656 regno = (1 << regno) - 1;
1658 (_("Warning: duplicated register (r%d) in register list"),
1662 range |= exp.X_add_number;
1666 if (inst.reloc.type != 0)
1668 inst.error = _("expression too complex");
1672 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1673 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1674 inst.reloc.pc_rel = 0;
1678 if (*str == '|' || *str == '+')
1684 while (another_range);
1690 /* Types of registers in a list. */
1699 /* Parse a VFP register list. If the string is invalid return FAIL.
1700 Otherwise return the number of registers, and set PBASE to the first
1701 register. Parses registers of type ETYPE.
1702 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1703 - Q registers can be used to specify pairs of D registers
1704 - { } can be omitted from around a singleton register list
1705 FIXME: This is not implemented, as it would require backtracking in
1708 This could be done (the meaning isn't really ambiguous), but doesn't
1709 fit in well with the current parsing framework.
1710 - 32 D registers may be used (also true for VFPv3).
1711 FIXME: Types are ignored in these register lists, which is probably a
/* Parse a VFP register list per the header comment: returns the count
   and sets *PBASE to the lowest register; FAIL on error.  ETYPE
   selects S/D/Neon-D lists; Q registers count as D pairs (value 2*n,
   addregs/setmask doubled).  D-register lists may reach 32 registers
   when the CPU has fpu_vfp_ext_d32.  The final loop enforces that the
   mask is one contiguous run.  (Fragment: loop heads and some
   assignments are elided.)  */
1715 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1720 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1724 unsigned long mask = 0;
1729 inst.error = _("expecting {");
1738 regtype = REG_TYPE_VFS;
1743 regtype = REG_TYPE_VFD;
1746 case REGLIST_NEON_D:
1747 regtype = REG_TYPE_NDQ;
1751 if (etype != REGLIST_VFP_S)
1753 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1754 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1758 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1761 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1768 base_reg = max_regs;
1772 int setmask = 1, addregs = 1;
/* NOTE(review): "®type" below is mojibake for "&regtype" ('&' + 'reg'
   collapsed to the registered-sign entity); left byte-identical here
   since this is a documentation-only pass -- fix at the source.  */
1774 new_base = arm_typed_reg_parse (&str, regtype, ®type, NULL);
1776 if (new_base == FAIL)
1778 first_error (_(reg_expected_msgs[regtype]));
1782 if (new_base >= max_regs)
1784 first_error (_("register out of range in list"));
1788 /* Note: a value of 2 * n is returned for the register Q<n>. */
1789 if (regtype == REG_TYPE_NQ)
1795 if (new_base < base_reg)
1796 base_reg = new_base;
1798 if (mask & (setmask << new_base))
1800 first_error (_("invalid register list"));
1804 if ((mask >> new_base) != 0 && ! warned)
1806 as_tsktsk (_("register list not in ascending order"));
1810 mask |= setmask << new_base;
1813 if (*str == '-') /* We have the start of a range expression */
1819 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1822 inst.error = gettext (reg_expected_msgs[regtype]);
1826 if (high_range >= max_regs)
1828 first_error (_("register out of range in list"));
1832 if (regtype == REG_TYPE_NQ)
1833 high_range = high_range + 1;
1835 if (high_range <= new_base)
1837 inst.error = _("register range not in ascending order");
1841 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1843 if (mask & (setmask << new_base))
1845 inst.error = _("invalid register list");
1849 mask |= setmask << new_base;
1854 while (skip_past_comma (&str) != FAIL);
1858 /* Sanity check -- should have raised a parse error above. */
1859 if (count == 0 || count > max_regs)
1864 /* Final test -- the registers must be consecutive. */
1866 for (i = 0; i < count; i++)
1868 if ((mask & (1u << i)) == 0)
1870 inst.error = _("non-contiguous register range");
1880 /* True if two alias types are the same. */
/* Compare two Neon typed-alias records: the 'defined' flag sets must
   match, and any defined type (kind + size) and any defined index must
   agree.  Presumably returns true/false; the NULL-handling and return
   lines are elided in this fragment.  */
1883 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1891 if (a->defined != b->defined)
1894 if ((a->defined & NTA_HASTYPE) != 0
1895 && (a->eltype.type != b->eltype.type
1896 || a->eltype.size != b->eltype.size))
1899 if ((a->defined & NTA_HASINDEX) != 0
1900 && (a->index != b->index))
1906 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1907 The base register is put in *PBASE.
1908 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1910 The register stride (minus one) is put in bit 4 of the return value.
1911 Bits [6:5] encode the list length (minus one).
1912 The type of the list elements is put in *ELTYPE, if non-NULL. */
1914 #define NEON_LANE(X) ((X) & 0xf)
1915 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1916 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
/* Parse a VLD<n>/VST<n> element/structure list such as {d0[2],d2[2]}.
   Encoding of the return value is given by the comment and the
   NEON_LANE / NEON_REG_STRIDE / NEON_REGLIST_LENGTH macros above:
   lane in bits [3:0], stride-1 in bit 4, length-1 in bits [6:5].
   All entries must share one element type and one lane selection;
   register stride must be 1 or 2, and Dn-Dm/Qn-Qm ranges require
   unit stride and no indexing.  (Fragment: brace handling and some
   branches are elided.)  */
1919 parse_neon_el_struct_list (char **str, unsigned *pbase,
1920 struct neon_type_el *eltype)
1927 int leading_brace = 0;
1928 enum arm_reg_type rtype = REG_TYPE_NDQ;
1929 const char *const incr_error = _("register stride must be 1 or 2");
1930 const char *const type_error = _("mismatched element/structure types in list");
1931 struct neon_typed_alias firsttype;
1933 if (skip_past_char (&ptr, '{') == SUCCESS)
1938 struct neon_typed_alias atype;
1939 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1943 first_error (_(reg_expected_msgs[rtype]));
1950 if (rtype == REG_TYPE_NQ)
/* First spacing between registers establishes the stride.  */
1956 else if (reg_incr == -1)
1958 reg_incr = getreg - base_reg;
1959 if (reg_incr < 1 || reg_incr > 2)
1961 first_error (_(incr_error));
1965 else if (getreg != base_reg + reg_incr * count)
1967 first_error (_(incr_error));
1971 if (! neon_alias_types_same (&atype, &firsttype))
1973 first_error (_(type_error));
1977 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1981 struct neon_typed_alias htype;
1982 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1984 lane = NEON_INTERLEAVE_LANES;
1985 else if (lane != NEON_INTERLEAVE_LANES)
1987 first_error (_(type_error));
1992 else if (reg_incr != 1)
1994 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1998 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
2001 first_error (_(reg_expected_msgs[rtype]));
2004 if (! neon_alias_types_same (&htype, &firsttype))
2006 first_error (_(type_error));
2009 count += hireg + dregs - getreg;
2013 /* If we're using Q registers, we can't use [] or [n] syntax. */
2014 if (rtype == REG_TYPE_NQ)
2020 if ((atype.defined & NTA_HASINDEX) != 0)
2024 else if (lane != atype.index)
2026 first_error (_(type_error));
2030 else if (lane == -1)
2031 lane = NEON_INTERLEAVE_LANES;
2032 else if (lane != NEON_INTERLEAVE_LANES)
2034 first_error (_(type_error));
2039 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
2041 /* No lane set by [x]. We must be interleaving structures. */
2043 lane = NEON_INTERLEAVE_LANES;
/* Sanity: a lane and base register must have been established, list
   length must be 1..4, and multi-register lists need a stride.  */
2046 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
2047 || (count > 1 && reg_incr == -1))
2049 first_error (_("error parsing element/structure list"));
2053 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
2055 first_error (_("expected }"));
2063 *eltype = firsttype.eltype;
2068 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
2071 /* Parse an explicit relocation suffix on an expression. This is
2072 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2073 arm_reloc_hsh contains no entries, so this function can only
2074 succeed if there is no () after the word. Returns -1 on error,
2075 BFD_RELOC_UNUSED if there wasn't any suffix. */
/* Parse an explicit relocation suffix (a word in parentheses) on an
   expression -- see the comment above for the return conventions:
   -1 on error, BFD_RELOC_UNUSED when no suffix is present.  */
2078 parse_reloc (char **str)
2080 struct reloc_entry *r;
2084 return BFD_RELOC_UNUSED;
/* Scan the word up to ')' or ',' and look it up in arm_reloc_hsh
   (empty unless OBJ_ELF, per the comment above).  */
2089 while (*q && *q != ')' && *q != ',')
2094 if ((r = (struct reloc_entry *)
2095 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2102 /* Directives: register aliases. */
/* Insert a register alias STR -> (NUMBER, TYPE) into arm_reg_hsh.
   Redefining a built-in register, or redefining an alias to a
   different value, only warns.  A fresh entry is heap-allocated with
   a duplicated name and no Neon type info.  (Fragment: the return
   statements are elided; callers below treat NULL as failure.)  */
2104 static struct reg_entry *
2105 insert_reg_alias (char *str, unsigned number, int type)
2107 struct reg_entry *new_reg;
2110 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2112 if (new_reg->builtin)
2113 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2115 /* Only warn about a redefinition if it's not defined as the
2117 else if (new_reg->number != number || new_reg->type != type)
2118 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2123 name = xstrdup (str);
2124 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2126 new_reg->name = name;
2127 new_reg->number = number;
2128 new_reg->type = type;
2129 new_reg->builtin = FALSE;
2130 new_reg->neon = NULL;
2132 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
/* Like insert_reg_alias, but attach a copy of the Neon typed-alias
   info ATYPE (type and/or index) to the new entry.  Redefining a
   typed alias is an error.  */
2139 insert_neon_reg_alias (char *str, int number, int type,
2140 struct neon_typed_alias *atype)
2142 struct reg_entry *reg = insert_reg_alias (str, number, type);
2146 first_error (_("attempt to redefine typed alias"));
2152 reg->neon = (struct neon_typed_alias *)
2153 xmalloc (sizeof (struct neon_typed_alias));
2154 *reg->neon = *atype;
2158 /* Look for the .req directive. This is of the form:
2160 new_register_name .req existing_register_name
2162 If we find one, or if it looks sufficiently like one that we want to
2163 handle any error here, return TRUE. Otherwise return FALSE. */
/* Handle ".req": NEWNAME .req EXISTING.  Returns TRUE when this looks
   enough like a .req to have been handled here (per comment above).
   Besides the name as written, all-uppercase and all-lowercase copies
   are registered so later lookups match case-insensitively; the
   second/third inserts are skipped if they would just repeat the
   first.  */
2166 create_register_alias (char * newname, char *p)
2168 struct reg_entry *old;
2169 char *oldname, *nbuf;
2172 /* The input scrubber ensures that whitespace after the mnemonic is
2173 collapsed to single spaces. */
2175 if (strncmp (oldname, " .req ", 6) != 0)
2179 if (*oldname == '\0')
2182 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2185 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2189 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2190 the desired alias name, and p points to its end. If not, then
2191 the desired alias name is in the global original_case_string. */
2192 #ifdef TC_CASE_SENSITIVE
2195 newname = original_case_string;
2196 nlen = strlen (newname);
/* NOTE(review): alloca'd buffer -- lives only for this call.  */
2199 nbuf = (char *) alloca (nlen + 1);
2200 memcpy (nbuf, newname, nlen);
2203 /* Create aliases under the new name as stated; an all-lowercase
2204 version of the new name; and an all-uppercase version of the new
2206 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2208 for (p = nbuf; *p; p++)
2211 if (strncmp (nbuf, newname, nlen))
2213 /* If this attempt to create an additional alias fails, do not bother
2214 trying to create the all-lower case alias. We will fail and issue
2215 a second, duplicate error message. This situation arises when the
2216 programmer does something like:
2219 The second .req creates the "Foo" alias but then fails to create
2220 the artificial FOO alias because it has already been created by the
2222 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2226 for (p = nbuf; *p; p++)
2229 if (strncmp (nbuf, newname, nlen))
2230 insert_reg_alias (nbuf, old->number, old->type);
2236 /* Create a Neon typed/indexed register alias using directives, e.g.:
2241 These typed registers can be used instead of the types specified after the
2242 Neon mnemonic, so long as all operands given have types. Types can also be
2243 specified directly, e.g.:
2244 vadd d0.s32, d1.s32, d2.s32 */
/* Handle ".dn"/".qn": create a Neon typed/indexed register alias (see
   example in the comment above).  The base may be a known D/Q register
   or a bare constant; an optional ".type" and "[index]" refine the
   alias.  As with .req, the alias is also inserted in all-uppercase
   and all-lowercase forms.  (Fragment: several returns and the name
   scan are elided.)  */
2247 create_neon_reg_alias (char *newname, char *p)
2249 enum arm_reg_type basetype;
2250 struct reg_entry *basereg;
2251 struct reg_entry mybasereg;
2252 struct neon_type ntype;
2253 struct neon_typed_alias typeinfo;
2254 char *namebuf, *nameend ATTRIBUTE_UNUSED;
2257 typeinfo.defined = 0;
2258 typeinfo.eltype.type = NT_invtype;
2259 typeinfo.eltype.size = -1;
2260 typeinfo.index = -1;
2264 if (strncmp (p, " .dn ", 5) == 0)
2265 basetype = REG_TYPE_VFD;
2266 else if (strncmp (p, " .qn ", 5) == 0)
2267 basetype = REG_TYPE_NQ;
2276 basereg = arm_reg_parse_multi (&p);
2278 if (basereg && basereg->type != basetype)
2280 as_bad (_("bad type for register"));
2284 if (basereg == NULL)
2287 /* Try parsing as an integer. */
2288 my_get_expression (&exp, &p, GE_NO_PREFIX);
2289 if (exp.X_op != O_constant)
2291 as_bad (_("expression must be constant"));
/* Fake a reg_entry on the stack; Q registers are numbered as D
   pairs, hence the *2 (see parse_vfp_reg_list's "2 * n" note).  */
2294 basereg = &mybasereg;
2295 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2301 typeinfo = *basereg->neon;
2303 if (parse_neon_type (&ntype, &p) == SUCCESS)
2305 /* We got a type. */
2306 if (typeinfo.defined & NTA_HASTYPE)
2308 as_bad (_("can't redefine the type of a register alias"));
2312 typeinfo.defined |= NTA_HASTYPE;
2313 if (ntype.elems != 1)
2315 as_bad (_("you must specify a single type only"));
2318 typeinfo.eltype = ntype.el[0];
2321 if (skip_past_char (&p, '[') == SUCCESS)
2324 /* We got a scalar index. */
2326 if (typeinfo.defined & NTA_HASINDEX)
2328 as_bad (_("can't redefine the index of a scalar alias"));
2332 my_get_expression (&exp, &p, GE_NO_PREFIX);
2334 if (exp.X_op != O_constant)
2336 as_bad (_("scalar index must be constant"));
2340 typeinfo.defined |= NTA_HASINDEX;
2341 typeinfo.index = exp.X_add_number;
2343 if (skip_past_char (&p, ']') == FAIL)
2345 as_bad (_("expecting ]"));
2350 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2351 the desired alias name, and p points to its end. If not, then
2352 the desired alias name is in the global original_case_string. */
2353 #ifdef TC_CASE_SENSITIVE
2354 namelen = nameend - newname;
2356 newname = original_case_string;
2357 namelen = strlen (newname);
2360 namebuf = (char *) alloca (namelen + 1);
2361 strncpy (namebuf, newname, namelen);
2362 namebuf[namelen] = '\0';
2364 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2365 typeinfo.defined != 0 ? &typeinfo : NULL);
2367 /* Insert name in all uppercase. */
2368 for (p = namebuf; *p; p++)
2371 if (strncmp (namebuf, newname, namelen))
2372 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2373 typeinfo.defined != 0 ? &typeinfo : NULL);
2375 /* Insert name in all lowercase. */
2376 for (p = namebuf; *p; p++)
2379 if (strncmp (namebuf, newname, namelen))
2380 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2381 typeinfo.defined != 0 ? &typeinfo : NULL);
2386 /* Should never be called, as .req goes between the alias and the
2387 register name, not at the beginning of the line. */
/* .req handler -- always an error here, since a well-formed .req is
   consumed by create_register_alias before reaching this point (see
   the comment above).  */
2390 s_req (int a ATTRIBUTE_UNUSED)
2392 as_bad (_("invalid syntax for .req directive"));
/* .dn handler -- reached only on malformed input; valid .dn is handled
   by create_neon_reg_alias.  */
2396 s_dn (int a ATTRIBUTE_UNUSED)
2398 as_bad (_("invalid syntax for .dn directive"));
/* .qn handler -- reached only on malformed input; valid .qn is handled
   by create_neon_reg_alias.  */
2402 s_qn (int a ATTRIBUTE_UNUSED)
2404 as_bad (_("invalid syntax for .qn directive"));
2407 /* The .unreq directive deletes an alias which was previously defined
2408 by .req. For example:
/* Handle ".unreq NAME": delete an alias created by .req.  The name is
   temporarily NUL-terminated in place, looked up in arm_reg_hsh, and
   removed (built-ins only warn).  The all-uppercase and all-lowercase
   shadow entries created alongside the original are deleted too,
   silently if already gone.  */
2414 s_unreq (int a ATTRIBUTE_UNUSED)
2419 name = input_line_pointer;
2421 while (*input_line_pointer != 0
2422 && *input_line_pointer != ' '
2423 && *input_line_pointer != '\n')
2424 ++input_line_pointer;
2426 saved_char = *input_line_pointer;
2427 *input_line_pointer = 0;
2430 as_bad (_("invalid syntax for .unreq directive"));
2433 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2437 as_bad (_("unknown register alias '%s'"), name);
2438 else if (reg->builtin)
2439 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2446 hash_delete (arm_reg_hsh, name, FALSE);
2447 free ((char *) reg->name);
2452 /* Also locate the all upper case and all lower case versions.
2453 Do not complain if we cannot find one or the other as it
2454 was probably deleted above. */
2456 nbuf = strdup (name);
2457 for (p = nbuf; *p; p++)
2459 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2462 hash_delete (arm_reg_hsh, nbuf, FALSE);
2463 free ((char *) reg->name);
2469 for (p = nbuf; *p; p++)
2471 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2474 hash_delete (arm_reg_hsh, nbuf, FALSE);
2475 free ((char *) reg->name);
/* Restore the byte we overwrote and require end-of-line.  */
2485 *input_line_pointer = saved_char;
2486 demand_empty_rest_of_line ();
2489 /* Directives: Instruction set selection. */
2492 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2493 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2494 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2495 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2497 /* Create a new mapping symbol for the transition to STATE. */
/* Create an ELF mapping symbol ($a/$t/$d -- see the AAELF note above)
   marking the transition to STATE at offset VALUE within FRAG.  All
   three are untyped (BSF_NO_FLAGS) and local.  ARM/Thumb transitions
   also record the Thumb-func and interworking attributes.  The frag
   remembers its first and last mapping symbols; a new symbol at the
   same offset as the previous one replaces it (zero-sized data case,
   per the long comment below).  */
2500 make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2503 const char * symname;
2510 type = BSF_NO_FLAGS;
2514 type = BSF_NO_FLAGS;
2518 type = BSF_NO_FLAGS;
2524 symbolP = symbol_new (symname, now_seg, value, frag);
2525 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2530 THUMB_SET_FUNC (symbolP, 0);
2531 ARM_SET_THUMB (symbolP, 0);
2532 ARM_SET_INTERWORK (symbolP, support_interwork);
2536 THUMB_SET_FUNC (symbolP, 1);
2537 ARM_SET_THUMB (symbolP, 1);
2538 ARM_SET_INTERWORK (symbolP, support_interwork);
2546 /* Save the mapping symbols for future reference. Also check that
2547 we do not place two mapping symbols at the same offset within a
2548 frag. We'll handle overlap between frags in
2549 check_mapping_symbols.
2551 If .fill or other data filling directive generates zero sized data,
2552 the mapping symbol for the following code will have the same value
2553 as the one generated for the data filling directive. In this case,
2554 we replace the old symbol with the new one at the same address. */
2557 if (frag->tc_frag_data.first_map != NULL)
2559 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
2560 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
2562 frag->tc_frag_data.first_map = symbolP;
2564 if (frag->tc_frag_data.last_map != NULL)
2566 know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
2567 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
2568 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
2570 frag->tc_frag_data.last_map = symbolP;
2573 /* We must sometimes convert a region marked as code to data during
2574 code alignment, if an odd number of bytes have to be padded. The
2575 code mapping symbol is pushed to an aligned address. */
/* Convert BYTES of padding at FRAG+VALUE to data during code alignment
   (see comment above): drop any mapping symbol already at that spot,
   then emit $d at VALUE and a STATE symbol at VALUE + BYTES.  */
2578 insert_data_mapping_symbol (enum mstate state,
2579 valueT value, fragS *frag, offsetT bytes)
2581 /* If there was already a mapping symbol, remove it. */
2582 if (frag->tc_frag_data.last_map != NULL
2583 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2585 symbolS *symp = frag->tc_frag_data.last_map;
2589 know (frag->tc_frag_data.first_map == symp);
2590 frag->tc_frag_data.first_map = NULL;
2592 frag->tc_frag_data.last_map = NULL;
2593 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2596 make_mapping_symbol (MAP_DATA, value, frag);
2597 make_mapping_symbol (state, value + bytes, frag);
2600 static void mapping_state_2 (enum mstate state, int max_chars);
2602 /* Set the mapping state to STATE. Only call this when about to
2603 emit some STATE bytes to the file. */
/* Set the per-segment mapping state to STATE before emitting STATE
   bytes (see comment above).  No-op if the state is unchanged.  For
   ARM/Thumb, also record 4-/2-byte section alignment.  An initial
   UNDEFINED->ARM/THUMB transition at a nonzero offset first plants a
   $d symbol at offset 0 so the preceding bytes are marked as data.
   Delegates the actual symbol emission to mapping_state_2.  */
2606 mapping_state (enum mstate state)
2608 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2610 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2612 if (mapstate == state)
2613 /* The mapping symbol has already been emitted.
2614 There is nothing else to do. */
2617 if (state == MAP_ARM || state == MAP_THUMB)
2619 All ARM instructions require 4-byte alignment.
2620 (Almost) all Thumb instructions require 2-byte alignment.
2622 When emitting instructions into any section, mark the section
2625 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2626 but themselves require 2-byte alignment; this applies to some
2627 PC- relative forms. However, these cases will invovle implicit
2628 literal pool generation or an explicit .align >=2, both of
2629 which will cause the section to me marked with sufficient
2630 alignment. Thus, we don't handle those cases here. */
2631 record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2633 if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2634 /* This case will be evaluated later in the next else. */
2636 else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2637 || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2639 /* Only add the symbol if the offset is > 0:
2640 if we're at the first frag, check it's size > 0;
2641 if we're not at the first frag, then for sure
2642 the offset is > 0. */
2643 struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2644 const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2647 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2650 mapping_state_2 (state, 0);
2654 /* Same as mapping_state, but MAX_CHARS bytes have already been
2655 allocated. Put the mapping symbol that far back. */
/* As mapping_state, but MAX_CHARS bytes were already allocated in the
   current frag, so the mapping symbol is placed that far back.  Skips
   non-normal segments and redundant transitions.  */
2658 mapping_state_2 (enum mstate state, int max_chars)
2660 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2662 if (!SEG_NORMAL (now_seg))
2665 if (mapstate == state)
2666 /* The mapping symbol has already been emitted.
2667 There is nothing else to do. */
2670 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2671 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2674 #define mapping_state(x) ((void)0)
2675 #define mapping_state_2(x, y) ((void)0)
2678 /* Find the real, Thumb encoded start of a Thumb function. */
/* Find the Thumb-encoded real start of a Thumb function: look for a
   ".real_start_of<name>" symbol (naming convention shared with GCC,
   per the STUB_NAME comment).  Local symbols and dot-prefixed names
   bypass the convention; if no stub symbol exists we warn and fall
   back to SYMBOLP itself.  */
2682 find_real_start (symbolS * symbolP)
2685 const char * name = S_GET_NAME (symbolP);
2686 symbolS * new_target;
2688 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2689 #define STUB_NAME ".real_start_of"
2694 /* The compiler may generate BL instructions to local labels because
2695 it needs to perform a branch to a far away location. These labels
2696 do not have a corresponding ".real_start_of" label. We check
2697 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2698 the ".real_start_of" convention for nonlocal branches. */
2699 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2702 real_start = ACONCAT ((STUB_NAME, name, NULL));
2703 new_target = symbol_find (real_start);
2705 if (new_target == NULL)
2707 as_warn (_("Failed to find real start of function: %s\n"), name);
2708 new_target = symbolP;
/* Switch the assembler between Thumb (WIDTH == 16) and ARM
   (WIDTH == 32) instruction modes, checking the selected CPU supports
   the target mode; ARM mode forces word alignment, Thumb only records
   2-byte alignment (already word-aligned coming from ARM, per the
   comment).  Any other WIDTH is an error.  */
2716 opcode_select (int width)
2723 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2724 as_bad (_("selected processor does not support THUMB opcodes"));
2727 /* No need to force the alignment, since we will have been
2728 coming from ARM mode, which is word-aligned. */
2729 record_alignment (now_seg, 1);
2736 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2737 as_bad (_("selected processor does not support ARM opcodes"));
2742 frag_align (2, 0, 0);
2744 record_alignment (now_seg, 1);
2749 as_bad (_("invalid instruction size selected (%d)"), width);
/* ".arm" directive: presumably selects 32-bit mode via opcode_select
   (call elided in this fragment) then requires end of line.  */
2754 s_arm (int ignore ATTRIBUTE_UNUSED)
2757 demand_empty_rest_of_line ();
/* ".thumb" directive: presumably selects 16-bit mode via opcode_select
   (call elided in this fragment) then requires end of line.  */
2761 s_thumb (int ignore ATTRIBUTE_UNUSED)
2764 demand_empty_rest_of_line ();
/* ".code 16|32" directive: read the width and dispatch to
   opcode_select; any other value is rejected.  */
2768 s_code (int unused ATTRIBUTE_UNUSED)
2772 temp = get_absolute_expression ();
2777 opcode_select (temp);
2781 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
/* ".force_thumb": enter Thumb mode even if the selected processor does
   not support Thumb -- used by gcc's lib1funcs.asm for interworking
   helpers (see comment).  Records 2-byte alignment.  */
2786 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2788 /* If we are not already in thumb mode go into it, EVEN if
2789 the target processor does not support thumb instructions.
2790 This is used by gcc/config/arm/lib1funcs.asm for example
2791 to compile interworking support functions even if the
2792 target processor should not support interworking. */
2796 record_alignment (now_seg, 1);
2799 demand_empty_rest_of_line ();
/* ".thumb_func": flag that the next label names the start of a Thumb
   function, needed for interworking support (see comment).  */
2803 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2807 /* The following label is the name/address of the start of a Thumb function.
2808 We need to know this for the interworking support. */
2809 label_is_thumb_function_name = TRUE;
2812 /* Perform a .set directive, but also mark the alias as
2813 being a thumb function. */
/* ".thumb_set": perform a .set (duplicating read.c's s_set logic, per
   the XXX comment, because we need the resulting symbol) and then mark
   the alias as a Thumb function with interworking attributes.  EQUIV
   presumably distinguishes .set/.equ-style semantics -- its use is in
   the elided lines; TODO confirm against the full source.  */
2816 s_thumb_set (int equiv)
2818 /* XXX the following is a duplicate of the code for s_set() in read.c
2819 We cannot just call that code as we need to get at the symbol that
2826 /* Especial apologies for the random logic:
2827 This just grew, and could be parsed much more simply!
2829 name = input_line_pointer;
2830 delim = get_symbol_end ();
2831 end_name = input_line_pointer;
2834 if (*input_line_pointer != ',')
2837 as_bad (_("expected comma after name \"%s\""), name);
2839 ignore_rest_of_line ();
2843 input_line_pointer++;
2846 if (name[0] == '.' && name[1] == '\0')
2848 /* XXX - this should not happen to .thumb_set. */
2852 if ((symbolP = symbol_find (name)) == NULL
2853 && (symbolP = md_undefined_symbol (name)) == NULL)
2856 /* When doing symbol listings, play games with dummy fragments living
2857 outside the normal fragment chain to record the file and line info
2859 if (listing & LISTING_SYMBOLS)
2861 extern struct list_info_struct * listing_tail;
2862 fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2864 memset (dummy_frag, 0, sizeof (fragS));
2865 dummy_frag->fr_type = rs_fill;
2866 dummy_frag->line = listing_tail;
2867 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2868 dummy_frag->fr_symbol = symbolP;
2872 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2875 /* "set" symbols are local unless otherwise specified. */
2876 SF_SET_LOCAL (symbolP);
2877 #endif /* OBJ_COFF */
2878 } /* Make a new symbol. */
2880 symbol_table_insert (symbolP);
2885 && S_IS_DEFINED (symbolP)
2886 && S_GET_SEGMENT (symbolP) != reg_section)
2887 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2889 pseudo_set (symbolP);
2891 demand_empty_rest_of_line ();
2893 /* XXX Now we come to the Thumb specific bit of code. */
2895 THUMB_SET_FUNC (symbolP, 1);
2896 ARM_SET_THUMB (symbolP, 1);
2897 #if defined OBJ_ELF || defined OBJ_COFF
2898 ARM_SET_INTERWORK (symbolP, support_interwork);
2902 /* Directives: Mode selection. */
2904 /* .syntax [unified|divided] - choose the new unified syntax
2905 (same for Arm and Thumb encoding, modulo slight differences in what
2906 can be represented) or the old divergent syntax for each mode. */
2908 s_syntax (int unused ATTRIBUTE_UNUSED)
2912 name = input_line_pointer;
2913 delim = get_symbol_end ();
2915 if (!strcasecmp (name, "unified"))
2916 unified_syntax = TRUE;
2917 else if (!strcasecmp (name, "divided"))
2918 unified_syntax = FALSE;
2921 as_bad (_("unrecognized syntax mode \"%s\""), name);
2924 *input_line_pointer = delim;
2925 demand_empty_rest_of_line ();
2928 /* Directives: sectioning and alignment. */
2930 /* Same as s_align_ptwo but align 0 => align 2. */
2933 s_align (int unused ATTRIBUTE_UNUSED)
2938 long max_alignment = 15;
2940 temp = get_absolute_expression ();
2941 if (temp > max_alignment)
2942 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2945 as_bad (_("alignment negative. 0 assumed."));
2949 if (*input_line_pointer == ',')
2951 input_line_pointer++;
2952 temp_fill = get_absolute_expression ();
2964 /* Only make a frag if we HAVE to. */
2965 if (temp && !need_pass_2)
2967 if (!fill_p && subseg_text_p (now_seg))
2968 frag_align_code (temp, 0);
2970 frag_align (temp, (int) temp_fill, 0);
2972 demand_empty_rest_of_line ();
2974 record_alignment (now_seg, temp);
2978 s_bss (int ignore ATTRIBUTE_UNUSED)
2980 /* We don't support putting frags in the BSS segment; we fake it by
2981 marking in_bss, then looking at s_skip for clues. */
2982 subseg_set (bss_section, 0);
2983 demand_empty_rest_of_line ();
2985 #ifdef md_elf_section_change_hook
2986 md_elf_section_change_hook ();
2991 s_even (int ignore ATTRIBUTE_UNUSED)
2993 /* Never make frag if expect extra pass. */
2995 frag_align (1, 0, 0);
2997 record_alignment (now_seg, 1);
2999 demand_empty_rest_of_line ();
3002 /* Directives: Literal pools. */
3004 static literal_pool *
3005 find_literal_pool (void)
3007 literal_pool * pool;
3009 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3011 if (pool->section == now_seg
3012 && pool->sub_section == now_subseg)
3019 static literal_pool *
3020 find_or_make_literal_pool (void)
3022 /* Next literal pool ID number. */
3023 static unsigned int latest_pool_num = 1;
3024 literal_pool * pool;
3026 pool = find_literal_pool ();
3030 /* Create a new pool. */
3031 pool = (literal_pool *) xmalloc (sizeof (* pool));
3035 pool->next_free_entry = 0;
3036 pool->section = now_seg;
3037 pool->sub_section = now_subseg;
3038 pool->next = list_of_pools;
3039 pool->symbol = NULL;
3041 /* Add it to the list. */
3042 list_of_pools = pool;
3045 /* New pools, and emptied pools, will have a NULL symbol. */
3046 if (pool->symbol == NULL)
3048 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3049 (valueT) 0, &zero_address_frag);
3050 pool->id = latest_pool_num ++;
3057 /* Add the literal in the global 'inst'
3058 structure to the relevant literal pool. */
3061 add_to_lit_pool (void)
3063 literal_pool * pool;
3066 pool = find_or_make_literal_pool ();
3068 /* Check if this literal value is already in the pool. */
3069 for (entry = 0; entry < pool->next_free_entry; entry ++)
3071 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3072 && (inst.reloc.exp.X_op == O_constant)
3073 && (pool->literals[entry].X_add_number
3074 == inst.reloc.exp.X_add_number)
3075 && (pool->literals[entry].X_unsigned
3076 == inst.reloc.exp.X_unsigned))
3079 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3080 && (inst.reloc.exp.X_op == O_symbol)
3081 && (pool->literals[entry].X_add_number
3082 == inst.reloc.exp.X_add_number)
3083 && (pool->literals[entry].X_add_symbol
3084 == inst.reloc.exp.X_add_symbol)
3085 && (pool->literals[entry].X_op_symbol
3086 == inst.reloc.exp.X_op_symbol))
3090 /* Do we need to create a new entry? */
3091 if (entry == pool->next_free_entry)
3093 if (entry >= MAX_LITERAL_POOL_SIZE)
3095 inst.error = _("literal pool overflow");
3099 pool->literals[entry] = inst.reloc.exp;
3101 /* PR ld/12974: Record the location of the first source line to reference
3102 this entry in the literal pool. If it turns out during linking that the
3103 symbol does not exist we will be able to give an accurate line number for
3104 the (first use of the) missing reference. */
3105 if (debug_type == DEBUG_DWARF2)
3106 dwarf2_where (pool->locs + entry);
3108 pool->next_free_entry += 1;
3111 inst.reloc.exp.X_op = O_symbol;
3112 inst.reloc.exp.X_add_number = ((int) entry) * 4;
3113 inst.reloc.exp.X_add_symbol = pool->symbol;
3118 /* Can't use symbol_new here, so have to create a symbol and then at
3119 a later date assign it a value. That's what these functions do. */
3122 symbol_locate (symbolS * symbolP,
3123 const char * name, /* It is copied, the caller can modify. */
3124 segT segment, /* Segment identifier (SEG_<something>). */
3125 valueT valu, /* Symbol value. */
3126 fragS * frag) /* Associated fragment. */
3128 unsigned int name_length;
3129 char * preserved_copy_of_name;
3131 name_length = strlen (name) + 1; /* +1 for \0. */
3132 obstack_grow (¬es, name, name_length);
3133 preserved_copy_of_name = (char *) obstack_finish (¬es);
3135 #ifdef tc_canonicalize_symbol_name
3136 preserved_copy_of_name =
3137 tc_canonicalize_symbol_name (preserved_copy_of_name);
3140 S_SET_NAME (symbolP, preserved_copy_of_name);
3142 S_SET_SEGMENT (symbolP, segment);
3143 S_SET_VALUE (symbolP, valu);
3144 symbol_clear_list_pointers (symbolP);
3146 symbol_set_frag (symbolP, frag);
3148 /* Link to end of symbol chain. */
3150 extern int symbol_table_frozen;
3152 if (symbol_table_frozen)
3156 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3158 obj_symbol_new_hook (symbolP);
3160 #ifdef tc_symbol_new_hook
3161 tc_symbol_new_hook (symbolP);
3165 verify_symbol_chain (symbol_rootP, symbol_lastP);
3166 #endif /* DEBUG_SYMS */
3171 s_ltorg (int ignored ATTRIBUTE_UNUSED)
3174 literal_pool * pool;
3177 pool = find_literal_pool ();
3179 || pool->symbol == NULL
3180 || pool->next_free_entry == 0)
3183 mapping_state (MAP_DATA);
3185 /* Align pool as you have word accesses.
3186 Only make a frag if we have to. */
3188 frag_align (2, 0, 0);
3190 record_alignment (now_seg, 2);
3192 sprintf (sym_name, "$$lit_\002%x", pool->id);
3194 symbol_locate (pool->symbol, sym_name, now_seg,
3195 (valueT) frag_now_fix (), frag_now);
3196 symbol_table_insert (pool->symbol);
3198 ARM_SET_THUMB (pool->symbol, thumb_mode);
3200 #if defined OBJ_COFF || defined OBJ_ELF
3201 ARM_SET_INTERWORK (pool->symbol, support_interwork);
3204 for (entry = 0; entry < pool->next_free_entry; entry ++)
3207 if (debug_type == DEBUG_DWARF2)
3208 dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3210 /* First output the expression in the instruction to the pool. */
3211 emit_expr (&(pool->literals[entry]), 4); /* .word */
3214 /* Mark the pool as empty. */
3215 pool->next_free_entry = 0;
3216 pool->symbol = NULL;
3220 /* Forward declarations for functions below, in the MD interface
3222 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3223 static valueT create_unwind_entry (int);
3224 static void start_unwind_section (const segT, int);
3225 static void add_unwind_opcode (valueT, int);
3226 static void flush_pending_unwind (void);
3228 /* Directives: Data. */
3231 s_arm_elf_cons (int nbytes)
3235 #ifdef md_flush_pending_output
3236 md_flush_pending_output ();
3239 if (is_it_end_of_statement ())
3241 demand_empty_rest_of_line ();
3245 #ifdef md_cons_align
3246 md_cons_align (nbytes);
3249 mapping_state (MAP_DATA);
3253 char *base = input_line_pointer;
3257 if (exp.X_op != O_symbol)
3258 emit_expr (&exp, (unsigned int) nbytes);
3261 char *before_reloc = input_line_pointer;
3262 reloc = parse_reloc (&input_line_pointer);
3265 as_bad (_("unrecognized relocation suffix"));
3266 ignore_rest_of_line ();
3269 else if (reloc == BFD_RELOC_UNUSED)
3270 emit_expr (&exp, (unsigned int) nbytes);
3273 reloc_howto_type *howto = (reloc_howto_type *)
3274 bfd_reloc_type_lookup (stdoutput,
3275 (bfd_reloc_code_real_type) reloc);
3276 int size = bfd_get_reloc_size (howto);
3278 if (reloc == BFD_RELOC_ARM_PLT32)
3280 as_bad (_("(plt) is only valid on branch targets"));
3281 reloc = BFD_RELOC_UNUSED;
3286 as_bad (_("%s relocations do not fit in %d bytes"),
3287 howto->name, nbytes);
3290 /* We've parsed an expression stopping at O_symbol.
3291 But there may be more expression left now that we
3292 have parsed the relocation marker. Parse it again.
3293 XXX Surely there is a cleaner way to do this. */
3294 char *p = input_line_pointer;
3296 char *save_buf = (char *) alloca (input_line_pointer - base);
3297 memcpy (save_buf, base, input_line_pointer - base);
3298 memmove (base + (input_line_pointer - before_reloc),
3299 base, before_reloc - base);
3301 input_line_pointer = base + (input_line_pointer-before_reloc);
3303 memcpy (base, save_buf, p - base);
3305 offset = nbytes - size;
3306 p = frag_more ((int) nbytes);
3307 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3308 size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3313 while (*input_line_pointer++ == ',');
3315 /* Put terminator back into stream. */
3316 input_line_pointer --;
3317 demand_empty_rest_of_line ();
3320 /* Emit an expression containing a 32-bit thumb instruction.
3321 Implementation based on put_thumb32_insn. */
3324 emit_thumb32_expr (expressionS * exp)
3326 expressionS exp_high = *exp;
3328 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3329 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3330 exp->X_add_number &= 0xffff;
3331 emit_expr (exp, (unsigned int) THUMB_SIZE);
3334 /* Guess the instruction size based on the opcode. */
3337 thumb_insn_size (int opcode)
3339 if ((unsigned int) opcode < 0xe800u)
3341 else if ((unsigned int) opcode >= 0xe8000000u)
3348 emit_insn (expressionS *exp, int nbytes)
3352 if (exp->X_op == O_constant)
3357 size = thumb_insn_size (exp->X_add_number);
3361 if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3363 as_bad (_(".inst.n operand too big. "\
3364 "Use .inst.w instead"));
3369 if (now_it.state == AUTOMATIC_IT_BLOCK)
3370 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3372 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3374 if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3375 emit_thumb32_expr (exp);
3377 emit_expr (exp, (unsigned int) size);
3379 it_fsm_post_encode ();
3383 as_bad (_("cannot determine Thumb instruction size. " \
3384 "Use .inst.n/.inst.w instead"));
3387 as_bad (_("constant expression required"));
3392 /* Like s_arm_elf_cons but do not use md_cons_align and
3393 set the mapping state to MAP_ARM/MAP_THUMB. */
3396 s_arm_elf_inst (int nbytes)
3398 if (is_it_end_of_statement ())
3400 demand_empty_rest_of_line ();
3404 /* Calling mapping_state () here will not change ARM/THUMB,
3405 but will ensure not to be in DATA state. */
3408 mapping_state (MAP_THUMB);
3413 as_bad (_("width suffixes are invalid in ARM mode"));
3414 ignore_rest_of_line ();
3420 mapping_state (MAP_ARM);
3429 if (! emit_insn (& exp, nbytes))
3431 ignore_rest_of_line ();
3435 while (*input_line_pointer++ == ',');
3437 /* Put terminator back into stream. */
3438 input_line_pointer --;
3439 demand_empty_rest_of_line ();
3442 /* Parse a .rel31 directive. */
3445 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3452 if (*input_line_pointer == '1')
3453 highbit = 0x80000000;
3454 else if (*input_line_pointer != '0')
3455 as_bad (_("expected 0 or 1"));
3457 input_line_pointer++;
3458 if (*input_line_pointer != ',')
3459 as_bad (_("missing comma"));
3460 input_line_pointer++;
3462 #ifdef md_flush_pending_output
3463 md_flush_pending_output ();
3466 #ifdef md_cons_align
3470 mapping_state (MAP_DATA);
3475 md_number_to_chars (p, highbit, 4);
3476 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3477 BFD_RELOC_ARM_PREL31);
3479 demand_empty_rest_of_line ();
3482 /* Directives: AEABI stack-unwind tables. */
3484 /* Parse an unwind_fnstart directive. Simply records the current location. */
3487 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3489 demand_empty_rest_of_line ();
3490 if (unwind.proc_start)
3492 as_bad (_("duplicate .fnstart directive"));
3496 /* Mark the start of the function. */
3497 unwind.proc_start = expr_build_dot ();
3499 /* Reset the rest of the unwind info. */
3500 unwind.opcode_count = 0;
3501 unwind.table_entry = NULL;
3502 unwind.personality_routine = NULL;
3503 unwind.personality_index = -1;
3504 unwind.frame_size = 0;
3505 unwind.fp_offset = 0;
3506 unwind.fp_reg = REG_SP;
3508 unwind.sp_restored = 0;
3512 /* Parse a handlerdata directive. Creates the exception handling table entry
3513 for the function. */
3516 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3518 demand_empty_rest_of_line ();
3519 if (!unwind.proc_start)
3520 as_bad (MISSING_FNSTART);
3522 if (unwind.table_entry)
3523 as_bad (_("duplicate .handlerdata directive"));
3525 create_unwind_entry (1);
3528 /* Parse an unwind_fnend directive. Generates the index table entry. */
3531 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3536 unsigned int marked_pr_dependency;
3538 demand_empty_rest_of_line ();
3540 if (!unwind.proc_start)
3542 as_bad (_(".fnend directive without .fnstart"));
3546 /* Add eh table entry. */
3547 if (unwind.table_entry == NULL)
3548 val = create_unwind_entry (0);
3552 /* Add index table entry. This is two words. */
3553 start_unwind_section (unwind.saved_seg, 1);
3554 frag_align (2, 0, 0);
3555 record_alignment (now_seg, 2);
3557 ptr = frag_more (8);
3559 where = frag_now_fix () - 8;
3561 /* Self relative offset of the function start. */
3562 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3563 BFD_RELOC_ARM_PREL31);
3565 /* Indicate dependency on EHABI-defined personality routines to the
3566 linker, if it hasn't been done already. */
3567 marked_pr_dependency
3568 = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
3569 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3570 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3572 static const char *const name[] =
3574 "__aeabi_unwind_cpp_pr0",
3575 "__aeabi_unwind_cpp_pr1",
3576 "__aeabi_unwind_cpp_pr2"
3578 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3579 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3580 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3581 |= 1 << unwind.personality_index;
3585 /* Inline exception table entry. */
3586 md_number_to_chars (ptr + 4, val, 4);
3588 /* Self relative offset of the table entry. */
3589 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3590 BFD_RELOC_ARM_PREL31);
3592 /* Restore the original section. */
3593 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3595 unwind.proc_start = NULL;
3599 /* Parse an unwind_cantunwind directive. */
3602 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3604 demand_empty_rest_of_line ();
3605 if (!unwind.proc_start)
3606 as_bad (MISSING_FNSTART);
3608 if (unwind.personality_routine || unwind.personality_index != -1)
3609 as_bad (_("personality routine specified for cantunwind frame"));
3611 unwind.personality_index = -2;
3615 /* Parse a personalityindex directive. */
3618 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3622 if (!unwind.proc_start)
3623 as_bad (MISSING_FNSTART);
3625 if (unwind.personality_routine || unwind.personality_index != -1)
3626 as_bad (_("duplicate .personalityindex directive"));
3630 if (exp.X_op != O_constant
3631 || exp.X_add_number < 0 || exp.X_add_number > 15)
3633 as_bad (_("bad personality routine number"));
3634 ignore_rest_of_line ();
3638 unwind.personality_index = exp.X_add_number;
3640 demand_empty_rest_of_line ();
3644 /* Parse a personality directive. */
3647 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3651 if (!unwind.proc_start)
3652 as_bad (MISSING_FNSTART);
3654 if (unwind.personality_routine || unwind.personality_index != -1)
3655 as_bad (_("duplicate .personality directive"));
3657 name = input_line_pointer;
3658 c = get_symbol_end ();
3659 p = input_line_pointer;
3660 unwind.personality_routine = symbol_find_or_make (name);
3662 demand_empty_rest_of_line ();
3666 /* Parse a directive saving core registers. */
3669 s_arm_unwind_save_core (void)
3675 range = parse_reg_list (&input_line_pointer);
3678 as_bad (_("expected register list"));
3679 ignore_rest_of_line ();
3683 demand_empty_rest_of_line ();
3685 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3686 into .unwind_save {..., sp...}. We aren't bothered about the value of
3687 ip because it is clobbered by calls. */
3688 if (unwind.sp_restored && unwind.fp_reg == 12
3689 && (range & 0x3000) == 0x1000)
3691 unwind.opcode_count--;
3692 unwind.sp_restored = 0;
3693 range = (range | 0x2000) & ~0x1000;
3694 unwind.pending_offset = 0;
3700 /* See if we can use the short opcodes. These pop a block of up to 8
3701 registers starting with r4, plus maybe r14. */
3702 for (n = 0; n < 8; n++)
3704 /* Break at the first non-saved register. */
3705 if ((range & (1 << (n + 4))) == 0)
3708 /* See if there are any other bits set. */
3709 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3711 /* Use the long form. */
3712 op = 0x8000 | ((range >> 4) & 0xfff);
3713 add_unwind_opcode (op, 2);
3717 /* Use the short form. */
3719 op = 0xa8; /* Pop r14. */
3721 op = 0xa0; /* Do not pop r14. */
3723 add_unwind_opcode (op, 1);
3730 op = 0xb100 | (range & 0xf);
3731 add_unwind_opcode (op, 2);
3734 /* Record the number of bytes pushed. */
3735 for (n = 0; n < 16; n++)
3737 if (range & (1 << n))
3738 unwind.frame_size += 4;
3743 /* Parse a directive saving FPA registers. */
3746 s_arm_unwind_save_fpa (int reg)
3752 /* Get Number of registers to transfer. */
3753 if (skip_past_comma (&input_line_pointer) != FAIL)
3756 exp.X_op = O_illegal;
3758 if (exp.X_op != O_constant)
3760 as_bad (_("expected , <constant>"));
3761 ignore_rest_of_line ();
3765 num_regs = exp.X_add_number;
3767 if (num_regs < 1 || num_regs > 4)
3769 as_bad (_("number of registers must be in the range [1:4]"));
3770 ignore_rest_of_line ();
3774 demand_empty_rest_of_line ();
3779 op = 0xb4 | (num_regs - 1);
3780 add_unwind_opcode (op, 1);
3785 op = 0xc800 | (reg << 4) | (num_regs - 1);
3786 add_unwind_opcode (op, 2);
3788 unwind.frame_size += num_regs * 12;
3792 /* Parse a directive saving VFP registers for ARMv6 and above. */
3795 s_arm_unwind_save_vfp_armv6 (void)
3800 int num_vfpv3_regs = 0;
3801 int num_regs_below_16;
3803 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3806 as_bad (_("expected register list"));
3807 ignore_rest_of_line ();
3811 demand_empty_rest_of_line ();
3813 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3814 than FSTMX/FLDMX-style ones). */
3816 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3818 num_vfpv3_regs = count;
3819 else if (start + count > 16)
3820 num_vfpv3_regs = start + count - 16;
3822 if (num_vfpv3_regs > 0)
3824 int start_offset = start > 16 ? start - 16 : 0;
3825 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3826 add_unwind_opcode (op, 2);
3829 /* Generate opcode for registers numbered in the range 0 .. 15. */
3830 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3831 gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
3832 if (num_regs_below_16 > 0)
3834 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3835 add_unwind_opcode (op, 2);
3838 unwind.frame_size += count * 8;
3842 /* Parse a directive saving VFP registers for pre-ARMv6. */
3845 s_arm_unwind_save_vfp (void)
3851 count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D);
3854 as_bad (_("expected register list"));
3855 ignore_rest_of_line ();
3859 demand_empty_rest_of_line ();
3864 op = 0xb8 | (count - 1);
3865 add_unwind_opcode (op, 1);
3870 op = 0xb300 | (reg << 4) | (count - 1);
3871 add_unwind_opcode (op, 2);
3873 unwind.frame_size += count * 8 + 4;
3877 /* Parse a directive saving iWMMXt data registers. */
3880 s_arm_unwind_save_mmxwr (void)
3888 if (*input_line_pointer == '{')
3889 input_line_pointer++;
3893 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3897 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3902 as_tsktsk (_("register list not in ascending order"));
3905 if (*input_line_pointer == '-')
3907 input_line_pointer++;
3908 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3911 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
3914 else if (reg >= hi_reg)
3916 as_bad (_("bad register range"));
3919 for (; reg < hi_reg; reg++)
3923 while (skip_past_comma (&input_line_pointer) != FAIL);
3925 if (*input_line_pointer == '}')
3926 input_line_pointer++;
3928 demand_empty_rest_of_line ();
3930 /* Generate any deferred opcodes because we're going to be looking at
3932 flush_pending_unwind ();
3934 for (i = 0; i < 16; i++)
3936 if (mask & (1 << i))
3937 unwind.frame_size += 8;
3940 /* Attempt to combine with a previous opcode. We do this because gcc
3941 likes to output separate unwind directives for a single block of
3943 if (unwind.opcode_count > 0)
3945 i = unwind.opcodes[unwind.opcode_count - 1];
3946 if ((i & 0xf8) == 0xc0)
3949 /* Only merge if the blocks are contiguous. */
3952 if ((mask & 0xfe00) == (1 << 9))
3954 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3955 unwind.opcode_count--;
3958 else if (i == 6 && unwind.opcode_count >= 2)
3960 i = unwind.opcodes[unwind.opcode_count - 2];
3964 op = 0xffff << (reg - 1);
3966 && ((mask & op) == (1u << (reg - 1))))
3968 op = (1 << (reg + i + 1)) - 1;
3969 op &= ~((1 << reg) - 1);
3971 unwind.opcode_count -= 2;
3978 /* We want to generate opcodes in the order the registers have been
3979 saved, ie. descending order. */
3980 for (reg = 15; reg >= -1; reg--)
3982 /* Save registers in blocks. */
3984 || !(mask & (1 << reg)))
3986 /* We found an unsaved reg. Generate opcodes to save the
3993 op = 0xc0 | (hi_reg - 10);
3994 add_unwind_opcode (op, 1);
3999 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4000 add_unwind_opcode (op, 2);
4009 ignore_rest_of_line ();
4013 s_arm_unwind_save_mmxwcg (void)
4020 if (*input_line_pointer == '{')
4021 input_line_pointer++;
4025 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4029 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4035 as_tsktsk (_("register list not in ascending order"));
4038 if (*input_line_pointer == '-')
4040 input_line_pointer++;
4041 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4044 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4047 else if (reg >= hi_reg)
4049 as_bad (_("bad register range"));
4052 for (; reg < hi_reg; reg++)
4056 while (skip_past_comma (&input_line_pointer) != FAIL);
4058 if (*input_line_pointer == '}')
4059 input_line_pointer++;
4061 demand_empty_rest_of_line ();
4063 /* Generate any deferred opcodes because we're going to be looking at
4065 flush_pending_unwind ();
4067 for (reg = 0; reg < 16; reg++)
4069 if (mask & (1 << reg))
4070 unwind.frame_size += 4;
4073 add_unwind_opcode (op, 2);
4076 ignore_rest_of_line ();
4080 /* Parse an unwind_save directive.
4081 If the argument is non-zero, this is a .vsave directive. */
4084 s_arm_unwind_save (int arch_v6)
4087 struct reg_entry *reg;
4088 bfd_boolean had_brace = FALSE;
4090 if (!unwind.proc_start)
4091 as_bad (MISSING_FNSTART);
4093 /* Figure out what sort of save we have. */
4094 peek = input_line_pointer;
4102 reg = arm_reg_parse_multi (&peek);
4106 as_bad (_("register expected"));
4107 ignore_rest_of_line ();
4116 as_bad (_("FPA .unwind_save does not take a register list"));
4117 ignore_rest_of_line ();
4120 input_line_pointer = peek;
4121 s_arm_unwind_save_fpa (reg->number);
4124 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
4127 s_arm_unwind_save_vfp_armv6 ();
4129 s_arm_unwind_save_vfp ();
4131 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
4132 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
4135 as_bad (_(".unwind_save does not support this kind of register"));
4136 ignore_rest_of_line ();
4141 /* Parse an unwind_movsp directive. */
4144 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4150 if (!unwind.proc_start)
4151 as_bad (MISSING_FNSTART);
4153 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4156 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4157 ignore_rest_of_line ();
4161 /* Optional constant. */
4162 if (skip_past_comma (&input_line_pointer) != FAIL)
4164 if (immediate_for_directive (&offset) == FAIL)
4170 demand_empty_rest_of_line ();
4172 if (reg == REG_SP || reg == REG_PC)
4174 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4178 if (unwind.fp_reg != REG_SP)
4179 as_bad (_("unexpected .unwind_movsp directive"));
4181 /* Generate opcode to restore the value. */
4183 add_unwind_opcode (op, 1);
4185 /* Record the information for later. */
4186 unwind.fp_reg = reg;
4187 unwind.fp_offset = unwind.frame_size - offset;
4188 unwind.sp_restored = 1;
4191 /* Parse an unwind_pad directive. */
4194 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4198 if (!unwind.proc_start)
4199 as_bad (MISSING_FNSTART);
4201 if (immediate_for_directive (&offset) == FAIL)
4206 as_bad (_("stack increment must be multiple of 4"));
4207 ignore_rest_of_line ();
4211 /* Don't generate any opcodes, just record the details for later. */
4212 unwind.frame_size += offset;
4213 unwind.pending_offset += offset;
4215 demand_empty_rest_of_line ();
4218 /* Parse an unwind_setfp directive. */
4221 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4227 if (!unwind.proc_start)
4228 as_bad (MISSING_FNSTART);
4230 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4231 if (skip_past_comma (&input_line_pointer) == FAIL)
4234 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4236 if (fp_reg == FAIL || sp_reg == FAIL)
4238 as_bad (_("expected <reg>, <reg>"));
4239 ignore_rest_of_line ();
4243 /* Optional constant. */
4244 if (skip_past_comma (&input_line_pointer) != FAIL)
4246 if (immediate_for_directive (&offset) == FAIL)
4252 demand_empty_rest_of_line ();
4254 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4256 as_bad (_("register must be either sp or set by a previous"
4257 "unwind_movsp directive"));
4261 /* Don't generate any opcodes, just record the information for later. */
4262 unwind.fp_reg = fp_reg;
4264 if (sp_reg == REG_SP)
4265 unwind.fp_offset = unwind.frame_size - offset;
4267 unwind.fp_offset -= offset;
4270 /* Parse an unwind_raw directive. */
4273 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4276 /* This is an arbitrary limit. */
4277 unsigned char op[16];
4280 if (!unwind.proc_start)
4281 as_bad (MISSING_FNSTART);
4284 if (exp.X_op == O_constant
4285 && skip_past_comma (&input_line_pointer) != FAIL)
4287 unwind.frame_size += exp.X_add_number;
4291 exp.X_op = O_illegal;
4293 if (exp.X_op != O_constant)
4295 as_bad (_("expected <offset>, <opcode>"));
4296 ignore_rest_of_line ();
4302 /* Parse the opcode. */
4307 as_bad (_("unwind opcode too long"));
4308 ignore_rest_of_line ();
4310 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4312 as_bad (_("invalid unwind opcode"));
4313 ignore_rest_of_line ();
4316 op[count++] = exp.X_add_number;
4318 /* Parse the next byte. */
4319 if (skip_past_comma (&input_line_pointer) == FAIL)
4325 /* Add the opcode bytes in reverse order. */
4327 add_unwind_opcode (op[count], 1);
4329 demand_empty_rest_of_line ();
4333 /* Parse a .eabi_attribute directive. */
4336 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4338 int tag = s_vendor_attribute (OBJ_ATTR_PROC);
4340 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4341 attributes_set_explicitly[tag] = 1;
4344 /* Emit a tls fix for the symbol. */
4347 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4351 #ifdef md_flush_pending_output
4352 md_flush_pending_output ();
4355 #ifdef md_cons_align
4359 /* Since we're just labelling the code, there's no need to define a
4362 p = obstack_next_free (&frchain_now->frch_obstack);
4363 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4364 thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4365 : BFD_RELOC_ARM_TLS_DESCSEQ);
4367 #endif /* OBJ_ELF */
4369 static void s_arm_arch (int);
4370 static void s_arm_object_arch (int);
4371 static void s_arm_cpu (int);
4372 static void s_arm_fpu (int);
4373 static void s_arm_arch_extension (int);
4378 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4385 if (exp.X_op == O_symbol)
4386 exp.X_op = O_secrel;
4388 emit_expr (&exp, 4);
4390 while (*input_line_pointer++ == ',');
4392 input_line_pointer--;
4393 demand_empty_rest_of_line ();
4397 /* This table describes all the machine specific pseudo-ops the assembler
4398 has to support. The fields are:
4399 pseudo-op name without dot
4400 function to call to execute this pseudo-op
4401 Integer arg to pass to the function. */
4403 const pseudo_typeS md_pseudo_table[] =
4405 /* Never called because '.req' does not start a line. */
4406 { "req", s_req, 0 },
4407 /* Following two are likewise never called. */
4410 { "unreq", s_unreq, 0 },
4411 { "bss", s_bss, 0 },
4412 { "align", s_align, 0 },
4413 { "arm", s_arm, 0 },
4414 { "thumb", s_thumb, 0 },
4415 { "code", s_code, 0 },
4416 { "force_thumb", s_force_thumb, 0 },
4417 { "thumb_func", s_thumb_func, 0 },
4418 { "thumb_set", s_thumb_set, 0 },
4419 { "even", s_even, 0 },
4420 { "ltorg", s_ltorg, 0 },
4421 { "pool", s_ltorg, 0 },
4422 { "syntax", s_syntax, 0 },
4423 { "cpu", s_arm_cpu, 0 },
4424 { "arch", s_arm_arch, 0 },
4425 { "object_arch", s_arm_object_arch, 0 },
4426 { "fpu", s_arm_fpu, 0 },
4427 { "arch_extension", s_arm_arch_extension, 0 },
4429 { "word", s_arm_elf_cons, 4 },
4430 { "long", s_arm_elf_cons, 4 },
4431 { "inst.n", s_arm_elf_inst, 2 },
4432 { "inst.w", s_arm_elf_inst, 4 },
4433 { "inst", s_arm_elf_inst, 0 },
4434 { "rel31", s_arm_rel31, 0 },
4435 { "fnstart", s_arm_unwind_fnstart, 0 },
4436 { "fnend", s_arm_unwind_fnend, 0 },
4437 { "cantunwind", s_arm_unwind_cantunwind, 0 },
4438 { "personality", s_arm_unwind_personality, 0 },
4439 { "personalityindex", s_arm_unwind_personalityindex, 0 },
4440 { "handlerdata", s_arm_unwind_handlerdata, 0 },
4441 { "save", s_arm_unwind_save, 0 },
4442 { "vsave", s_arm_unwind_save, 1 },
4443 { "movsp", s_arm_unwind_movsp, 0 },
4444 { "pad", s_arm_unwind_pad, 0 },
4445 { "setfp", s_arm_unwind_setfp, 0 },
4446 { "unwind_raw", s_arm_unwind_raw, 0 },
4447 { "eabi_attribute", s_arm_eabi_attribute, 0 },
4448 { "tlsdescseq", s_arm_tls_descseq, 0 },
4452 /* These are used for dwarf. */
4456 /* These are used for dwarf2. */
4457 { "file", (void (*) (int)) dwarf2_directive_file, 0 },
4458 { "loc", dwarf2_directive_loc, 0 },
4459 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
4461 { "extend", float_cons, 'x' },
4462 { "ldouble", float_cons, 'x' },
4463 { "packed", float_cons, 'p' },
4465 {"secrel32", pe_directive_secrel, 0},
4470 /* Parser functions used exclusively in instruction operands. */
4472 /* Generic immediate-value read function for use in insn parsing.
4473 STR points to the beginning of the immediate (the leading #);
4474 VAL receives the value; if the value is outside [MIN, MAX]
4475 issue an error. PREFIX_OPT is true if the immediate prefix is
/* Read a (possibly '#'-prefixed) integer immediate from *STR into *VAL.
   The expression must fold to O_constant and lie within [MIN, MAX];
   otherwise inst.error is set.  PREFIX_OPT selects whether the '#'
   prefix is optional (GE_OPT_PREFIX) or required (GE_IMM_PREFIX).
   NOTE(review): intervening lines are elided in this view -- the error
   paths presumably return FAIL and the success path SUCCESS; confirm
   against the full file.  */
4479 parse_immediate (char **str, int *val, int min, int max,
4480 bfd_boolean prefix_opt)
4483 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4484 if (exp.X_op != O_constant)
4486 inst.error = _("constant expression required");
4490 if (exp.X_add_number < min || exp.X_add_number > max)
4492 inst.error = _("immediate value out of range");
/* In range: hand the folded constant back to the caller.  */
4496 *val = exp.X_add_number;
4500 /* Less-generic immediate-value read function with the possibility of loading a
4501 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4502 instructions. Puts the result directly in inst.operands[i]. */
/* Parse an immediate that may be up to 64 bits wide (Neon VMOV/VMVN and
   logic immediates).  The low 32 bits go in inst.operands[I].imm; when a
   high half exists it goes in inst.operands[I].reg with .regisimm set.  */
4505 parse_big_immediate (char **str, int i)
4510 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4512 if (exp.X_op == O_constant)
4514 inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4515 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4516 O_constant. We have to be careful not to break compilation for
4517 32-bit X_add_number, though. */
4518 if ((exp.X_add_number & ~(offsetT)(0xffffffffU)) != 0)
4520 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4521 inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4522 inst.operands[i].regisimm = 1;
/* Not a simple constant: a bignum wider than 32 bits.  */
4525 else if (exp.X_op == O_big
4526 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32)
4528 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4530 /* Bignums have their least significant bits in
4531 generic_bignum[0]. Make sure we put 32 bits in imm and
4532 32 bits in reg, in a (hopefully) portable way. */
4533 gas_assert (parts != 0);
4535 /* Make sure that the number is not too big.
4536 PR 11972: Bignums can now be sign-extended to the
4537 size of a .octa so check that the out of range bits
4538 are all zero or all one. */
4539 if (LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 64)
4541 LITTLENUM_TYPE m = -1;
/* Bits above 64 must be uniform (all 0s or all 1s) or the value
   genuinely does not fit.  */
4543 if (generic_bignum[parts * 2] != 0
4544 && generic_bignum[parts * 2] != m)
4547 for (j = parts * 2 + 1; j < (unsigned) exp.X_add_number; j++)
4548 if (generic_bignum[j] != generic_bignum[j-1])
/* Assemble low 32 bits into .imm, next 32 into .reg, littlenum by
   littlenum starting from the least-significant end.  */
4552 inst.operands[i].imm = 0;
4553 for (j = 0; j < parts; j++, idx++)
4554 inst.operands[i].imm |= generic_bignum[idx]
4555 << (LITTLENUM_NUMBER_OF_BITS * j);
4556 inst.operands[i].reg = 0;
4557 for (j = 0; j < parts; j++, idx++)
4558 inst.operands[i].reg |= generic_bignum[idx]
4559 << (LITTLENUM_NUMBER_OF_BITS * j);
4560 inst.operands[i].regisimm = 1;
4570 /* Returns the pseudo-register number of an FPA immediate constant,
4571 or FAIL if there isn't a valid constant here. */
/* Match an FPA floating-point immediate against the small table of
   representable constants (fp_const / fp_values).  Three strategies are
   tried in order: exact string match, raw IEEE parse, then a full
   expression parse.  NOTE(review): the success-path returns are elided
   in this view; presumably each match returns the pseudo-register index
   and fall-through returns FAIL -- confirm against the full file.  */
4574 parse_fpa_immediate (char ** str)
4576 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4582 /* First try and match exact strings, this is to guarantee
4583 that some formats will work even for cross assembly. */
4585 for (i = 0; fp_const[i]; i++)
4587 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4591 *str += strlen (fp_const[i]);
4592 if (is_end_of_line[(unsigned char) **str])
4598 /* Just because we didn't get a match doesn't mean that the constant
4599 isn't valid, just that it is in a format that we don't
4600 automatically recognize. Try parsing it with the standard
4601 expression routines. */
4603 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4605 /* Look for a raw floating point number. */
4606 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4607 && is_end_of_line[(unsigned char) *save_in])
4609 for (i = 0; i < NUM_FLOAT_VALS; i++)
4611 for (j = 0; j < MAX_LITTLENUMS; j++)
/* Compare littlenum-by-littlenum against table entry I.  */
4613 if (words[j] != fp_values[i][j])
4617 if (j == MAX_LITTLENUMS)
4625 /* Try and parse a more complex expression, this will probably fail
4626 unless the code uses a floating point prefix (eg "0f"). */
4627 save_in = input_line_pointer;
4628 input_line_pointer = *str;
4629 if (expression (&exp) == absolute_section
4630 && exp.X_op == O_big
4631 && exp.X_add_number < 0)
4633 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4635 if (gen_to_words (words, 5, (long) 15) == 0)
4637 for (i = 0; i < NUM_FLOAT_VALS; i++)
4639 for (j = 0; j < MAX_LITTLENUMS; j++)
4641 if (words[j] != fp_values[i][j])
4645 if (j == MAX_LITTLENUMS)
/* Found: restore the scrubber state before returning.  */
4647 *str = input_line_pointer;
4648 input_line_pointer = save_in;
/* No representation found: restore state and report the error.  */
4655 *str = input_line_pointer;
4656 input_line_pointer = save_in;
4657 inst.error = _("invalid FPA immediate expression");
4661 /* Returns 1 if a number has "quarter-precision" float format
4662 0baBbbbbbc defgh000 00000000 00000000. */
/* Return nonzero iff IMM (a single-precision bit pattern) fits the
   8-bit "quarter-precision" form 0baBbbbbbc defgh000 00000000 00000000:
   low 19 bits clear and the exponent field equal to the value implied
   by the B bit (bit 29).  */
4665 is_quarter_float (unsigned imm)
4667 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4668 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4671 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4672 0baBbbbbbc defgh000 00000000 00000000.
4673 The zero and minus-zero cases need special handling, since they can't be
4674 encoded in the "quarter-precision" float format, but can nonetheless be
4675 loaded as integer constants. */
/* Parse a "quarter-precision" FP immediate at *CCP into *IMMED as a
   32-bit single-precision pattern.  Zero/minus-zero are accepted too
   (they are loaded as integer constants by the caller).  Inputs that
   look like plain integers are rejected so they are not mis-parsed as
   floats.  NOTE(review): the reject/accept return statements are elided
   in this view -- confirm FAIL/SUCCESS paths against the full file.  */
4678 parse_qfloat_immediate (char **ccp, int *immed)
4682 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4683 int found_fpchar = 0;
4685 skip_past_char (&str, '#');
4687 /* We must not accidentally parse an integer as a floating-point number. Make
4688 sure that the value we parse is not an integer by checking for special
4689 characters '.' or 'e'.
4690 FIXME: This is a horrible hack, but doing better is tricky because type
4691 information isn't in a very usable state at parse time. */
4693 skip_whitespace (fpnum);
/* A hex literal can never be a float here.  */
4695 if (strncmp (fpnum, "0x", 2) == 0)
4699 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4700 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4710 if ((str = atof_ieee (str, 's', words)) != NULL)
4712 unsigned fpword = 0;
4715 /* Our FP word must be 32 bits (single-precision FP). */
4716 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4718 fpword <<= LITTLENUM_NUMBER_OF_BITS;
/* Accept a representable quarter-float, or +/-0.0.  */
4722 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4735 /* Shift operands. */
/* The five ARM shift operations; RRX is encoded as ROR #0 downstream.  */
4738 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
/* Maps a textual shift mnemonic to its shift_kind (table is hashed
   into arm_shift_hsh).  */
4741 struct asm_shift_name
4744 enum shift_kind kind;
4747 /* Third argument to parse_shift. */
4748 enum parse_shift_mode
4750 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4751 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4752 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4753 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4754 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4757 /* Parse a <shift> specifier on an ARM data processing instruction.
4758 This has three forms:
4760 (LSL|LSR|ASL|ASR|ROR) Rs
4761 (LSL|LSR|ASL|ASR|ROR) #imm
4764 Note that ASL is assimilated to LSL in the instruction encoding, and
4765 RRX to ROR #0 (which cannot be written as such). */
/* Parse a <shift> specifier into inst.operands[I].  MODE restricts which
   shift kinds/operand forms are legal (see enum parse_shift_mode).  On
   success, sets .shift_kind/.shifted, and either .imm/.immisreg (shift
   by register) or inst.reloc.exp (shift by immediate).  */
4768 parse_shift (char **str, int i, enum parse_shift_mode mode)
4770 const struct asm_shift_name *shift_name;
4771 enum shift_kind shift;
/* Scan the alphabetic mnemonic so we can hash-look it up.  */
4776 for (p = *str; ISALPHA (*p); p++)
4781 inst.error = _("shift expression expected");
4785 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
4788 if (shift_name == NULL)
4790 inst.error = _("shift expression expected");
4794 shift = shift_name->kind;
/* Enforce MODE's restriction on the shift kind.  */
4798 case NO_SHIFT_RESTRICT:
4799 case SHIFT_IMMEDIATE: break;
4801 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4802 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4804 inst.error = _("'LSL' or 'ASR' required");
4809 case SHIFT_LSL_IMMEDIATE:
4810 if (shift != SHIFT_LSL)
4812 inst.error = _("'LSL' required");
4817 case SHIFT_ASR_IMMEDIATE:
4818 if (shift != SHIFT_ASR)
4820 inst.error = _("'ASR' required");
/* RRX takes no operand; everything else takes a register or #imm.  */
4828 if (shift != SHIFT_RRX)
4830 /* Whitespace can appear here if the next thing is a bare digit. */
4831 skip_whitespace (p);
4833 if (mode == NO_SHIFT_RESTRICT
4834 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4836 inst.operands[i].imm = reg;
4837 inst.operands[i].immisreg = 1;
4839 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4842 inst.operands[i].shift_kind = shift;
4843 inst.operands[i].shifted = 1;
4848 /* Parse a <shifter_operand> for an ARM data processing instruction:
4851 #<immediate>, <rotate>
4855 where <shift> is defined by parse_shift above, and <rotate> is a
4856 multiple of 2 between 0 and 30. Validation of immediate operands
4857 is deferred to md_apply_fix. */
/* Parse an ARM data-processing <shifter_operand>: either a register
   (optionally with a shift, via parse_shift) or an immediate with an
   optional explicit even rotation 0..30.  Immediate validation beyond
   the explicit-rotation form is deferred to md_apply_fix.  */
4860 parse_shifter_operand (char **str, int i)
4865 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4867 inst.operands[i].reg = value;
4868 inst.operands[i].isreg = 1;
4870 /* parse_shift will override this if appropriate */
4871 inst.reloc.exp.X_op = O_constant;
4872 inst.reloc.exp.X_add_number = 0;
4874 if (skip_past_comma (str) == FAIL)
4877 /* Shift operation on register. */
4878 return parse_shift (str, i, NO_SHIFT_RESTRICT);
/* Not a register: parse the immediate expression.  */
4881 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4884 if (skip_past_comma (str) == SUCCESS)
4886 /* #x, y -- ie explicit rotation by Y. */
4887 if (my_get_expression (&exp, str, GE_NO_PREFIX))
4890 if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4892 inst.error = _("constant expression expected");
/* Rotation must be an even amount in [0, 30].  */
4896 value = exp.X_add_number;
4897 if (value < 0 || value > 30 || value % 2 != 0)
4899 inst.error = _("invalid rotation");
4902 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4904 inst.error = _("invalid constant");
4908 /* Encode as specified. */
4909 inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
/* No explicit rotation: leave encoding to the fixup machinery.  */
4913 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4914 inst.reloc.pc_rel = 0;
4918 /* Group relocation information. Each entry in the table contains the
4919 textual name of the relocation as may appear in assembler source
4920 and must end with a colon.
4921 Along with this textual name are the relocation codes to be used if
4922 the corresponding instruction is an ALU instruction (ADD or SUB only),
4923 an LDR, an LDRS, or an LDC. */
/* One group-relocation name plus the BFD relocation codes to use for
   each instruction class (ALU add/sub, LDR, LDRS, LDC) it may appear
   on.  NOTE(review): the name field and some entry heads are elided in
   this view.  */
4925 struct group_reloc_table_entry
4936 /* Varieties of non-ALU group relocation. */
/* Table of all recognised group relocations, PC-relative first, then
   section-base (SB) relative.  Order mirrors the _G0/_G1/_G2 groups.  */
4943 static struct group_reloc_table_entry group_reloc_table[] =
4944 { /* Program counter relative: */
4946 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
4951 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
4952 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
4953 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
4954 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
4956 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
4961 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
4962 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
4963 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
4964 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
4966 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
4967 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
4968 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
4969 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
4970 /* Section base relative */
4972 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
4977 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
4978 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
4979 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
4980 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
4982 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
4987 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
4988 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
4989 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
4990 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
4992 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
4993 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
4994 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
4995 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */
4997 /* Given the address of a pointer pointing to the textual name of a group
4998 relocation as may appear in assembler source, attempt to find its details
4999 in group_reloc_table. The pointer will be updated to the character after
5000 the trailing colon. On failure, FAIL will be returned; SUCCESS
5001 otherwise. On success, *entry will be updated to point at the relevant
5002 group_reloc_table entry. */
/* Look up the group-relocation name at **STR (which must be followed by
   ':') in group_reloc_table.  On a case-insensitive match, set *OUT to
   the entry, advance *STR past the trailing colon, and (per the header
   comment) return SUCCESS; otherwise FAIL.  */
5005 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5008 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5010 int length = strlen (group_reloc_table[i].name);
5012 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5013 && (*str)[length] == ':')
5015 *out = &group_reloc_table[i];
/* Consume the name and the ':' delimiter.  */
5016 *str += (length + 1);
5024 /* Parse a <shifter_operand> for an ARM data processing instruction
5025 (as for parse_shifter_operand) where group relocations are allowed:
5028 #<immediate>, <rotate>
5029 #:<group_reloc>:<expression>
5033 where <group_reloc> is one of the strings defined in group_reloc_table.
5034 The hashes are optional.
5036 Everything else is as for parse_shifter_operand. */
/* As parse_shifter_operand, but additionally accepts the
   "#:<group_reloc>:<expression>" form (leading '#' optional).  When a
   group relocation is present the ALU variant of its relocation code is
   recorded; otherwise everything is delegated to
   parse_shifter_operand.  */
5038 static parse_operand_result
5039 parse_shifter_operand_group_reloc (char **str, int i)
5041 /* Determine if we have the sequence of characters #: or just :
5042 coming next. If we do, then we check for a group relocation.
5043 If we don't, punt the whole lot to parse_shifter_operand. */
5045 if (((*str)[0] == '#' && (*str)[1] == ':')
5046 || (*str)[0] == ':')
5048 struct group_reloc_table_entry *entry;
/* Skip the optional '#' so *str points at the ':'.  */
5050 if ((*str)[0] == '#')
5055 /* Try to parse a group relocation. Anything else is an error. */
5056 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5058 inst.error = _("unknown group relocation");
5059 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5062 /* We now have the group relocation table entry corresponding to
5063 the name in the assembler source. Next, we parse the expression. */
5064 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5065 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5067 /* Record the relocation type (always the ALU variant here). */
5068 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5069 gas_assert (inst.reloc.type != 0);
5071 return PARSE_OPERAND_SUCCESS;
/* No group relocation prefix: fall back to the plain parser.  */
5074 return parse_shifter_operand (str, i) == SUCCESS
5075 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5077 /* Never reached. */
5080 /* Parse a Neon alignment expression. Information is written to
5081 inst.operands[i]. We assume the initial ':' has been skipped.
5083 align .imm = align << 8, .immisalign=1, .preind=0 */
/* Parse a Neon alignment expression (the initial ':' has already been
   consumed).  The constant alignment is stored shifted left 8 bits in
   inst.operands[I].imm with .immisalign set; alignments are never
   pre-indexes, so .preind is cleared.  */
5084 static parse_operand_result
5085 parse_neon_alignment (char **str, int i)
5090 my_get_expression (&exp, &p, GE_NO_PREFIX);
5092 if (exp.X_op != O_constant)
5094 inst.error = _("alignment must be constant");
5095 return PARSE_OPERAND_FAIL;
5098 inst.operands[i].imm = exp.X_add_number << 8;
5099 inst.operands[i].immisalign = 1;
5100 /* Alignments are not pre-indexes. */
5101 inst.operands[i].preind = 0;
5104 return PARSE_OPERAND_SUCCESS;
5107 /* Parse all forms of an ARM address expression. Information is written
5108 to inst.operands[i] and/or inst.reloc.
5110 Preindexed addressing (.preind=1):
5112 [Rn, #offset] .reg=Rn .reloc.exp=offset
5113 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5114 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5115 .shift_kind=shift .reloc.exp=shift_imm
5117 These three may have a trailing ! which causes .writeback to be set also.
5119 Postindexed addressing (.postind=1, .writeback=1):
5121 [Rn], #offset .reg=Rn .reloc.exp=offset
5122 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5123 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5124 .shift_kind=shift .reloc.exp=shift_imm
5126 Unindexed addressing (.preind=0, .postind=0):
5128 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5132 [Rn]{!} shorthand for [Rn,#0]{!}
5133 =immediate .isreg=0 .reloc.exp=immediate
5134 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5136 It is the caller's responsibility to check for addressing modes not
5137 supported by the instruction, and to set inst.reloc.type. */
/* Workhorse for all ARM address-operand forms (see the large comment
   above for the .preind/.postind/.writeback encodings written into
   inst.operands[I]).  GROUP_RELOCATIONS enables ":<group_reloc>:" offset
   forms; GROUP_TYPE selects which relocation variant (LDR/LDRS/LDC) is
   recorded.  The caller checks mode legality and sets inst.reloc.type.
   NOTE(review): many lines (braces, switch heads, '+' branches) are
   elided in this view; comments below describe only what is visible.  */
5139 static parse_operand_result
5140 parse_address_main (char **str, int i, int group_relocations,
5141 group_reloc_type group_type)
/* No '[': either "=immediate" (load-constant pseudo) or a bare label.  */
5146 if (skip_past_char (&p, '[') == FAIL)
5148 if (skip_past_char (&p, '=') == FAIL)
5150 /* Bare address - translate to PC-relative offset. */
5151 inst.reloc.pc_rel = 1;
5152 inst.operands[i].reg = REG_PC;
5153 inst.operands[i].isreg = 1;
5154 inst.operands[i].preind = 1;
5156 /* Otherwise a load-constant pseudo op, no special treatment needed here. */
5158 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5159 return PARSE_OPERAND_FAIL;
5162 return PARSE_OPERAND_SUCCESS;
/* '[' seen: the base register is mandatory.  */
5165 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5167 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5168 return PARSE_OPERAND_FAIL;
5170 inst.operands[i].reg = reg;
5171 inst.operands[i].isreg = 1;
/* Pre-indexed forms: [Rn, ...].  */
5173 if (skip_past_comma (&p) == SUCCESS)
5175 inst.operands[i].preind = 1;
5178 else if (*p == '-') p++, inst.operands[i].negative = 1;
5180 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5182 inst.operands[i].imm = reg;
5183 inst.operands[i].immisreg = 1;
5185 if (skip_past_comma (&p) == SUCCESS)
5186 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5187 return PARSE_OPERAND_FAIL;
5189 else if (skip_past_char (&p, ':') == SUCCESS)
5191 /* FIXME: '@' should be used here, but it's filtered out by generic
5192 code before we get to see it here. This may be subject to
5194 parse_operand_result result = parse_neon_alignment (&p, i);
5196 if (result != PARSE_OPERAND_SUCCESS)
/* Immediate offset: a '-' sign cannot apply here, undo it.  */
5201 if (inst.operands[i].negative)
5203 inst.operands[i].negative = 0;
5207 if (group_relocations
5208 && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5210 struct group_reloc_table_entry *entry;
5212 /* Skip over the #: or : sequence. */
5218 /* Try to parse a group relocation. Anything else is an
5220 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5222 inst.error = _("unknown group relocation");
5223 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5226 /* We now have the group relocation table entry corresponding to
5227 the name in the assembler source. Next, we parse the
5229 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5230 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5232 /* Record the relocation type. */
5236 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
5240 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
5244 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
/* A zero code means this reloc name is not valid for GROUP_TYPE.  */
5251 if (inst.reloc.type == 0)
5253 inst.error = _("this group relocation is not allowed on this instruction");
5254 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
/* Plain #offset expression.  */
5260 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5261 return PARSE_OPERAND_FAIL;
5262 /* If the offset is 0, find out if it's a +0 or -0. */
5263 if (inst.reloc.exp.X_op == O_constant
5264 && inst.reloc.exp.X_add_number == 0)
5266 skip_whitespace (q);
5270 skip_whitespace (q);
5273 inst.operands[i].negative = 1;
5278 else if (skip_past_char (&p, ':') == SUCCESS)
5280 /* FIXME: '@' should be used here, but it's filtered out by generic code
5281 before we get to see it here. This may be subject to change. */
5282 parse_operand_result result = parse_neon_alignment (&p, i);
5284 if (result != PARSE_OPERAND_SUCCESS)
/* End of the bracketed part.  */
5288 if (skip_past_char (&p, ']') == FAIL)
5290 inst.error = _("']' expected");
5291 return PARSE_OPERAND_FAIL;
5294 if (skip_past_char (&p, '!') == SUCCESS)
5295 inst.operands[i].writeback = 1;
/* Post-indexed forms: [Rn], ...  */
5297 else if (skip_past_comma (&p) == SUCCESS)
5299 if (skip_past_char (&p, '{') == SUCCESS)
5301 /* [Rn], {expr} - unindexed, with option */
5302 if (parse_immediate (&p, &inst.operands[i].imm,
5303 0, 255, TRUE) == FAIL)
5304 return PARSE_OPERAND_FAIL;
5306 if (skip_past_char (&p, '}') == FAIL)
5308 inst.error = _("'}' expected at end of 'option' field");
5309 return PARSE_OPERAND_FAIL;
5311 if (inst.operands[i].preind)
5313 inst.error = _("cannot combine index with option");
5314 return PARSE_OPERAND_FAIL;
5317 return PARSE_OPERAND_SUCCESS;
5321 inst.operands[i].postind = 1;
5322 inst.operands[i].writeback = 1;
5324 if (inst.operands[i].preind)
5326 inst.error = _("cannot combine pre- and post-indexing");
5327 return PARSE_OPERAND_FAIL;
5331 else if (*p == '-') p++, inst.operands[i].negative = 1;
5333 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5335 /* We might be using the immediate for alignment already. If we
5336 are, OR the register number into the low-order bits. */
5337 if (inst.operands[i].immisalign)
5338 inst.operands[i].imm |= reg;
5340 inst.operands[i].imm = reg;
5341 inst.operands[i].immisreg = 1;
5343 if (skip_past_comma (&p) == SUCCESS)
5344 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5345 return PARSE_OPERAND_FAIL;
/* Post-indexed immediate: as above, '-' cannot bind to an immediate.  */
5350 if (inst.operands[i].negative)
5352 inst.operands[i].negative = 0;
5355 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5356 return PARSE_OPERAND_FAIL;
5357 /* If the offset is 0, find out if it's a +0 or -0. */
5358 if (inst.reloc.exp.X_op == O_constant
5359 && inst.reloc.exp.X_add_number == 0)
5361 skip_whitespace (q);
5365 skip_whitespace (q);
5368 inst.operands[i].negative = 1;
5374 /* If at this point neither .preind nor .postind is set, we have a
5375 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5376 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5378 inst.operands[i].preind = 1;
5379 inst.reloc.exp.X_op = O_constant;
5380 inst.reloc.exp.X_add_number = 0;
5383 return PARSE_OPERAND_SUCCESS;
/* Convenience wrapper: parse an address with group relocations disabled,
   collapsing the parse_operand_result into SUCCESS/FAIL for callers.  */
5387 parse_address (char **str, int i)
5389 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
/* As parse_address, but with group relocations of the given TYPE
   (LDR/LDRS/LDC) permitted; returns the full parse_operand_result.  */
5393 static parse_operand_result
5394 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5396 return parse_address_main (str, i, 1, type);
5399 /* Parse an operand for a MOVW or MOVT instruction. */
/* Parse the operand of a MOVW/MOVT: an optional ":lower16:"/":upper16:"
   relocation prefix followed by an expression.  Without a prefix the
   expression must be a constant in [0, 0xffff].  */
5401 parse_half (char **str)
5406 skip_past_char (&p, '#');
5407 if (strncasecmp (p, ":lower16:", 9) == 0)
5408 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5409 else if (strncasecmp (p, ":upper16:", 9) == 0)
5410 inst.reloc.type = BFD_RELOC_ARM_MOVT;
/* A prefix was found: step past it before reading the expression.  */
5412 if (inst.reloc.type != BFD_RELOC_UNUSED)
5415 skip_whitespace (p);
5418 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
/* No relocation prefix: enforce the 16-bit constant range here.  */
5421 if (inst.reloc.type == BFD_RELOC_UNUSED)
5423 if (inst.reloc.exp.X_op != O_constant)
5425 inst.error = _("constant expression expected");
5428 if (inst.reloc.exp.X_add_number < 0
5429 || inst.reloc.exp.X_add_number > 0xffff)
5431 inst.error = _("immediate value out of range");
5439 /* Miscellaneous. */
5441 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5442 or a bitmask suitable to be or-ed into the ARM msr instruction. */
/* Parse a PSR operand (CPSR/SPSR/APSR plus optional "_<suffix>") for
   MRS/MSR.  LHS is TRUE when this is the destination of an MSR write.
   Returns FAIL on syntax error or a bitmask to OR into the instruction.
   Handles both A/R-profile ([CS]PSR with field suffixes) and M-profile
   special registers.  NOTE(review): several branch/exit lines are
   elided in this view; comments describe only visible code.  */
5444 parse_psr (char **str, bfd_boolean lhs)
5447 unsigned long psr_field;
5448 const struct asm_psr *psr;
5450 bfd_boolean is_apsr = FALSE;
5451 bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
5453 /* PR gas/12698: If the user has specified -march=all then m_profile will
5454 be TRUE, but we want to ignore it in this case as we are building for any
5455 CPU type, including non-m variants. */
5456 if (selected_cpu.core == arm_arch_any.core)
5459 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5460 feature for ease of use and backwards compatibility. */
5462 if (strncasecmp (p, "SPSR", 4) == 0)
5465 goto unsupported_psr;
5467 psr_field = SPSR_BIT;
5469 else if (strncasecmp (p, "CPSR", 4) == 0)
5472 goto unsupported_psr;
5476 else if (strncasecmp (p, "APSR", 4) == 0)
5478 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5479 and ARMv7-R architecture CPUs. */
/* M-profile path: scan the whole special-register name.  */
5488 while (ISALNUM (*p) || *p == '_');
5490 if (strncasecmp (start, "iapsr", 5) == 0
5491 || strncasecmp (start, "eapsr", 5) == 0
5492 || strncasecmp (start, "xpsr", 4) == 0
5493 || strncasecmp (start, "psr", 3) == 0)
5494 p = start + strcspn (start, "rR") + 1;
5496 psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
5502 /* If APSR is being written, a bitfield may be specified. Note that
5503 APSR itself is handled above. */
5504 if (psr->field <= 3)
5506 psr_field = psr->field;
5512 /* M-profile MSR instructions have the mask field set to "10", except
5513 *PSR variants which modify APSR, which may use a different mask (and
5514 have been handled already). Do that by setting the PSR_f field
5516 return psr->field | (lhs ? PSR_f : 0);
5519 goto unsupported_psr;
5525 /* A suffix follows. */
/* Scan the suffix text after the '_'.  */
5531 while (ISALNUM (*p) || *p == '_');
5535 /* APSR uses a notation for bits, rather than fields. */
5536 unsigned int nzcvq_bits = 0;
5537 unsigned int g_bit = 0;
/* Accumulate each named bit; 0x20 marks a duplicate letter.  */
5540 for (bit = start; bit != p; bit++)
5542 switch (TOLOWER (*bit))
5545 nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
5549 nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
5553 nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
5557 nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
5561 nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
5565 g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
5569 inst.error = _("unexpected bit specified after APSR");
/* Full "nzcvq" set selected.  */
5574 if (nzcvq_bits == 0x1f)
/* The 'g' bit requires the DSP extension.  */
5579 if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
5581 inst.error = _("selected processor does not "
5582 "support DSP extension");
/* Reject duplicates and partial nzcvq sets.  */
5589 if ((nzcvq_bits & 0x20) != 0
5590 || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
5591 || (g_bit & 0x2) != 0)
5593 inst.error = _("bad bitmask specified after APSR");
/* Non-APSR suffix: look it up as a PSR field name.  */
5599 psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
5604 psr_field |= psr->field;
5610 goto error; /* Garbage after "[CS]PSR". */
5612 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
5613 is deprecated, but allow it anyway. */
5617 as_tsktsk (_("writing to APSR without specifying a bitmask is "
5620 else if (!m_profile)
5621 /* These bits are never right for M-profile devices: don't set them
5622 (only code paths which read/write APSR reach here). */
5623 psr_field |= (PSR_c | PSR_f);
5629 inst.error = _("selected processor does not support requested special "
5630 "purpose register");
5634 inst.error = _("flag for {c}psr instruction expected");
5638 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5639 value suitable for splatting into the AIF field of the instruction. */
/* Parse the a/i/f flags argument to CPSIE/CPSID into an AIF bitmask
   (a=0x4, i=0x2, f=0x1).  At least one flag is required.  Returns the
   mask, or FAIL on error (return statements elided in this view).  */
5642 parse_cps_flags (char **str)
5651 case '\0': case ',':
5654 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5655 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5656 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5659 inst.error = _("unrecognized CPS flag");
5664 if (saw_a_flag == 0)
5666 inst.error = _("missing CPS flags");
5674 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5675 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
/* Parse a SETEND endian specifier: "BE" -> 0, "LE" -> 1, else FAIL.
   NOTE(review): strncasecmp returns 0 on a match, so each bare
   `if (strncasecmp (...))` condition is TRUE on a NON-match; the
   assignment lines pairing conditions to little_endian values are
   elided in this view -- verify the mapping against the full file.  */
5678 parse_endian_specifier (char **str)
5683 if (strncasecmp (s, "BE", 2))
5685 else if (strncasecmp (s, "LE", 2))
5689 inst.error = _("valid endian specifiers are be or le");
/* Reject trailing identifier characters (e.g. "BEX").  */
5693 if (ISALNUM (s[2]) || s[2] == '_')
5695 inst.error = _("valid endian specifiers are be or le");
5700 return little_endian;
5703 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
5704 value suitable for poking into the rotate field of an sxt or sxta
5705 instruction, or FAIL on error. */
/* Parse "ROR #<n>" where <n> is 0, 8, 16 or 24, returning the 2-bit
   rotate-field encoding (0..3) for sxt/sxta instructions, or FAIL.  */
5708 parse_ror (char **str)
5713 if (strncasecmp (s, "ROR", 3) == 0)
5717 inst.error = _("missing rotation field after comma");
5721 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
/* Map the byte rotation onto the instruction's 2-bit encoding.  */
5726 case 0: *str = s; return 0x0;
5727 case 8: *str = s; return 0x1;
5728 case 16: *str = s; return 0x2;
5729 case 24: *str = s; return 0x3;
5732 inst.error = _("rotation can only be 0, 8, 16, or 24");
5737 /* Parse a conditional code (from conds[] below). The value returned is in the
5738 range 0 .. 14, or FAIL. */
/* Parse a condition-code suffix, returning its value (0..14 per the
   header comment) via the arm_cond_hsh hash table, or FAIL.  */
5740 parse_cond (char **str)
5743 const struct asm_cond *c;
5745 /* Condition codes are always 2 characters, so matching up to
5746 3 characters is sufficient. */
/* Lowercase up to 3 letters into a scratch buffer for the lookup.  */
5751 while (ISALPHA (*q) && n < 3)
5753 cond[n] = TOLOWER (*q);
5758 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
5761 inst.error = _("condition required");
5769 /* Parse an option for a barrier instruction. Returns the encoding for the
/* Parse a barrier option (for DMB/DSB/ISB) by hashing the alphabetic
   token against arm_barrier_opt_hsh; returns its encoding (the return
   statements are elided in this view).  */
5772 parse_barrier (char **str)
5775 const struct asm_barrier_opt *o;
5778 while (ISALPHA (*q))
5781 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
5790 /* Parse the operands of a table branch instruction. Similar to a memory
/* Parse the operands of TBB/TBH: "[Rn, Rm]" with an optional
   ", LSL #1" on Rm.  Base goes in inst.operands[0].reg, index in
   inst.operands[0].imm, and .shifted records the LSL #1 (for TBH).  */
5793 parse_tb (char **str)
5798 if (skip_past_char (&p, '[') == FAIL)
5800 inst.error = _("'[' expected");
5804 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5806 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5809 inst.operands[0].reg = reg;
5811 if (skip_past_comma (&p) == FAIL)
5813 inst.error = _("',' expected");
5817 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5819 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5822 inst.operands[0].imm = reg;
/* Only "LSL #1" is legal here (halfword table entries).  */
5824 if (skip_past_comma (&p) == SUCCESS)
5826 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5828 if (inst.reloc.exp.X_add_number != 1)
5830 inst.error = _("invalid shift");
5833 inst.operands[0].shifted = 1;
5836 if (skip_past_char (&p, ']') == FAIL)
5838 inst.error = _("']' expected");
5845 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5846 information on the types the operands can take and how they are encoded.
5847 Up to four operands may be read; this function handles setting the
5848 ".present" field for each read operand itself.
5849 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5850 else returns FAIL. */
/* Parse the operands of every VMOV variant (see do_neon_mov for the
   encodings).  Reads up to four operands into inst.operands[], setting
   each .present itself, and updates *WHICH_OPERAND.  The numbered
   "Case N" comments below identify the VMOV forms being matched.
   NOTE(review): many brace/return lines are elided in this view;
   comments describe only visible code.  */
5853 parse_neon_mov (char **str, int *which_operand)
5855 int i = *which_operand, val;
5856 enum arm_reg_type rtype;
5858 struct neon_type_el optype;
/* First operand is a D-register scalar: case 4.  */
5860 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5862 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5863 inst.operands[i].reg = val;
5864 inst.operands[i].isscalar = 1;
5865 inst.operands[i].vectype = optype;
5866 inst.operands[i++].present = 1;
5868 if (skip_past_comma (&ptr) == FAIL)
5871 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5874 inst.operands[i].reg = val;
5875 inst.operands[i].isreg = 1;
5876 inst.operands[i].present = 1;
/* First operand is an S/D/Q vector register.  */
5878 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5881 /* Cases 0, 1, 2, 3, 5 (D only). */
5882 if (skip_past_comma (&ptr) == FAIL)
5885 inst.operands[i].reg = val;
5886 inst.operands[i].isreg = 1;
5887 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5888 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5889 inst.operands[i].isvec = 1;
5890 inst.operands[i].vectype = optype;
5891 inst.operands[i++].present = 1;
5893 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5895 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5896 Case 13: VMOV <Sd>, <Rm> */
5897 inst.operands[i].reg = val;
5898 inst.operands[i].isreg = 1;
5899 inst.operands[i].present = 1;
/* A core-register pair may only follow a D (not Q) register.  */
5901 if (rtype == REG_TYPE_NQ)
5903 first_error (_("can't use Neon quad register here"));
5906 else if (rtype != REG_TYPE_VFS)
5909 if (skip_past_comma (&ptr) == FAIL)
5911 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5913 inst.operands[i].reg = val;
5914 inst.operands[i].isreg = 1;
5915 inst.operands[i].present = 1;
/* Second operand is another vector register.  */
5918 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5921 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5922 Case 1: VMOV<c><q> <Dd>, <Dm>
5923 Case 8: VMOV.F32 <Sd>, <Sm>
5924 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5926 inst.operands[i].reg = val;
5927 inst.operands[i].isreg = 1;
5928 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5929 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5930 inst.operands[i].isvec = 1;
5931 inst.operands[i].vectype = optype;
5932 inst.operands[i].present = 1;
/* Case 15 continues with two core registers.  */
5934 if (skip_past_comma (&ptr) == SUCCESS)
5939 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5942 inst.operands[i].reg = val;
5943 inst.operands[i].isreg = 1;
5944 inst.operands[i++].present = 1;
5946 if (skip_past_comma (&ptr) == FAIL)
5949 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5952 inst.operands[i].reg = val;
5953 inst.operands[i].isreg = 1;
5954 inst.operands[i].present = 1;
/* Second operand is an immediate: float form first, then integer.  */
5957 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5958 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5959 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5960 Case 10: VMOV.F32 <Sd>, #<imm>
5961 Case 11: VMOV.F64 <Dd>, #<imm> */
5962 inst.operands[i].immisfloat = 1;
5963 else if (parse_big_immediate (&ptr, i) == SUCCESS)
5964 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5965 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5969 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
/* First operand is an ARM core register.  */
5973 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5976 inst.operands[i].reg = val;
5977 inst.operands[i].isreg = 1;
5978 inst.operands[i++].present = 1;
5980 if (skip_past_comma (&ptr) == FAIL)
5983 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5985 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5986 inst.operands[i].reg = val;
5987 inst.operands[i].isscalar = 1;
5988 inst.operands[i].present = 1;
5989 inst.operands[i].vectype = optype;
5991 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5993 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5994 inst.operands[i].reg = val;
5995 inst.operands[i].isreg = 1;
5996 inst.operands[i++].present = 1;
5998 if (skip_past_comma (&ptr) == FAIL)
6001 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
6004 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
6008 inst.operands[i].reg = val;
6009 inst.operands[i].isreg = 1;
6010 inst.operands[i].isvec = 1;
6011 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6012 inst.operands[i].vectype = optype;
6013 inst.operands[i].present = 1;
/* Two core regs + S-register pair form needs a second S register.  */
6015 if (rtype == REG_TYPE_VFS)
6019 if (skip_past_comma (&ptr) == FAIL)
6021 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6024 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6027 inst.operands[i].reg = val;
6028 inst.operands[i].isreg = 1;
6029 inst.operands[i].isvec = 1;
6030 inst.operands[i].issingle = 1;
6031 inst.operands[i].vectype = optype;
6032 inst.operands[i].present = 1;
/* Single S register as the second operand.  */
6035 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
6039 inst.operands[i].reg = val;
6040 inst.operands[i].isreg = 1;
6041 inst.operands[i].isvec = 1;
6042 inst.operands[i].issingle = 1;
6043 inst.operands[i].vectype = optype;
6044 inst.operands[i].present = 1;
6049 first_error (_("parse error"));
6053 /* Successfully parsed the operands. Update args. */
/* Shared error exits (labels elided in this view).  */
6059 first_error (_("expected comma"));
6063 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM matcher code is kept in the
   low 16 bits of the combined value and the Thumb matcher code in the
   high 16 bits; parse_operands splits the two halves back apart and
   picks the one matching the current instruction set state.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
/* Matcher codes for parse_operands.  Each code names one kind of
   operand the grammar tables can ask for.  */
enum operand_parse_code
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,	/* Neon double precision register (0..31) */
  OP_RNQ,	/* Neon quad precision register */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RNDQ,	/* Neon double or quad precision register */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,	/* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_REGLST,	/* ARM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,	/* VFP single or double-precision register list (& quad) */
  OP_NRDLST,	/* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,	/* Neon element/structure list */

  /* "Either/or" codes: try the first interpretation, fall back to the
     second.  */
  OP_RNDQ_I0,	/* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RR_RNSC,	/* ARM reg or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC,	/* Neon D or Q reg, or Neon scalar.  */
  OP_RND_RNSC,	/* Neon D reg, or Neon scalar.  */
  OP_VMOV,	/* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  OP_RNDQ_I63b,	/* Neon D or Q reg, or immediate for shift.  */
  OP_RIWR_I32z,	/* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */

  OP_I0,	/* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/* 0 .. 15 */
  OP_I16,	/* 1 .. 16 */
  OP_I16z,	/* 0 .. 16 */
  OP_I31,	/* 0 .. 31 */
  OP_I31w,	/* 0 .. 31, optional trailing ! */
  OP_I32,	/* 1 .. 32 */
  OP_I32z,	/* 0 .. 32 */
  OP_I63,	/* 0 .. 63 */
  OP_I63s,	/* -64 .. 63 */
  OP_I64,	/* 1 .. 64 */
  OP_I64z,	/* 0 .. 64 */
  OP_I255,	/* 0 .. 255 */

  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/* 0 .. 7 */
  OP_I15b,	/* 0 .. 15 */
  OP_I31b,	/* 0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS,	/* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,	/* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,	/* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc suff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC,	/* iWMMXt R or C reg */
  OP_RIWC_RIWG,	/* iWMMXt wC or wCG reg */

  /* Optional operands.  */
  OP_oI7b,	/* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	/* 0 .. 31 */
  OP_oI32b,	/* 1 .. 32 */
  OP_oI32z,	/* 0 .. 32 */
  OP_oIffffb,	/* 0 .. 65535 */
  OP_oI255c,	/* curly-brace enclosed, 0 .. 255 */

  OP_oRR,	/* ARM register */
  OP_oRRnpc,	/* ARM register, not the PC */
  OP_oRRnpcsp,	/* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	/* ARM register, not r15, optional trailing ! */
  OP_oRND,	/* Optional Neon double precision register */
  OP_oRNQ,	/* Optional Neon quad precision register */
  OP_oRNDQ,	/* Optional Neon double or quad precision register */
  OP_oRNSDQ,	/* Optional single, double or quad precision vector register */
  OP_oSHll,	/* LSL immediate */
  OP_oSHar,	/* ASR immediate */
  OP_oSHllar,	/* LSL or ASR immediate */
  OP_oROR,	/* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* parse_operands treats every code >= OP_FIRST_OPTIONAL as an
     optional operand and records a backtrack point for it, so all the
     OP_o* codes above must stay grouped at the end of the enum.  */
  OP_FIRST_OPTIONAL = OP_oI7b
/* Generic instruction operand parser.  This does no encoding and no
   semantic validation; it merely squirrels values away in the inst
   structure.  Returns SUCCESS or FAIL depending on whether the
   specified grammar matched.

   STR is the operand text, PATTERN an OP_stop-terminated array of
   operand_parse_code values, and THUMB selects which half of a mixed
   ARM/Thumb code applies.  */
parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
  unsigned const int *upat = pattern;
  char *backtrack_pos = 0;
  const char *backtrack_error = 0;
  int i, val = 0, backtrack_index = 0;
  enum arm_reg_type rtype;
  parse_operand_result result;
  unsigned int op_parse_code;

  /* Operand-matching helper macros.  Each consumes text from STR and
     records what it found in inst.operands[i].  The "..._or_goto"
     forms jump to a local label on failure so that an alternative
     interpretation can be tried; the "..._or_fail" forms abandon the
     whole match.  They are #undef'd again after this function.  */
#define po_char_or_fail(chr)			\
      if (skip_past_char (&str, chr) == FAIL)	\

#define po_reg_or_fail(regtype)					\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
	  first_error (_(reg_expected_msgs[regtype]));		\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\

#define po_reg_or_goto(regtype, label)				\
      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
				 & inst.operands[i].vectype);	\
      inst.operands[i].reg = val;				\
      inst.operands[i].isreg = 1;				\
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
			     || rtype == REG_TYPE_VFD		\
			     || rtype == REG_TYPE_NQ);		\

#define po_imm_or_fail(min, max, popt)				\
      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
      inst.operands[i].imm = val;				\

#define po_scalar_or_goto(elsz, label)					\
      val = parse_scalar (& str, elsz, & inst.operands[i].vectype);	\
      inst.operands[i].reg = val;					\
      inst.operands[i].isscalar = 1;					\

#define po_misc_or_fail(expr)			\

  /* Like po_misc_or_fail, but a NO_BACKTRACK result also kills the
     recorded backtrack point so optional-operand retry is suppressed.  */
#define po_misc_or_fail_no_backtrack(expr)		\
      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
	backtrack_pos = 0;				\
      if (result != PARSE_OPERAND_SUCCESS)		\

#define po_barrier_or_imm(str)				\
      val = parse_barrier (&str);			\
	  if (ISALPHA (*str))				\
	      if ((inst.instruction & 0xf0) == 0x60	\
		  /* ISB can only take SY as an option.  */	\
		  inst.error = _("invalid barrier type");	\

  skip_whitespace (str);

  /* One iteration per operand in the pattern.  */
  for (i = 0; upat[i] != OP_stop; i++)
      op_parse_code = upat[i];
      /* Codes >= 1<<16 pack an ARM and a Thumb matcher together
	 (see MIX_ARM_THUMB_OPERANDS); select the relevant half.  */
      if (op_parse_code >= 1<<16)
	op_parse_code = thumb ? (op_parse_code >> 16)
				: (op_parse_code & ((1<<16)-1));

      if (op_parse_code >= OP_FIRST_OPTIONAL)
	  /* Remember where we are in case we need to backtrack.  */
	  gas_assert (!backtrack_pos);
	  backtrack_pos = str;
	  backtrack_error = inst.error;
	  backtrack_index = i;

      /* Operands after the first are comma-separated.  */
      if (i > 0 && (i > 1 || inst.operands[0].present))
	po_char_or_fail (',');

      /* Dispatch on the matcher code: registers, immediates,
	 addresses, shifter operands, PSR masks, register lists.  */
      switch (op_parse_code)
	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
	  /* Also accept generic coprocessor regs for unknown registers.  */
	  po_reg_or_fail (REG_TYPE_CN);
	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);	  break;
	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);	  break;
	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);	  break;
	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);	  break;

	/* Neon scalar.  Using an element size of 8 means that some invalid
	   scalars are accepted here, so deal with those in later code.  */
	case OP_RNSC:  po_scalar_or_goto (8, failure);	  break;

	  po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
	  po_imm_or_fail (0, 0, TRUE);

	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);

	  po_scalar_or_goto (8, try_rr);
	  po_reg_or_fail (REG_TYPE_RN);

	  po_scalar_or_goto (8, try_nsdq);
	  po_reg_or_fail (REG_TYPE_NSDQ);

	  po_scalar_or_goto (8, try_ndq);
	  po_reg_or_fail (REG_TYPE_NDQ);

	  po_scalar_or_goto (8, try_vfd);
	  po_reg_or_fail (REG_TYPE_VFD);

	  /* WARNING: parse_neon_mov can move the operand counter, i.  If we're
	     not careful then bad things might happen.  */
	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);

	  po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
	  /* There's a possibility of getting a 64-bit immediate here, so
	     we need special handling.  */
	  if (parse_big_immediate (&str, i) == FAIL)
	      inst.error = _("immediate value is out of range");

	  po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
	  po_imm_or_fail (0, 63, TRUE);

	  po_char_or_fail ('[');
	  po_reg_or_fail (REG_TYPE_RN);
	  po_char_or_fail (']');

	  po_reg_or_fail (REG_TYPE_RN);
	  if (skip_past_char (&str, '!') == SUCCESS)
	    inst.operands[i].writeback = 1;

	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
	case OP_I16z:	 po_imm_or_fail (  0,	  16, FALSE);	break;
	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
	case OP_I32z:	 po_imm_or_fail (  0,	  32, FALSE);	break;
	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
	case OP_I63:	 po_imm_or_fail (  0,	  63, FALSE);	break;
	case OP_I64:	 po_imm_or_fail (  1,	  64, FALSE);	break;
	case OP_I64z:	 po_imm_or_fail (  0,	  64, FALSE);	break;
	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;

	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
	case OP_oI32b:	 po_imm_or_fail (  1,	  32, TRUE);	break;
	case OP_oI32z:	 po_imm_or_fail (  0,	  32, TRUE);	break;
	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;

	/* Immediate variants */
	  po_char_or_fail ('{');
	  po_imm_or_fail (0, 255, TRUE);
	  po_char_or_fail ('}');

	  /* The expression parser chokes on a trailing !, so we have
	     to find it first and zap it.  */
	    while (*s && *s != ',')
		inst.operands[i].writeback = 1;
	    po_imm_or_fail (0, 31, TRUE);

	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
	  po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
	  if (inst.reloc.exp.X_op == O_symbol)
	      val = parse_reloc (&str);
		  inst.error = _("unrecognized relocation suffix");
	      else if (val != BFD_RELOC_UNUSED)
		  inst.operands[i].imm = val;
		  inst.operands[i].hasreloc = 1;

	  /* Operand for MOVW or MOVT.  */
	  po_misc_or_fail (parse_half (&str));

	  /* Register or expression.  */
	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;

	  /* Register or immediate.  */
	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;

	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
	  if (!is_immediate_prefix (*str))
	  val = parse_fpa_immediate (&str);
	  /* FPA immediates are encoded as registers 8-15.
	     parse_fpa_immediate has already applied the offset.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;

	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;

	  /* Two kinds of register.  */
	    struct reg_entry *rege = arm_reg_parse_multi (&str);
		|| (rege->type != REG_TYPE_MMXWR
		    && rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
		inst.error = _("iWMMXt data or control register expected");
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);

	    struct reg_entry *rege = arm_reg_parse_multi (&str);
		|| (rege->type != REG_TYPE_MMXWC
		    && rege->type != REG_TYPE_MMXWCG))
		inst.error = _("iWMMXt control register expected");
	    inst.operands[i].reg = rege->number;
	    inst.operands[i].isreg = 1;

	/* Misc */
	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
	case OP_oROR:	 val = parse_ror (&str);		break;
	case OP_COND:	 val = parse_cond (&str);		break;
	case OP_oBARRIER_I15:
	  po_barrier_or_imm (str); break;
	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)

	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
	      inst.error = _("Banked registers are not available with this "
	  val = parse_psr (&str, op_parse_code == OP_wPSR);

	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS).  */
	  if (strncasecmp (str, "APSR_", 5) == 0)
		  case 'c': found = (found & 1) ? 16 : found | 1; break;
		  case 'n': found = (found & 2) ? 16 : found | 2; break;
		  case 'z': found = (found & 4) ? 16 : found | 4; break;
		  case 'v': found = (found & 8) ? 16 : found | 8; break;
		  default: found = 16;
	      inst.operands[i].isvec = 1;
	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
	      inst.operands[i].reg = REG_PC;

	  po_misc_or_fail (parse_tb (&str));

	  /* Register lists.  */
	  val = parse_reg_list (&str);
	      inst.operands[1].writeback = 1;

	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);

	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);

	  /* Allow Q registers too.  */
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
	  inst.operands[i].issingle = 1;

	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,

	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
					   &inst.operands[i].vectype);

	  /* Addressing modes */
	  po_misc_or_fail (parse_address (&str, i));

	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDR));

	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDRS));

	  po_misc_or_fail_no_backtrack (
	    parse_address_group_reloc (&str, i, GROUP_LDC));

	  po_misc_or_fail (parse_shifter_operand (&str, i));

	  po_misc_or_fail_no_backtrack (
	    parse_shifter_operand_group_reloc (&str, i));

	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));

	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));

	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));

	  as_fatal (_("unhandled operand code %d"), op_parse_code);

      /* Various value-based sanity checks and shared operations.  We
	 do not signal immediate failures for the register constraints;
	 this allows a syntax error to take precedence.  */
      switch (op_parse_code)
	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
	    inst.error = BAD_PC;

	  if (inst.operands[i].isreg)
	      if (inst.operands[i].reg == REG_PC)
		inst.error = BAD_PC;
	      else if (inst.operands[i].reg == REG_SP)
		inst.error = BAD_SP;

	  if (inst.operands[i].isreg
	      && inst.operands[i].reg == REG_PC
	      && (inst.operands[i].writeback || thumb))
	    inst.error = BAD_PC;

	case OP_oBARRIER_I15:
	  inst.operands[i].imm = val;

      /* If we get here, this operand was successfully parsed.  */
      inst.operands[i].present = 1;

  inst.error = BAD_ARGS;

  /* The parse routine should already have set inst.error, but set a
     default here just in case.  */
    inst.error = _("syntax error");

  /* Do not backtrack over a trailing optional argument that
     absorbed some text.  We will only fail again, with the
     'garbage following instruction' error message, which is
     probably less helpful than the current one.  */
  if (backtrack_index == i && backtrack_pos != str
      && upat[i+1] == OP_stop)
	inst.error = _("syntax error");

  /* Try again, skipping the optional argument at backtrack_pos.  */
  str = backtrack_pos;
  inst.error = backtrack_error;
  inst.operands[backtrack_index].present = 0;
  i = backtrack_index;

  /* Check that we have parsed all the arguments.  */
  if (*str != '\0' && !inst.error)
    inst.error = _("garbage following instruction");

  return inst.error ? FAIL : SUCCESS;
/* Tear down the operand-parsing helper macros: they expand to
   references to parse_operands' locals and must not escape it.
   The previous list undefined "po_scalar_or_fail" — a name that is
   never defined — while leaking po_scalar_or_goto and the
   po_misc_or_fail pair past the function; undefine the macros that
   are actually defined above.  */
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_scalar_or_goto
#undef po_misc_or_fail
#undef po_misc_or_fail_no_backtrack
#undef po_barrier_or_imm
/* Shorthand macro for instruction encoding functions issuing errors.
   NOTE(review): the expansion body is elided here; presumably it sets
   inst.error to ERR when EXPR holds — confirm against the full
   definition.  */
#define constraint(expr, err)			\

/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.
   SP is reported as BAD_SP, PC as BAD_PC.  */
#define reject_bad_reg(reg)				\
    if (reg == REG_SP || reg == REG_PC)			\
	inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC;	\
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  The warning is gated on the global
   warn_on_deprecated flag.  */
#define warn_deprecated_sp(reg)			\
      if (warn_on_deprecated && reg == REG_SP)	\
	as_warn (_("use of r13 is deprecated"));	\
/* Functions for operand encoding.  ARM, then Thumb.  */

/* Rotate a 32-bit value V left by N bits.  Masking both shift counts
   with 31 keeps the expansion well-defined for N == 0: the previous
   form evaluated V >> (32 - 0), and shifting by the full width of the
   type is undefined behaviour in C.  encode_arm_immediate starts its
   scan at a rotation of 0, so that case is actually exercised.
   For N == 0 the masked form yields V | V == V, as intended.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
6913 /* If VAL can be encoded in the immediate field of an ARM instruction,
6914 return the encoded form. Otherwise, return FAIL. */
6917 encode_arm_immediate (unsigned int val)
6921 for (i = 0; i < 32; i += 2)
6922 if ((a = rotate_left (val, i)) <= 0xff)
6923 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6928 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6929 return the encoded form. Otherwise, return FAIL. */
6931 encode_thumb32_immediate (unsigned int val)
6938 for (i = 1; i <= 24; i++)
6941 if ((val & ~(0xff << i)) == 0)
6942 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6946 if (val == ((a << 16) | a))
6948 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6952 if (val == ((a << 16) | a))
6953 return 0x200 | (a >> 8);
6957 /* Encode a VFP SP or DP register number into inst.instruction. */
6960 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6962 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6965 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
6968 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6971 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6976 first_error (_("D register out of range for selected VFP version"));
6984 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6988 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6992 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6996 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7000 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7004 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7012 /* Encode a <shift> in an ARM-format instruction. The immediate,
7013 if any, is handled by md_apply_fix. */
7015 encode_arm_shift (int i)
7017 if (inst.operands[i].shift_kind == SHIFT_RRX)
7018 inst.instruction |= SHIFT_ROR << 5;
7021 inst.instruction |= inst.operands[i].shift_kind << 5;
7022 if (inst.operands[i].immisreg)
7024 inst.instruction |= SHIFT_BY_REG;
7025 inst.instruction |= inst.operands[i].imm << 8;
7028 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7033 encode_arm_shifter_operand (int i)
7035 if (inst.operands[i].isreg)
7037 inst.instruction |= inst.operands[i].reg;
7038 encode_arm_shift (i);
7042 inst.instruction |= INST_IMMEDIATE;
7043 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7044 inst.instruction |= inst.operands[i].imm;
7048 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7050 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7053 Generate an error if the operand is not a register. */
7054 constraint (!inst.operands[i].isreg,
7055 _("Instruction does not support =N addresses"));
7057 inst.instruction |= inst.operands[i].reg << 16;
7059 if (inst.operands[i].preind)
7063 inst.error = _("instruction does not accept preindexed addressing");
7066 inst.instruction |= PRE_INDEX;
7067 if (inst.operands[i].writeback)
7068 inst.instruction |= WRITE_BACK;
7071 else if (inst.operands[i].postind)
7073 gas_assert (inst.operands[i].writeback);
7075 inst.instruction |= WRITE_BACK;
7077 else /* unindexed - only for coprocessor */
7079 inst.error = _("instruction does not accept unindexed addressing");
7083 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7084 && (((inst.instruction & 0x000f0000) >> 16)
7085 == ((inst.instruction & 0x0000f000) >> 12)))
7086 as_warn ((inst.instruction & LOAD_BIT)
7087 ? _("destination register same as write-back base")
7088 : _("source register same as write-back base"));
7091 /* inst.operands[i] was set up by parse_address. Encode it into an
7092 ARM-format mode 2 load or store instruction. If is_t is true,
7093 reject forms that cannot be used with a T instruction (i.e. not
7096 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7098 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7100 encode_arm_addr_mode_common (i, is_t);
7102 if (inst.operands[i].immisreg)
7104 constraint ((inst.operands[i].imm == REG_PC
7105 || (is_pc && inst.operands[i].writeback)),
7107 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
7108 inst.instruction |= inst.operands[i].imm;
7109 if (!inst.operands[i].negative)
7110 inst.instruction |= INDEX_UP;
7111 if (inst.operands[i].shifted)
7113 if (inst.operands[i].shift_kind == SHIFT_RRX)
7114 inst.instruction |= SHIFT_ROR << 5;
7117 inst.instruction |= inst.operands[i].shift_kind << 5;
7118 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7122 else /* immediate offset in inst.reloc */
7124 if (is_pc && !inst.reloc.pc_rel)
7126 const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7128 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7129 cannot use PC in addressing.
7130 PC cannot be used in writeback addressing, either. */
7131 constraint ((is_t || inst.operands[i].writeback),
7134 /* Use of PC in str is deprecated for ARMv7. */
7135 if (warn_on_deprecated
7137 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7138 as_warn (_("use of PC in this instruction is deprecated"));
7141 if (inst.reloc.type == BFD_RELOC_UNUSED)
7143 /* Prefer + for zero encoded value. */
7144 if (!inst.operands[i].negative)
7145 inst.instruction |= INDEX_UP;
7146 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
7151 /* inst.operands[i] was set up by parse_address. Encode it into an
7152 ARM-format mode 3 load or store instruction. Reject forms that
7153 cannot be used with such instructions. If is_t is true, reject
7154 forms that cannot be used with a T instruction (i.e. not
7157 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7159 if (inst.operands[i].immisreg && inst.operands[i].shifted)
7161 inst.error = _("instruction does not accept scaled register index");
7165 encode_arm_addr_mode_common (i, is_t);
7167 if (inst.operands[i].immisreg)
7169 constraint ((inst.operands[i].imm == REG_PC
7170 || inst.operands[i].reg == REG_PC),
7172 inst.instruction |= inst.operands[i].imm;
7173 if (!inst.operands[i].negative)
7174 inst.instruction |= INDEX_UP;
7176 else /* immediate offset in inst.reloc */
7178 constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7179 && inst.operands[i].writeback),
7181 inst.instruction |= HWOFFSET_IMM;
7182 if (inst.reloc.type == BFD_RELOC_UNUSED)
7184 /* Prefer + for zero encoded value. */
7185 if (!inst.operands[i].negative)
7186 inst.instruction |= INDEX_UP;
7188 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
7193 /* inst.operands[i] was set up by parse_address. Encode it into an
7194 ARM-format instruction. Reject all forms which cannot be encoded
7195 into a coprocessor load/store instruction. If wb_ok is false,
7196 reject use of writeback; if unind_ok is false, reject use of
7197 unindexed addressing. If reloc_override is not 0, use it instead
7198 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
7199 (in which case it is preserved). */
7202 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
7204 inst.instruction |= inst.operands[i].reg << 16;
7206 gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
7208 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
7210 gas_assert (!inst.operands[i].writeback);
7213 inst.error = _("instruction does not support unindexed addressing");
7216 inst.instruction |= inst.operands[i].imm;
7217 inst.instruction |= INDEX_UP;
7221 if (inst.operands[i].preind)
7222 inst.instruction |= PRE_INDEX;
7224 if (inst.operands[i].writeback)
7226 if (inst.operands[i].reg == REG_PC)
7228 inst.error = _("pc may not be used with write-back");
7233 inst.error = _("instruction does not support writeback");
7236 inst.instruction |= WRITE_BACK;
7240 inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
7241 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
7242 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
7243 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
7246 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
7248 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
7251 /* Prefer + for zero encoded value. */
7252 if (!inst.operands[i].negative)
7253 inst.instruction |= INDEX_UP;
7258 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7259 Determine whether it can be performed with a move instruction; if
7260 it can, convert inst.instruction to that move instruction and
7261 return TRUE; if it can't, convert inst.instruction to a literal-pool
7262 load and return FALSE. If this is not a valid thing to do in the
7263 current context, set inst.error and return TRUE.
7265 inst.operands[i] describes the destination register. */
7268 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
7273 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7277 if ((inst.instruction & tbit) == 0)
7279 inst.error = _("invalid pseudo operation");
7282 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
7284 inst.error = _("constant expression expected");
7287 if (inst.reloc.exp.X_op == O_constant)
7291 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
7293 /* This can be done with a mov(1) instruction. */
7294 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
7295 inst.instruction |= inst.reloc.exp.X_add_number;
7301 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
7304 /* This can be done with a mov instruction. */
7305 inst.instruction &= LITERAL_MASK;
7306 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
7307 inst.instruction |= value & 0xfff;
7311 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
7314 /* This can be done with a mvn instruction. */
7315 inst.instruction &= LITERAL_MASK;
7316 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
7317 inst.instruction |= value & 0xfff;
7323 if (add_to_lit_pool () == FAIL)
7325 inst.error = _("literal pool insertion failed");
7328 inst.operands[1].reg = REG_PC;
7329 inst.operands[1].isreg = 1;
7330 inst.operands[1].preind = 1;
7331 inst.reloc.pc_rel = 1;
7332 inst.reloc.type = (thumb_p
7333 ? BFD_RELOC_ARM_THUMB_OFFSET
7335 ? BFD_RELOC_ARM_HWLITERAL
7336 : BFD_RELOC_ARM_LITERAL));
/* Functions for instruction encoding, sorted by sub-architecture.
   First some generics; their names are taken from the conventional
   bit positions for register arguments in ARM format instructions.
   NOTE(review): the enclosing function headers (presumably do_rd,
   do_rd_rm, do_rd_rn, do_rn_rd, ...) are elided here — confirm the
   exact names against the full source.  */

/* Rd in bits 12-15.  */
inst.instruction |= inst.operands[0].reg << 12;

/* Rd in bits 12-15, Rm in bits 0-3.  */
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg;

/* Rd in bits 12-15, Rn in bits 16-19.  */
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;

/* Rn in bits 16-19, Rd in bits 12-15.  */
inst.instruction |= inst.operands[0].reg << 16;
inst.instruction |= inst.operands[1].reg << 12;

/* Three-register form with SWP/SWPB checking: 0x01000090 (with the
   0x0fbfffff mask) matches the SWP{B} encoding.  */
unsigned Rn = inst.operands[2].reg;
/* Enforce restrictions on SWP instruction.  */
if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    /* SWP's base must not overlap the transfer registers.  */
    constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		_("Rn must not overlap other operands"));

    /* SWP{b} is deprecated for ARMv6* and ARMv7.  */
    if (warn_on_deprecated
	&& ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
      as_warn (_("swp{b} use is deprecated for this architecture"));

inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg;
inst.instruction |= Rn << 16;

/* Rd in bits 12-15, Rn in bits 16-19, Rm in bits 0-3.  */
inst.instruction |= inst.operands[0].reg << 12;
inst.instruction |= inst.operands[1].reg << 16;
inst.instruction |= inst.operands[2].reg;

/* Rm/Rd/Rn form: operand 2 (Rn, bits 16-19) must not be the PC, and
   any attached expression must be absent or zero.  */
constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
constraint (((inst.reloc.exp.X_op != O_constant
	      && inst.reloc.exp.X_op != O_illegal)
	     || inst.reloc.exp.X_add_number != 0),
inst.instruction |= inst.operands[0].reg;
inst.instruction |= inst.operands[1].reg << 12;
inst.instruction |= inst.operands[2].reg << 16;

/* Single immediate operand, placed in the low bits.  */
inst.instruction |= inst.operands[0].imm;

/* Rd in bits 12-15 plus a coprocessor address operand (operand 1),
   writeback and unindexed forms both permitted.  */
inst.instruction |= inst.operands[0].reg << 12;
encode_arm_cp_address (1, TRUE, TRUE, 0);
7431 /* ARM instructions, in alphabetical order by function name (except
7432 that wrapper functions appear immediately after the function they
7435 /* This is a pseudo-op of the form "adr rd, label" to be converted
7436 into a relative address of the form "add rd, pc, #label-.-8". */
/* do_adr (fragment): leave the offset to frag/reloc processing; the
   -8 accounts for the ARM-state PC-read offset.  */
7441 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
7443 /* Frag hacking will turn this into a sub instruction if the offset turns
7444 out to be negative. */
7445 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7446 inst.reloc.pc_rel = 1;
7447 inst.reloc.exp.X_add_number -= 8;
7450 /* This is a pseudo-op of the form "adrl rd, label" to be converted
7451 into a relative address of the form:
7452 add rd, pc, #low(label-.-8)"
7453 add rd, rd, #high(label-.-8)" */
/* do_adrl (fragment): two-instruction form, hence inst.size doubled.  */
7458 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
7460 /* Frag hacking will turn this into a sub instruction if the offset turns
7461 out to be negative. */
7462 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
7463 inst.reloc.pc_rel = 1;
7464 inst.size = INSN_SIZE * 2;
7465 inst.reloc.exp.X_add_number -= 8;
/* do_arit (fragment): two-operand form "op Rd, <shifter>" implies
   Rn = Rd.  */
7471 if (!inst.operands[1].present)
7472 inst.operands[1].reg = inst.operands[0].reg;
7473 inst.instruction |= inst.operands[0].reg << 12;
7474 inst.instruction |= inst.operands[1].reg << 16;
7475 encode_arm_shifter_operand (2);
/* do_barrier (fragment): encode the DMB/DSB/ISB option field.
   Anything other than the 0x40 opcode nibble restricts the option to
   four bits; with no option present, default to SY (0xf).  */
7481 if (inst.operands[0].present)
/* BUG FIX: the original constraint ended with
   "&& inst.operands[0].imm < 0x0", which is contradictory with
   "imm > 0xf" (and always false for an unsigned field), so the
   "bad barrier type" diagnostic could never be emitted.  Drop the
   dead conjunct so out-of-range barrier options are rejected.  */
7483 constraint ((inst.instruction & 0xf0) != 0x40
7484 && inst.operands[0].imm > 0xf,
7486 _("bad barrier type"));
7487 inst.instruction |= inst.operands[0].imm;
7490 inst.instruction |= 0xf;
/* do_bfc (fragment): BFC encodes LSB and MSB, not LSB and width.  */
7496 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7497 constraint (msb > 32, _("bit-field extends past end of register"));
7498 /* The instruction encoding stores the LSB and MSB,
7499 not the LSB and width. */
7500 inst.instruction |= inst.operands[0].reg << 12;
7501 inst.instruction |= inst.operands[1].imm << 7;
7502 inst.instruction |= (msb - 1) << 16;
/* do_bfi (fragment): "bfi Rd, #0, ..." is alternative syntax for BFC,
   i.e. Rm == PC.  */
7510 /* #0 in second position is alternative syntax for bfc, which is
7511 the same instruction but with REG_PC in the Rm field. */
7512 if (!inst.operands[1].isreg)
7513 inst.operands[1].reg = REG_PC;
7515 msb = inst.operands[2].imm + inst.operands[3].imm;
7516 constraint (msb > 32, _("bit-field extends past end of register"));
7517 /* The instruction encoding stores the LSB and MSB,
7518 not the LSB and width. */
7519 inst.instruction |= inst.operands[0].reg << 12;
7520 inst.instruction |= inst.operands[1].reg;
7521 inst.instruction |= inst.operands[2].imm << 7;
7522 inst.instruction |= (msb - 1) << 16;
/* do_bfx (fragment): SBFX/UBFX encode LSB and (width - 1).  */
7528 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
7529 _("bit-field extends past end of register"));
7530 inst.instruction |= inst.operands[0].reg << 12;
7531 inst.instruction |= inst.operands[1].reg;
7532 inst.instruction |= inst.operands[2].imm << 7;
7533 inst.instruction |= (inst.operands[3].imm - 1) << 16;
7536 /* ARM V5 breakpoint instruction (argument parse)
7537 BKPT <16 bit unsigned immediate>
7538 Instruction is not conditional.
7539 The bit pattern given in insns[] has the COND_ALWAYS condition,
7540 and it is an error if the caller tried to override that. */
/* do_bkpt (fragment): split the 16-bit immediate across the two
   immediate fields of the encoding.  */
7545 /* Top 12 of 16 bits to bits 19:8. */
7546 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
7548 /* Bottom 4 of 16 bits to bits 3:0. */
7549 inst.instruction |= inst.operands[0].imm & 0xf;
/* encode_branch: set inst.reloc for a branch; honour an explicit
   (plt)/(tlscall) suffix, otherwise use DEFAULT_RELOC.  */
7553 encode_branch (int default_reloc)
7555 if (inst.operands[0].hasreloc)
7557 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
7558 && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
7559 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
7560 inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
7561 ? BFD_RELOC_ARM_PLT32
7562 : thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
7565 inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
7566 inst.reloc.pc_rel = 1;
/* do_b (fragment): EABI v4+ uses the interworking-aware jump reloc.  */
7573 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7574 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7577 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
/* do_bl (fragment): a conditional BL cannot become BLX, so only the
   always-condition form gets the CALL reloc.  */
7584 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7586 if (inst.cond == COND_ALWAYS)
7587 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
7589 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
7593 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
7596 /* ARM V5 branch-link-exchange instruction (argument parse)
7597 BLX <target_addr> ie BLX(1)
7598 BLX{<condition>} <Rm> ie BLX(2)
7599 Unfortunately, there are two different opcodes for this mnemonic.
7600 So, the insns[].value is not used, and the code here zaps values
7601 into inst.instruction.
7602 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
7607 if (inst.operands[0].isreg)
7609 /* Arg is a register; the opcode provided by insns[] is correct.
7610 It is not illegal to do "blx pc", just useless. */
7611 if (inst.operands[0].reg == REG_PC)
7612 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
7614 inst.instruction |= inst.operands[0].reg;
7618 /* Arg is an address; this instruction cannot be executed
7619 conditionally, and the opcode must be adjusted.
7620 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
7621 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
7622 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7623 inst.instruction = 0xfa000000;
7624 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
/* do_bx (fragment): emit R_ARM_V4BX for EABI objects that may target
   ARMv4t or earlier, so the linker can rewrite BX if needed.  */
7631 bfd_boolean want_reloc;
7633 if (inst.operands[0].reg == REG_PC)
7634 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
7636 inst.instruction |= inst.operands[0].reg;
7637 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
7638 it is for ARMv4t or earlier. */
7639 want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
7640 if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
7644 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
7649 inst.reloc.type = BFD_RELOC_ARM_V4BX;
7653 /* ARM v5TEJ. Jump to Jazelle code. */
/* do_bxj (fragment).  */
7658 if (inst.operands[0].reg == REG_PC)
7659 as_tsktsk (_("use of r15 in bxj is not really useful"));
7661 inst.instruction |= inst.operands[0].reg;
7664 /* Co-processor data operation:
7665 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
7666 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
/* do_cdp (fragment): straight field packing, no register checks.  */
7670 inst.instruction |= inst.operands[0].reg << 8;
7671 inst.instruction |= inst.operands[1].imm << 20;
7672 inst.instruction |= inst.operands[2].reg << 12;
7673 inst.instruction |= inst.operands[3].reg << 16;
7674 inst.instruction |= inst.operands[4].reg;
7675 inst.instruction |= inst.operands[5].imm << 5;
/* do_cmp (fragment): Rn at 16 plus a shifter operand; no Rd.  */
7681 inst.instruction |= inst.operands[0].reg << 16;
7682 encode_arm_shifter_operand (1);
7685 /* Transfer between coprocessor and ARM registers.
7686 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
7691 No special properties. */
/* do_co_reg (fragment): MCR/MRC.  Rd restrictions differ between
   the Thumb encodings (SP/PC rejected) and the ARM MCR form (PC
   rejected).  */
7698 Rd = inst.operands[2].reg;
7701 if (inst.instruction == 0xee000010
7702 || inst.instruction == 0xfe000010)
7704 reject_bad_reg (Rd);
7707 constraint (Rd == REG_SP, BAD_SP);
7712 if (inst.instruction == 0xe000010)
7713 constraint (Rd == REG_PC, BAD_PC);
7717 inst.instruction |= inst.operands[0].reg << 8;
7718 inst.instruction |= inst.operands[1].imm << 21;
7719 inst.instruction |= Rd << 12;
7720 inst.instruction |= inst.operands[3].reg << 16;
7721 inst.instruction |= inst.operands[4].reg;
7722 inst.instruction |= inst.operands[5].imm << 5;
7725 /* Transfer between coprocessor register and pair of ARM registers.
7726 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
7731 Two XScale instructions are special cases of these:
7733 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
7734 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
7736 Result unpredictable if Rd or Rn is R15. */
/* do_co_reg2c (fragment): MCRR/MRRC register-pair transfer.  */
7743 Rd = inst.operands[2].reg;
7744 Rn = inst.operands[3].reg;
7748 reject_bad_reg (Rd);
7749 reject_bad_reg (Rn);
7753 constraint (Rd == REG_PC, BAD_PC);
7754 constraint (Rn == REG_PC, BAD_PC);
7757 inst.instruction |= inst.operands[0].reg << 8;
7758 inst.instruction |= inst.operands[1].imm << 4;
7759 inst.instruction |= Rd << 12;
7760 inst.instruction |= Rn << 16;
7761 inst.instruction |= inst.operands[4].reg;
/* do_cpsi (fragment): CPS<effect> iflags{, #mode}; a mode operand
   selects the mode-change form via CPSI_MMOD.  */
7767 inst.instruction |= inst.operands[0].imm << 6;
7768 if (inst.operands[1].present)
7770 inst.instruction |= CPSI_MMOD;
7771 inst.instruction |= inst.operands[1].imm;
/* do_dbg (fragment): DBG #option in the low bits.  */
7778 inst.instruction |= inst.operands[0].imm;
/* do_div (fragment): SDIV/UDIV — Rd at 16, Rn at 0, Rm at 8; PC is
   not allowed anywhere.  Two-operand form implies Rn = Rd.  */
7784 unsigned Rd, Rn, Rm;
7786 Rd = inst.operands[0].reg;
7787 Rn = (inst.operands[1].present
7788 ? inst.operands[1].reg : Rd);
7789 Rm = inst.operands[2].reg;
7791 constraint ((Rd == REG_PC), BAD_PC);
7792 constraint ((Rn == REG_PC), BAD_PC);
7793 constraint ((Rm == REG_PC), BAD_PC);
7795 inst.instruction |= Rd << 16;
7796 inst.instruction |= Rn << 0;
7797 inst.instruction |= Rm << 8;
7803 /* There is no IT instruction in ARM mode. We
7804 process it to do the validation as if in
7805 thumb mode, just in case the code gets
7806 assembled for thumb using the unified syntax. */
/* do_it (fragment): record the IT block state for later checking.  */
7811 set_it_insn_type (IT_INSN);
7812 now_it.mask = (inst.instruction & 0xf) | 0x10;
7813 now_it.cc = inst.operands[0].imm;
7817 /* If there is only one register in the register list,
7818 then return its register number. Otherwise return -1. */
/* Uses ffs() to find the lowest set bit; a single-register mask
   equals exactly (1 << i).  */
7820 only_one_reg_in_list (int range)
7822 int i = ffs (range) - 1;
7823 return (i > 15 || range != (1 << i)) ? -1 : i;
/* encode_ldmstm: encode LDM/STM (and PUSH/POP when
   FROM_PUSH_POP_MNEM), warning about UNPREDICTABLE writeback uses.  */
7827 encode_ldmstm(int from_push_pop_mnem)
7829 int base_reg = inst.operands[0].reg;
7830 int range = inst.operands[1].imm;
7833 inst.instruction |= base_reg << 16;
7834 inst.instruction |= range;
/* "^" on the register list selects the user-register/exception-return
   forms (LDM type 2/3).  */
7836 if (inst.operands[1].writeback)
7837 inst.instruction |= LDM_TYPE_2_OR_3;
7839 if (inst.operands[0].writeback)
7841 inst.instruction |= WRITE_BACK;
7842 /* Check for unpredictable uses of writeback. */
7843 if (inst.instruction & LOAD_BIT)
7845 /* Not allowed in LDM type 2. */
7846 if ((inst.instruction & LDM_TYPE_2_OR_3)
7847 && ((range & (1 << REG_PC)) == 0))
7848 as_warn (_("writeback of base register is UNPREDICTABLE"));
7849 /* Only allowed if base reg not in list for other types. */
7850 else if (range & (1 << base_reg))
7851 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
7855 /* Not allowed for type 2. */
7856 if (inst.instruction & LDM_TYPE_2_OR_3)
7857 as_warn (_("writeback of base register is UNPREDICTABLE"));
7858 /* Only allowed if base reg not in list, or first in list. */
7859 else if ((range & (1 << base_reg))
7860 && (range & ((1 << base_reg) - 1)))
7861 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
7865 /* If PUSH/POP has only one register, then use the A2 encoding. */
7866 one_reg = only_one_reg_in_list (range);
7867 if (from_push_pop_mnem && one_reg >= 0)
7869 int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;
/* Rebuild as the single-register STR/LDR form, keeping only the
   condition field.  */
7871 inst.instruction &= A_COND_MASK;
7872 inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
7873 inst.instruction |= one_reg << 12;
/* do_ldmstm (fragment): plain LDM/STM wrapper.  */
7880 encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
7883 /* ARMv5TE load-consecutive (argument parse)
/* do_ldrd (fragment): LDRD/STRD need an even first register; the
   implicit second register is Rt + 1.  */
7892 constraint (inst.operands[0].reg % 2 != 0,
7893 _("first transfer register must be even"));
7894 constraint (inst.operands[1].present
7895 && inst.operands[1].reg != inst.operands[0].reg + 1,
7896 _("can only transfer two consecutive registers"));
7897 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7898 constraint (!inst.operands[2].isreg, _("'[' expected"));
7900 if (!inst.operands[1].present)
7901 inst.operands[1].reg = inst.operands[0].reg + 1;
7903 /* encode_arm_addr_mode_3 will diagnose overlap between the base
7904 register and the first register written; we have to diagnose
7905 overlap between the base and the second register written here. */
7907 if (inst.operands[2].reg == inst.operands[1].reg
7908 && (inst.operands[2].writeback || inst.operands[2].postind))
7909 as_warn (_("base register written back, and overlaps "
7910 "second transfer register"));
7912 if (!(inst.instruction & V4_STR_BIT))
7914 /* For an index-register load, the index register must not overlap the
7915 destination (even if not write-back). */
7916 if (inst.operands[2].immisreg
7917 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
7918 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
7919 as_warn (_("index register overlaps transfer register"));
7921 inst.instruction |= inst.operands[0].reg << 12;
7922 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
/* do_ldrex (fragment): LDREX only takes a bare [Rn] address — reject
   every other addressing form.  */
7928 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
7929 || inst.operands[1].postind || inst.operands[1].writeback
7930 || inst.operands[1].immisreg || inst.operands[1].shifted
7931 || inst.operands[1].negative
7932 /* This can arise if the programmer has written
7934 or if they have mistakenly used a register name as the last
7937 It is very difficult to distinguish between these two cases
7938 because "rX" might actually be a label. ie the register
7939 name has been occluded by a symbol of the same name. So we
7940 just generate a general 'bad addressing mode' type error
7941 message and leave it up to the programmer to discover the
7942 true cause and fix their mistake. */
7943 || (inst.operands[1].reg == REG_PC),
7946 constraint (inst.reloc.exp.X_op != O_constant
7947 || inst.reloc.exp.X_add_number != 0,
7948 _("offset must be zero in ARM encoding"));
7950 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
7952 inst.instruction |= inst.operands[0].reg << 12;
7953 inst.instruction |= inst.operands[1].reg << 16;
7954 inst.reloc.type = BFD_RELOC_UNUSED;
/* do_ldrexd (fragment): LDREXD needs an even Rt; Rt2 is implicit.  */
7960 constraint (inst.operands[0].reg % 2 != 0,
7961 _("even register required"));
7962 constraint (inst.operands[1].present
7963 && inst.operands[1].reg != inst.operands[0].reg + 1,
7964 _("can only load two consecutive registers"));
7965 /* If op 1 were present and equal to PC, this function wouldn't
7966 have been called in the first place. */
7967 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
7969 inst.instruction |= inst.operands[0].reg << 12;
7970 inst.instruction |= inst.operands[2].reg << 16;
7973 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
7974 which is not a multiple of four is UNPREDICTABLE. */
/* Diagnose a PC-relative "ldr pc" whose literal offset is not
   word-aligned.  Only immediate (non register-offset) forms apply.  */
7976 check_ldr_r15_aligned (void)
7978 constraint (!(inst.operands[1].immisreg)
7979 && (inst.operands[0].reg == REG_PC
7980 && inst.operands[1].reg == REG_PC
7981 && (inst.reloc.exp.X_add_number & 0x3)),
/* BUG FIX: correct the misspelling "alligned" -> "aligned" in the
   user-visible diagnostic.  */
7982 _("ldr to register 15 must be 4-byte aligned"));
/* do_ldst (fragment): word/byte load-store; a non-register operand 1
   is turned into a mov-or-literal-pool load.  */
7988 inst.instruction |= inst.operands[0].reg << 12;
7989 if (!inst.operands[1].isreg)
7990 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
7992 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
7993 check_ldr_r15_aligned ();
/* do_ldstt (fragment): LDRT/STRT — user-mode access, always
   post-indexed with writeback.  */
7999 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8001 if (inst.operands[1].preind)
8003 constraint (inst.reloc.exp.X_op != O_constant
8004 || inst.reloc.exp.X_add_number != 0,
8005 _("this instruction requires a post-indexed address"));
8007 inst.operands[1].preind = 0;
8008 inst.operands[1].postind = 1;
8009 inst.operands[1].writeback = 1;
8011 inst.instruction |= inst.operands[0].reg << 12;
8012 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
8015 /* Halfword and signed-byte load/store operations. */
/* do_ldstv4 (fragment): addr-mode-3 (halfword/signed byte) variant.  */
8020 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8021 inst.instruction |= inst.operands[0].reg << 12;
8022 if (!inst.operands[1].isreg)
8023 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
8025 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
/* do_ldsttv4 (fragment): LDRHT/LDRSBT/... — post-indexed, like
   do_ldstt but via addr-mode-3.  */
8031 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
8033 if (inst.operands[1].preind)
8035 constraint (inst.reloc.exp.X_op != O_constant
8036 || inst.reloc.exp.X_add_number != 0,
8037 _("this instruction requires a post-indexed address"));
8039 inst.operands[1].preind = 0;
8040 inst.operands[1].postind = 1;
8041 inst.operands[1].writeback = 1;
8043 inst.instruction |= inst.operands[0].reg << 12;
8044 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
8047 /* Co-processor register load/store.
8048 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
/* do_lstc (fragment).  */
8052 inst.instruction |= inst.operands[0].reg << 8;
8053 inst.instruction |= inst.operands[1].reg << 12;
8054 encode_arm_cp_address (2, TRUE, TRUE, 0);
/* do_mlas (fragment): MLA/MLS; Rd == Rm is only questionable for MLA
   before ARMv6 (bit 22 distinguishes MLS).  */
8060 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8061 if (inst.operands[0].reg == inst.operands[1].reg
8062 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
8063 && !(inst.instruction & 0x00400000))
8064 as_tsktsk (_("Rd and Rm should be different in mla"));
8066 inst.instruction |= inst.operands[0].reg << 16;
8067 inst.instruction |= inst.operands[1].reg;
8068 inst.instruction |= inst.operands[2].reg << 8;
8069 inst.instruction |= inst.operands[3].reg << 12;
/* do_mov (fragment): Rd at 12 plus a shifter operand.  */
8075 inst.instruction |= inst.operands[0].reg << 12;
8076 encode_arm_shifter_operand (1);
8079 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
/* do_mov16 (fragment): bit 22 set means MOVT ("top").  A :lower16:
   reloc is invalid on MOVT and :upper16: is invalid on MOVW.  */
8086 top = (inst.instruction & 0x00400000) != 0;
8087 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8088 _(":lower16: not allowed this instruction"));
8089 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
/* BUG FIX: the :upper16: diagnostic was missing the word "this",
   making it inconsistent with the :lower16: message above.  */
8090 _(":upper16: not allowed this instruction"));
8091 inst.instruction |= inst.operands[0].reg << 12;
8092 if (inst.reloc.type == BFD_RELOC_UNUSED)
8094 imm = inst.reloc.exp.X_add_number;
8095 /* The value is in two pieces: 0:11, 16:19. */
8096 inst.instruction |= (imm & 0x00000fff);
8097 inst.instruction |= (imm & 0x0000f000) << 4;
8101 static void do_vfp_nsyn_opcode (const char *);
/* Non-unified-syntax VMRS: "vmrs APSR_nzcv, fpscr" maps to fmstat;
   a VFP destination register maps to fmrx.  */
8104 do_vfp_nsyn_mrs (void)
8106 if (inst.operands[0].isvec)
8108 if (inst.operands[1].reg != 1)
8109 first_error (_("operand 1 must be FPSCR"));
8110 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
8111 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
8112 do_vfp_nsyn_opcode ("fmstat");
8114 else if (inst.operands[1].isvec)
8115 do_vfp_nsyn_opcode ("fmrx");
/* Non-unified-syntax VMSR maps to fmxr when operand 0 is a VFP
   system register.  */
8123 do_vfp_nsyn_msr (void)
8125 if (inst.operands[0].isvec)
8126 do_vfp_nsyn_opcode ("fmxr");
/* do_vmrs (fragment): VMRS Rt, <spec_reg>.  SP is invalid in Thumb;
   PC is only allowed via the APSR_nzcv form (isvec).  */
8136 unsigned Rt = inst.operands[0].reg;
8138 if (thumb_mode && inst.operands[0].reg == REG_SP)
8140 inst.error = BAD_SP;
8144 /* APSR_ sets isvec. All other refs to PC are illegal. */
8145 if (!inst.operands[0].isvec && inst.operands[0].reg == REG_PC)
8147 inst.error = BAD_PC;
/* Only the recognized VFP system registers are accepted; the switch
   cases are elided from this excerpt.  */
8151 switch (inst.operands[1].reg)
8158 inst.instruction |= (inst.operands[1].reg << 16);
8161 first_error (_("operand 1 must be a VFP extension System Register"));
8164 inst.instruction |= (Rt << 12);
/* do_vmsr (fragment): VMSR <spec_reg>, Rt.  Thumb rejects SP/PC via
   reject_bad_reg; ARM rejects only PC.  */
8170 unsigned Rt = inst.operands[1].reg;
8173 reject_bad_reg (Rt);
8174 else if (Rt == REG_PC)
8176 inst.error = BAD_PC;
8180 switch (inst.operands[0].reg)
8185 inst.instruction |= (inst.operands[0].reg << 16);
/* BUG FIX: correct the typo "pr" -> "or" in the user-visible
   diagnostic ("...FPSCR pr FPEXC").  */
8188 first_error (_("operand 0 must be FPSID or FPSCR or FPEXC"));
8191 inst.instruction |= (Rt << 12);
/* do_mrs (fragment): try the VFP pseudo form first, then encode the
   core-register MRS.  */
8199 if (do_vfp_nsyn_mrs () == SUCCESS)
8202 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8203 inst.instruction |= inst.operands[0].reg << 12;
8205 if (inst.operands[1].isreg)
8207 br = inst.operands[1].reg;
8208 if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
8209 as_bad (_("bad register for mrs"));
8213 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8214 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
8216 _("'APSR', 'CPSR' or 'SPSR' expected"));
8217 br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
8220 inst.instruction |= br;
8223 /* Two possible forms:
8224 "{C|S}PSR_<field>, Rm",
8225 "{C|S}PSR_f, #expression". */
/* do_msr (fragment): register form or immediate form (the latter sets
   the I bit and defers the immediate to reloc processing).  */
8230 if (do_vfp_nsyn_msr () == SUCCESS)
8233 inst.instruction |= inst.operands[0].imm;
8234 if (inst.operands[1].isreg)
8235 inst.instruction |= inst.operands[1].reg;
8238 inst.instruction |= INST_IMMEDIATE;
8239 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8240 inst.reloc.pc_rel = 0;
/* do_mul (fragment): MUL Rd, Rm{, Rs}; two-operand form implies
   Rs = Rd.  Rd at 16, Rm at 0, Rs at 8.  */
8247 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
8249 if (!inst.operands[2].present)
8250 inst.operands[2].reg = inst.operands[0].reg;
8251 inst.instruction |= inst.operands[0].reg << 16;
8252 inst.instruction |= inst.operands[1].reg;
8253 inst.instruction |= inst.operands[2].reg << 8;
/* Rd == Rm is only questionable before ARMv6.  */
8255 if (inst.operands[0].reg == inst.operands[1].reg
8256 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8257 as_tsktsk (_("Rd and Rm should be different in mul"));
8260 /* Long Multiply Parser
8261 UMULL RdLo, RdHi, Rm, Rs
8262 SMULL RdLo, RdHi, Rm, Rs
8263 UMLAL RdLo, RdHi, Rm, Rs
8264 SMLAL RdLo, RdHi, Rm, Rs. */
/* do_mull (fragment): RdLo at 12, RdHi at 16, Rm at 0, Rs at 8.  */
8269 inst.instruction |= inst.operands[0].reg << 12;
8270 inst.instruction |= inst.operands[1].reg << 16;
8271 inst.instruction |= inst.operands[2].reg;
8272 inst.instruction |= inst.operands[3].reg << 8;
8274 /* rdhi and rdlo must be different. */
8275 if (inst.operands[0].reg == inst.operands[1].reg)
8276 as_tsktsk (_("rdhi and rdlo must be different"));
8278 /* rdhi, rdlo and rm must all be different before armv6. */
8279 if ((inst.operands[0].reg == inst.operands[2].reg
8280 || inst.operands[1].reg == inst.operands[2].reg)
8281 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8282 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
/* do_nop (fragment): with an operand, or on v6K+, emit the
   architectural hint encoding (a CPSR write selecting no bits).  */
8288 if (inst.operands[0].present
8289 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
8291 /* Architectural NOP hints are CPSR sets with no bits selected. */
8292 inst.instruction &= 0xf0000000;
8293 inst.instruction |= 0x0320f000;
8294 if (inst.operands[0].present)
8295 inst.instruction |= inst.operands[0].imm;
8299 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8300 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8301 Condition defaults to COND_ALWAYS.
8302 Error if Rd, Rn or Rm are R15. */
/* do_pkhbt (fragment).  */
8307 inst.instruction |= inst.operands[0].reg << 12;
8308 inst.instruction |= inst.operands[1].reg << 16;
8309 inst.instruction |= inst.operands[2].reg;
8310 if (inst.operands[3].present)
8311 encode_arm_shift (3);
8314 /* ARM V6 PKHTB (Argument Parse). */
/* do_pkhtb (fragment): without a shift, PKHTB Rd, Rn, Rm is encoded
   as PKHBT Rd, Rm, Rn (operands swapped).  */
8319 if (!inst.operands[3].present)
8321 /* If the shift specifier is omitted, turn the instruction
8322 into pkhbt rd, rm, rn. */
8323 inst.instruction &= 0xfff00010;
8324 inst.instruction |= inst.operands[0].reg << 12;
8325 inst.instruction |= inst.operands[1].reg;
8326 inst.instruction |= inst.operands[2].reg << 16;
8330 inst.instruction |= inst.operands[0].reg << 12;
8331 inst.instruction |= inst.operands[1].reg << 16;
8332 inst.instruction |= inst.operands[2].reg;
8333 encode_arm_shift (3);
8337 /* ARMv5TE: Preload-Cache
8338 MP Extensions: Preload for write
8342 Syntactically, like LDR with B=1, W=0, L=1. */
/* do_pld (fragment): only plain pre-indexed addressing is valid.  */
8347 constraint (!inst.operands[0].isreg,
8348 _("'[' expected after PLD mnemonic"));
8349 constraint (inst.operands[0].postind,
8350 _("post-indexed expression used in preload instruction"));
8351 constraint (inst.operands[0].writeback,
8352 _("writeback used in preload instruction"));
8353 constraint (!inst.operands[0].preind,
8354 _("unindexed addressing used in preload instruction"));
8355 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8358 /* ARMv7: PLI <addr_mode> */
/* do_pli (fragment): like do_pld but the PLI encoding clears the
   pre-index bit after address encoding.  */
8362 constraint (!inst.operands[0].isreg,
8363 _("'[' expected after PLI mnemonic"));
8364 constraint (inst.operands[0].postind,
8365 _("post-indexed expression used in preload instruction"));
8366 constraint (inst.operands[0].writeback,
8367 _("writeback used in preload instruction"));
8368 constraint (!inst.operands[0].preind,
8369 _("unindexed addressing used in preload instruction"));
8370 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
8371 inst.instruction &= ~PRE_INDEX;
/* do_push_pop (fragment): rewrite PUSH/POP as LDM/STM with SP! as the
   base operand, then share the LDM/STM encoder.  */
8377 inst.operands[1] = inst.operands[0];
8378 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
8379 inst.operands[0].isreg = 1;
8380 inst.operands[0].writeback = 1;
8381 inst.operands[0].reg = REG_SP;
8382 encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
8385 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
8386 word at the specified address and the following word
8388 Unconditionally executed.
8389 Error if Rn is R15. */
/* do_rfe (fragment).  */
8394 inst.instruction |= inst.operands[0].reg << 16;
8395 if (inst.operands[0].writeback)
8396 inst.instruction |= WRITE_BACK;
8399 /* ARM V6 ssat (argument parse). */
/* do_ssat (fragment): SSAT encodes (sat_imm - 1); USAT below encodes
   sat_imm directly — signed saturation positions are 1-based.  */
8404 inst.instruction |= inst.operands[0].reg << 12;
8405 inst.instruction |= (inst.operands[1].imm - 1) << 16;
8406 inst.instruction |= inst.operands[2].reg;
8408 if (inst.operands[3].present)
8409 encode_arm_shift (3);
8412 /* ARM V6 usat (argument parse). */
/* do_usat (fragment).  */
8417 inst.instruction |= inst.operands[0].reg << 12;
8418 inst.instruction |= inst.operands[1].imm << 16;
8419 inst.instruction |= inst.operands[2].reg;
8421 if (inst.operands[3].present)
8422 encode_arm_shift (3);
8425 /* ARM V6 ssat16 (argument parse). */
/* do_ssat16 (fragment).  */
8430 inst.instruction |= inst.operands[0].reg << 12;
8431 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
8432 inst.instruction |= inst.operands[2].reg;
/* do_usat16 (fragment).  */
8438 inst.instruction |= inst.operands[0].reg << 12;
8439 inst.instruction |= inst.operands[1].imm << 16;
8440 inst.instruction |= inst.operands[2].reg;
8443 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
8444 preserving the other bits.
8446 setend <endian_specifier>, where <endian_specifier> is either
/* do_setend (fragment): a non-zero specifier (BE) sets bit 9.  */
8452 if (inst.operands[0].imm)
8453 inst.instruction |= 0x200;
/* do_shift (fragment): two-operand form implies Rm = Rd; a register
   shift count sets SHIFT_BY_REG, an immediate goes via reloc.  */
8459 unsigned int Rm = (inst.operands[1].present
8460 ? inst.operands[1].reg
8461 : inst.operands[0].reg);
8463 inst.instruction |= inst.operands[0].reg << 12;
8464 inst.instruction |= Rm;
8465 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
8467 inst.instruction |= inst.operands[2].reg << 8;
8468 inst.instruction |= SHIFT_BY_REG;
8469 /* PR 12854: Error on extraneous shifts. */
8470 constraint (inst.operands[2].shifted,
8471 _("extraneous shift as part of operand to shift insn"));
8474 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
/* do_smc / do_hvc / do_swi (fragments): immediate handled entirely by
   the respective reloc.  */
8480 inst.reloc.type = BFD_RELOC_ARM_SMC;
8481 inst.reloc.pc_rel = 0;
8487 inst.reloc.type = BFD_RELOC_ARM_HVC;
8488 inst.reloc.pc_rel = 0;
8494 inst.reloc.type = BFD_RELOC_ARM_SWI;
8495 inst.reloc.pc_rel = 0;
8498 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
8499 SMLAxy{cond} Rd,Rm,Rs,Rn
8500 SMLAWy{cond} Rd,Rm,Rs,Rn
8501 Error if any register is R15. */
/* do_smla (fragment): Rd at 16, Rm at 0, Rs at 8, Rn at 12.  */
8506 inst.instruction |= inst.operands[0].reg << 16;
8507 inst.instruction |= inst.operands[1].reg;
8508 inst.instruction |= inst.operands[2].reg << 8;
8509 inst.instruction |= inst.operands[3].reg << 12;
8512 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
8513 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
8514 Error if any register is R15.
8515 Warning if Rdlo == Rdhi. */
/* do_smlal (fragment).  */
8520 inst.instruction |= inst.operands[0].reg << 12;
8521 inst.instruction |= inst.operands[1].reg << 16;
8522 inst.instruction |= inst.operands[2].reg;
8523 inst.instruction |= inst.operands[3].reg << 8;
8525 if (inst.operands[0].reg == inst.operands[1].reg)
8526 as_tsktsk (_("rdhi and rdlo must be different"));
8529 /* ARM V5E (El Segundo) signed-multiply (argument parse)
8530 SMULxy{cond} Rd,Rm,Rs
8531 Error if any register is R15. */
/* do_smul (fragment).  */
8536 inst.instruction |= inst.operands[0].reg << 16;
8537 inst.instruction |= inst.operands[1].reg;
8538 inst.instruction |= inst.operands[2].reg << 8;
8541 /* ARM V6 srs (argument parse). The variable fields in the encoding are
8542 the same for both ARM and Thumb-2. */
/* do_srs (fragment): base defaults to SP and must be SP if given.  */
8549 if (inst.operands[0].present)
8551 reg = inst.operands[0].reg;
8552 constraint (reg != REG_SP, _("SRS base register must be r13"));
8557 inst.instruction |= reg << 16;
8558 inst.instruction |= inst.operands[1].imm;
8559 if (inst.operands[0].writeback || inst.operands[1].writeback)
8560 inst.instruction |= WRITE_BACK;
8563 /* ARM V6 strex (argument parse). */
/* do_strex (fragment): only a bare [Rn] address is valid, and the
   status register Rd must not overlap Rt or Rn.  */
8568 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8569 || inst.operands[2].postind || inst.operands[2].writeback
8570 || inst.operands[2].immisreg || inst.operands[2].shifted
8571 || inst.operands[2].negative
8572 /* See comment in do_ldrex(). */
8573 || (inst.operands[2].reg == REG_PC),
8576 constraint (inst.operands[0].reg == inst.operands[1].reg
8577 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
8579 constraint (inst.reloc.exp.X_op != O_constant
8580 || inst.reloc.exp.X_add_number != 0,
8581 _("offset must be zero in ARM encoding"));
8583 inst.instruction |= inst.operands[0].reg << 12;
8584 inst.instruction |= inst.operands[1].reg;
8585 inst.instruction |= inst.operands[2].reg << 16;
8586 inst.reloc.type = BFD_RELOC_UNUSED;
/* Fragment: same bare-[Rn] and overlap checks for a related
   store-exclusive form (function header elided).  */
8592 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8593 || inst.operands[2].postind || inst.operands[2].writeback
8594 || inst.operands[2].immisreg || inst.operands[2].shifted
8595 || inst.operands[2].negative,
8598 constraint (inst.operands[0].reg == inst.operands[1].reg
8599 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
/* do_strexd (fragment): even first source register; Rt2 implicit;
   status register must not overlap the pair or the base.  */
8607 constraint (inst.operands[1].reg % 2 != 0,
8608 _("even register required"));
8609 constraint (inst.operands[2].present
8610 && inst.operands[2].reg != inst.operands[1].reg + 1,
8611 _("can only store two consecutive registers"));
8612 /* If op 2 were present and equal to PC, this function wouldn't
8613 have been called in the first place. */
8614 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
8616 constraint (inst.operands[0].reg == inst.operands[1].reg
8617 || inst.operands[0].reg == inst.operands[1].reg + 1
8618 || inst.operands[0].reg == inst.operands[3].reg,
8621 inst.instruction |= inst.operands[0].reg << 12;
8622 inst.instruction |= inst.operands[1].reg;
8623 inst.instruction |= inst.operands[3].reg << 16;
8626 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
8627 extends it to 32-bits, and adds the result to a value in another
8628 register. You can specify a rotation by 0, 8, 16, or 24 bits
8629 before extracting the 16-bit value.
8630 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
8631 Condition defaults to COND_ALWAYS.
8632 Error if any register uses R15. */
/* do_sxtah (fragment): rotation (already /8) goes to bits 10-11.  */
8637 inst.instruction |= inst.operands[0].reg << 12;
8638 inst.instruction |= inst.operands[1].reg << 16;
8639 inst.instruction |= inst.operands[2].reg;
8640 inst.instruction |= inst.operands[3].imm << 10;
8645 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
8646 Condition defaults to COND_ALWAYS.
8647 Error if any register uses R15. */
/* do_sxth (fragment): the non-accumulating extend form.  */
8652 inst.instruction |= inst.operands[0].reg << 12;
8653 inst.instruction |= inst.operands[1].reg;
8654 inst.instruction |= inst.operands[2].imm << 10;
8657 /* VFP instructions. In a logical order: SP variant first, monad
8658 before dyad, arithmetic then move then load/store. */
/* NOTE(review): gap-sampled listing -- the "static void" headers and
   braces of these encoders fall on omitted lines (numbering jumps,
   e.g. 8661 -> 8663).  Each encoder just places operand registers
   into the VFP register fields named by the VFP_REG_* selector.  */
8661 do_vfp_sp_monadic (void)
8663 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8664 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8668 do_vfp_sp_dyadic (void)
8670 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8671 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
8672 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
/* Compare-with-zero: only the Sd field is taken from the operands.  */
8676 do_vfp_sp_compare_z (void)
8678 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
/* Single<->double conversions: destination and source use registers
   of different widths.  */
8682 do_vfp_dp_sp_cvt (void)
8684 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8685 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
8689 do_vfp_sp_dp_cvt (void)
8691 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8692 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
/* Transfers between ARM core registers and VFP single registers:
   the core register goes into bits 12-15.  */
8696 do_vfp_reg_from_sp (void)
8698 inst.instruction |= inst.operands[0].reg << 12;
8699 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
/* Two core registers from two consecutive S registers; the register
   count operand must be exactly 2.  */
8703 do_vfp_reg2_from_sp2 (void)
8705 constraint (inst.operands[2].imm != 2,
8706 _("only two consecutive VFP SP registers allowed here"));
8707 inst.instruction |= inst.operands[0].reg << 12;
8708 inst.instruction |= inst.operands[1].reg << 16;
8709 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
8713 do_vfp_sp_from_reg (void)
8715 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
8716 inst.instruction |= inst.operands[1].reg << 12;
8720 do_vfp_sp2_from_reg2 (void)
8722 constraint (inst.operands[0].imm != 2,
8723 _("only two consecutive VFP SP registers allowed here"));
8724 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
8725 inst.instruction |= inst.operands[1].reg << 12;
8726 inst.instruction |= inst.operands[2].reg << 16;
/* Single-register VFP load/store; encode_arm_cp_address fills in the
   coprocessor addressing-mode bits for operand 1.  */
8730 do_vfp_sp_ldst (void)
8732 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8733 encode_arm_cp_address (1, FALSE, TRUE, 0);
8737 do_vfp_dp_ldst (void)
8739 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8740 encode_arm_cp_address (1, FALSE, TRUE, 0);
/* VFP load/store-multiple helpers.  Addressing modes other than plain
   IA require base-register writeback ('!').  */
8745 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
8747 if (inst.operands[0].writeback)
8748 inst.instruction |= WRITE_BACK;
8750 constraint (ldstm_type != VFP_LDSTMIA,
8751 _("this addressing mode requires base-register writeback"));
/* Base register in bits 16-19, first S register via the Sd field,
   register count in the low byte.  */
8752 inst.instruction |= inst.operands[0].reg << 16;
8753 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
8754 inst.instruction |= inst.operands[1].imm;
8758 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
8762 if (inst.operands[0].writeback)
8763 inst.instruction |= WRITE_BACK;
8765 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
8766 _("this addressing mode requires base-register writeback"));
8768 inst.instruction |= inst.operands[0].reg << 16;
8769 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
/* Each D register transfers two words.  NOTE(review): the body of the
   X-form branch (lines 8773-8774, presumably adjusting 'count') is
   missing from this gap-sampled excerpt.  */
8771 count = inst.operands[1].imm << 1;
8772 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
8775 inst.instruction |= count;
/* Thin wrappers selecting the addressing variant for the sp/dp/xp
   LDM/STM mnemonics.  */
8779 do_vfp_sp_ldstmia (void)
8781 vfp_sp_ldstm (VFP_LDSTMIA);
8785 do_vfp_sp_ldstmdb (void)
8787 vfp_sp_ldstm (VFP_LDSTMDB);
8791 do_vfp_dp_ldstmia (void)
8793 vfp_dp_ldstm (VFP_LDSTMIA);
8797 do_vfp_dp_ldstmdb (void)
8799 vfp_dp_ldstm (VFP_LDSTMDB);
8803 do_vfp_xp_ldstmia (void)
8805 vfp_dp_ldstm (VFP_LDSTMIAX);
8809 do_vfp_xp_ldstmdb (void)
8811 vfp_dp_ldstm (VFP_LDSTMDBX);
/* Double-precision register-form encoders.  The suffix spells the
   operand-to-field mapping: d = Dd field, n = Dn field, m = Dm field,
   in operand order.  */
8815 do_vfp_dp_rd_rm (void)
8817 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8818 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
8822 do_vfp_dp_rn_rd (void)
8824 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
8825 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8829 do_vfp_dp_rd_rn (void)
8831 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8832 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8836 do_vfp_dp_rd_rn_rm (void)
8838 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8839 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
8840 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
/* NOTE(review): line 8846 is the body of a further encoder whose
   header (lines 8842-8845) is missing from this excerpt.  */
8846 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8850 do_vfp_dp_rm_rd_rn (void)
8852 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
8853 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
8854 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
8857 /* VFPv3 instructions. */
/* Immediate move: the 8-bit value is split, high nibble into bits
   16-19 ((imm & 0xf0) << 12) and low nibble into bits 0-3.  */
8859 do_vfp_sp_const (void)
8861 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8862 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8863 inst.instruction |= (inst.operands[1].imm & 0x0f);
8867 do_vfp_dp_const (void)
8869 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8870 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
8871 inst.instruction |= (inst.operands[1].imm & 0x0f);
/* Fixed-point conversion helper.  Validates the fraction-bits operand
   against the source width, then encodes immbits = srcsize - imm with
   its low bit in bit 5 and the remaining bits in bits 0-3.  */
8875 vfp_conv (int srcsize)
8877 int immbits = srcsize - inst.operands[1].imm;
8879 if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
8881 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
8882 i.e. immbits must be in range 0 - 16. */
8883 inst.error = _("immediate value out of range, expected range [0, 16]");
8886 else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
8888 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
8889 i.e. immbits must be in range 0 - 31. */
8890 inst.error = _("immediate value out of range, expected range [1, 32]");
8894 inst.instruction |= (immbits & 1) << 5;
8895 inst.instruction |= (immbits >> 1);
/* Wrappers: encode the destination register, then (on lines omitted
   from this excerpt) call vfp_conv with the matching source width.  */
8899 do_vfp_sp_conv_16 (void)
8901 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8906 do_vfp_dp_conv_16 (void)
8908 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8913 do_vfp_sp_conv_32 (void)
8915 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
8920 do_vfp_dp_conv_32 (void)
8922 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
8926 /* FPA instructions. Also in a logical order. */
/* NOTE(review): line numbers 8927-8930 are omitted here -- the header
   of this first FPA encoder is not visible in this excerpt.  */
8931 inst.instruction |= inst.operands[0].reg << 16;
8932 inst.instruction |= inst.operands[1].reg;
/* FPA LFM/SFM: the register count (1-4) is encoded in the CP_T_X and
   CP_T_Y bits; the case for count 4 presumably sits on omitted line
   8944.  */
8936 do_fpa_ldmstm (void)
8938 inst.instruction |= inst.operands[0].reg << 12;
8939 switch (inst.operands[1].imm)
8941 case 1: inst.instruction |= CP_T_X; break;
8942 case 2: inst.instruction |= CP_T_Y; break;
8943 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
8948 if (inst.instruction & (PRE_INDEX | INDEX_UP))
8950 /* The instruction specified "ea" or "fd", so we can only accept
8951 [Rn]{!}. The instruction does not really support stacking or
8952 unstacking, so we have to emulate these by setting appropriate
8953 bits and offsets. */
8954 constraint (inst.reloc.exp.X_op != O_constant
8955 || inst.reloc.exp.X_add_number != 0,
8956 _("this instruction does not support indexing"));
/* Fake a 12-bytes-per-register offset to emulate stack behaviour.  */
8958 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
8959 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
8961 if (!(inst.instruction & INDEX_UP))
8962 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
8964 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
8966 inst.operands[2].preind = 0;
8967 inst.operands[2].postind = 1;
8971 encode_arm_cp_address (2, TRUE, TRUE, 0);
8974 /* iWMMXt instructions: strictly in alphabetical order. */
/* NOTE(review): headers/braces of these encoders are on lines omitted
   from this gap-sampled excerpt; only the bodies are visible.  */
8977 do_iwmmxt_tandorc (void)
8979 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
8983 do_iwmmxt_textrc (void)
8985 inst.instruction |= inst.operands[0].reg << 12;
8986 inst.instruction |= inst.operands[1].imm;
8990 do_iwmmxt_textrm (void)
8992 inst.instruction |= inst.operands[0].reg << 12;
8993 inst.instruction |= inst.operands[1].reg << 16;
8994 inst.instruction |= inst.operands[2].imm;
8998 do_iwmmxt_tinsr (void)
9000 inst.instruction |= inst.operands[0].reg << 16;
9001 inst.instruction |= inst.operands[1].reg << 12;
9002 inst.instruction |= inst.operands[2].imm;
9006 do_iwmmxt_tmia (void)
9008 inst.instruction |= inst.operands[0].reg << 5;
9009 inst.instruction |= inst.operands[1].reg;
9010 inst.instruction |= inst.operands[2].reg << 12;
9014 do_iwmmxt_waligni (void)
9016 inst.instruction |= inst.operands[0].reg << 12;
9017 inst.instruction |= inst.operands[1].reg << 16;
9018 inst.instruction |= inst.operands[2].reg;
9019 inst.instruction |= inst.operands[3].imm << 20;
9023 do_iwmmxt_wmerge (void)
9025 inst.instruction |= inst.operands[0].reg << 12;
9026 inst.instruction |= inst.operands[1].reg << 16;
9027 inst.instruction |= inst.operands[2].reg;
9028 inst.instruction |= inst.operands[3].imm << 21;
9032 do_iwmmxt_wmov (void)
9034 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9035 inst.instruction |= inst.operands[0].reg << 12;
9036 inst.instruction |= inst.operands[1].reg << 16;
9037 inst.instruction |= inst.operands[1].reg;
/* Byte/halfword load-store: chooses a Thumb or ARM scaled-offset
   relocation (the selecting condition is on omitted line 9045).  */
9041 do_iwmmxt_wldstbh (void)
9044 inst.instruction |= inst.operands[0].reg << 12;
9046 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
9048 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
9049 encode_arm_cp_address (1, TRUE, FALSE, reloc);
9053 do_iwmmxt_wldstw (void)
9055 /* RIWR_RIWC clears .isreg for a control register. */
9056 if (!inst.operands[0].isreg)
9058 constraint (inst.cond != COND_ALWAYS, BAD_COND);
9059 inst.instruction |= 0xf0000000;
9062 inst.instruction |= inst.operands[0].reg << 12;
9063 encode_arm_cp_address (1, TRUE, TRUE, 0);
/* Doubleword load/store.  iWMMXt2 adds a register-offset form which
   is re-encoded by hand here (unconditional 0xf condition field).  */
9067 do_iwmmxt_wldstd (void)
9069 inst.instruction |= inst.operands[0].reg << 12;
9070 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
9071 && inst.operands[1].immisreg)
9073 inst.instruction &= ~0x1a000ff;
9074 inst.instruction |= (0xf << 28);
9075 if (inst.operands[1].preind)
9076 inst.instruction |= PRE_INDEX;
9077 if (!inst.operands[1].negative)
9078 inst.instruction |= INDEX_UP;
9079 if (inst.operands[1].writeback)
9080 inst.instruction |= WRITE_BACK;
9081 inst.instruction |= inst.operands[1].reg << 16;
9082 inst.instruction |= inst.reloc.exp.X_add_number << 4;
9083 inst.instruction |= inst.operands[1].imm;
9086 encode_arm_cp_address (1, TRUE, FALSE, 0);
/* WSHUFH: 8-bit shuffle immediate split across bits 20-23 and 0-3.  */
9090 do_iwmmxt_wshufh (void)
9092 inst.instruction |= inst.operands[0].reg << 12;
9093 inst.instruction |= inst.operands[1].reg << 16;
9094 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9095 inst.instruction |= (inst.operands[2].imm & 0x0f);
9099 do_iwmmxt_wzero (void)
9101 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9102 inst.instruction |= inst.operands[0].reg;
9103 inst.instruction |= inst.operands[0].reg << 12;
9104 inst.instruction |= inst.operands[0].reg << 16;
/* Shift-style insns accepting either a register or (iWMMXt2 only) a
   5-bit immediate.  A zero immediate is rewritten as an equivalent
   full-width rotate (wrorh/wrorw) or a self-OR (wor).  */
9108 do_iwmmxt_wrwrwr_or_imm5 (void)
9110 if (inst.operands[2].isreg)
9113 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
9114 _("immediate operand requires iWMMXt2"));
9116 if (inst.operands[2].imm == 0)
9118 switch ((inst.instruction >> 20) & 0xf)
9124 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
9125 inst.operands[2].imm = 16;
9126 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
9132 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
9133 inst.operands[2].imm = 32;
9134 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
9141 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
9143 wrn = (inst.instruction >> 16) & 0xf;
9144 inst.instruction &= 0xff0fff0f;
9145 inst.instruction |= wrn;
9146 /* Bail out here; the instruction is now assembled. */
9151 /* Map 32 -> 0, etc. */
9152 inst.operands[2].imm &= 0x1f;
9153 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
9157 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
9158 operations first, then control, shift, and load/store. */
9160 /* Insns like "foo X,Y,Z". */
9163 do_mav_triple (void)
9165 inst.instruction |= inst.operands[0].reg << 16;
9166 inst.instruction |= inst.operands[1].reg;
9167 inst.instruction |= inst.operands[2].reg << 12;
9170 /* Insns like "foo W,X,Y,Z".
9171 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
/* NOTE(review): the header of this quad-register encoder is on
   omitted lines 9172-9175.  */
9176 inst.instruction |= inst.operands[0].reg << 5;
9177 inst.instruction |= inst.operands[1].reg << 12;
9178 inst.instruction |= inst.operands[2].reg << 16;
9179 inst.instruction |= inst.operands[3].reg;
9182 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
9186 inst.instruction |= inst.operands[1].reg << 12;
9189 /* Maverick shift immediate instructions.
9190 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9191 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
9196 int imm = inst.operands[2].imm;
9198 inst.instruction |= inst.operands[0].reg << 12;
9199 inst.instruction |= inst.operands[1].reg << 16;
9201 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
9202 Bits 5-7 of the insn should have bits 4-6 of the immediate.
9203 Bit 4 should be 0. */
9204 imm = (imm & 0xf) | ((imm & 0x70) << 1);
9206 inst.instruction |= imm;
9209 /* XScale instructions. Also sorted arithmetic before move. */
9211 /* Xscale multiply-accumulate (argument parse)
9214 MIAxycc acc0,Rm,Rs. */
9219 inst.instruction |= inst.operands[1].reg;
9220 inst.instruction |= inst.operands[2].reg << 12;
9223 /* Xscale move-accumulator-register (argument parse)
9225 MARcc acc0,RdLo,RdHi. */
9230 inst.instruction |= inst.operands[1].reg << 12;
9231 inst.instruction |= inst.operands[2].reg << 16;
9234 /* Xscale move-register-accumulator (argument parse)
9236 MRAcc RdLo,RdHi,acc0. */
/* RdLo and RdHi must be distinct.  */
9241 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
9242 inst.instruction |= inst.operands[0].reg << 12;
9243 inst.instruction |= inst.operands[1].reg << 16;
9246 /* Encoding functions relevant only to Thumb. */
9248 /* inst.operands[i] is a shifted-register operand; encode
9249 it into inst.instruction in the format used by Thumb32. */
9252 encode_thumb32_shifted_operand (int i)
9254 unsigned int value = inst.reloc.exp.X_add_number;
9255 unsigned int shift = inst.operands[i].shift_kind;
/* Thumb-2 data-processing insns take only immediate shifts.  */
9257 constraint (inst.operands[i].immisreg,
9258 _("shift by register not allowed in thumb mode"));
9259 inst.instruction |= inst.operands[i].reg;
/* RRX is encoded as ROR with a zero shift amount.  */
9260 if (shift == SHIFT_RRX)
9261 inst.instruction |= SHIFT_ROR << 4;
9264 constraint (inst.reloc.exp.X_op != O_constant,
9265 _("expression too complex"));
9267 constraint (value > 32
9268 || (value == 32 && (shift == SHIFT_LSL
9269 || shift == SHIFT_ROR)),
9270 _("shift expression is too large"));
/* Shift amount is split: bits 4-3 of the value go to insn bits 14-12
   (value & 0x1c) << 10, low two bits to insn bits 7-6.  A shift of 32
   is encoded as 0 -- the branch handling that is on omitted lines.  */
9274 else if (value == 32)
9277 inst.instruction |= shift << 4;
9278 inst.instruction |= (value & 0x1c) << 10;
9279 inst.instruction |= (value & 0x03) << 6;
9284 /* inst.operands[i] was set up by parse_address. Encode it into a
9285 Thumb32 format load or store instruction. Reject forms that cannot
9286 be used with such instructions. If is_t is true, reject forms that
9287 cannot be used with a T instruction; if is_d is true, reject forms
9288 that cannot be used with a D instruction. If it is a store insn,
9292 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
9294 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
9296 constraint (!inst.operands[i].isreg,
9297 _("Instruction does not support =N addresses"));
9299 inst.instruction |= inst.operands[i].reg << 16;
/* Register-offset form: [Rn, Rm {, LSL #shift}].  */
9300 if (inst.operands[i].immisreg)
9302 constraint (is_pc, BAD_PC_ADDRESSING);
9303 constraint (is_t || is_d, _("cannot use register index with this instruction"))
;
9304 constraint (inst.operands[i].negative,
9305 _("Thumb does not support negative register indexing"));
9306 constraint (inst.operands[i].postind,
9307 _("Thumb does not support register post-indexing"));
9308 constraint (inst.operands[i].writeback,
9309 _("Thumb does not support register indexing with writeback"));
9310 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
9311 _("Thumb supports only LSL in shifted register indexing"));
9313 inst.instruction |= inst.operands[i].imm;
9314 if (inst.operands[i].shifted)
9316 constraint (inst.reloc.exp.X_op != O_constant,
9317 _("expression too complex"));
9318 constraint (inst.reloc.exp.X_add_number < 0
9319 || inst.reloc.exp.X_add_number > 3,
9320 _("shift out of range"));
9321 inst.instruction |= inst.reloc.exp.X_add_number << 4;
9323 inst.reloc.type = BFD_RELOC_UNUSED;
/* Immediate pre-indexed form: [Rn, #off]{!}.  The branch choosing
   between the 12-bit positive-offset encoding (0x01000000) and the
   8-bit form (0x00000c00) is on lines omitted from this excerpt.  */
9325 else if (inst.operands[i].preind)
9327 constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
9328 constraint (is_t && inst.operands[i].writeback,
9329 _("cannot use writeback with this instruction"));
9330 constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0)
9331 && !inst.reloc.pc_rel, BAD_PC_ADDRESSING);
9335 inst.instruction |= 0x01000000;
9336 if (inst.operands[i].writeback)
9337 inst.instruction |= 0x00200000;
9341 inst.instruction |= 0x00000c00;
9342 if (inst.operands[i].writeback)
9343 inst.instruction |= 0x00000100;
9345 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
/* Post-indexed form: [Rn], #off -- writeback is implied.  */
9347 else if (inst.operands[i].postind)
9349 gas_assert (inst.operands[i].writeback);
9350 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
9351 constraint (is_t, _("cannot use post-indexing with this instruction"));
9354 inst.instruction |= 0x00200000;
9356 inst.instruction |= 0x00000900;
9357 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
9359 else /* unindexed - only for coprocessor */
9360 inst.error = _("instruction does not accept unindexed addressing");
9363 /* Table of Thumb instructions which exist in both 16- and 32-bit
9364 encodings (the latter only in post-V6T2 cores). The index is the
9365 value used in the insns table below. When there is more than one
9366 possible 16-bit encoding for the instruction, this table always
9368 Also contains several pseudo-instructions used during relaxation. */
/* Each X(mnemonic, 16-bit opcode, 32-bit opcode) row is expanded
   three times below: once for the T_MNEM_* enum, once for thumb_op16,
   once for thumb_op32.  0xffffffff marks "no 32-bit form".  */
9369 #define T16_32_TAB \
9370 X(_adc, 4140, eb400000), \
9371 X(_adcs, 4140, eb500000), \
9372 X(_add, 1c00, eb000000), \
9373 X(_adds, 1c00, eb100000), \
9374 X(_addi, 0000, f1000000), \
9375 X(_addis, 0000, f1100000), \
9376 X(_add_pc,000f, f20f0000), \
9377 X(_add_sp,000d, f10d0000), \
9378 X(_adr, 000f, f20f0000), \
9379 X(_and, 4000, ea000000), \
9380 X(_ands, 4000, ea100000), \
9381 X(_asr, 1000, fa40f000), \
9382 X(_asrs, 1000, fa50f000), \
9383 X(_b, e000, f000b000), \
9384 X(_bcond, d000, f0008000), \
9385 X(_bic, 4380, ea200000), \
9386 X(_bics, 4380, ea300000), \
9387 X(_cmn, 42c0, eb100f00), \
9388 X(_cmp, 2800, ebb00f00), \
9389 X(_cpsie, b660, f3af8400), \
9390 X(_cpsid, b670, f3af8600), \
9391 X(_cpy, 4600, ea4f0000), \
9392 X(_dec_sp,80dd, f1ad0d00), \
9393 X(_eor, 4040, ea800000), \
9394 X(_eors, 4040, ea900000), \
9395 X(_inc_sp,00dd, f10d0d00), \
9396 X(_ldmia, c800, e8900000), \
9397 X(_ldr, 6800, f8500000), \
9398 X(_ldrb, 7800, f8100000), \
9399 X(_ldrh, 8800, f8300000), \
9400 X(_ldrsb, 5600, f9100000), \
9401 X(_ldrsh, 5e00, f9300000), \
9402 X(_ldr_pc,4800, f85f0000), \
9403 X(_ldr_pc2,4800, f85f0000), \
9404 X(_ldr_sp,9800, f85d0000), \
9405 X(_lsl, 0000, fa00f000), \
9406 X(_lsls, 0000, fa10f000), \
9407 X(_lsr, 0800, fa20f000), \
9408 X(_lsrs, 0800, fa30f000), \
9409 X(_mov, 2000, ea4f0000), \
9410 X(_movs, 2000, ea5f0000), \
9411 X(_mul, 4340, fb00f000), \
9412 X(_muls, 4340, ffffffff), /* no 32b muls */ \
9413 X(_mvn, 43c0, ea6f0000), \
9414 X(_mvns, 43c0, ea7f0000), \
9415 X(_neg, 4240, f1c00000), /* rsb #0 */ \
9416 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
9417 X(_orr, 4300, ea400000), \
9418 X(_orrs, 4300, ea500000), \
9419 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
9420 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
9421 X(_rev, ba00, fa90f080), \
9422 X(_rev16, ba40, fa90f090), \
9423 X(_revsh, bac0, fa90f0b0), \
9424 X(_ror, 41c0, fa60f000), \
9425 X(_rors, 41c0, fa70f000), \
9426 X(_sbc, 4180, eb600000), \
9427 X(_sbcs, 4180, eb700000), \
9428 X(_stmia, c000, e8800000), \
9429 X(_str, 6000, f8400000), \
9430 X(_strb, 7000, f8000000), \
9431 X(_strh, 8000, f8200000), \
9432 X(_str_sp,9000, f84d0000), \
9433 X(_sub, 1e00, eba00000), \
9434 X(_subs, 1e00, ebb00000), \
9435 X(_subi, 8000, f1a00000), \
9436 X(_subis, 8000, f1b00000), \
9437 X(_sxtb, b240, fa4ff080), \
9438 X(_sxth, b200, fa0ff080), \
9439 X(_tst, 4200, ea100f00), \
9440 X(_uxtb, b2c0, fa5ff080), \
9441 X(_uxth, b280, fa1ff080), \
9442 X(_nop, bf00, f3af8000), \
9443 X(_yield, bf10, f3af8001), \
9444 X(_wfe, bf20, f3af8002), \
9445 X(_wfi, bf30, f3af8003), \
9446 X(_sev, bf40, f3af8004),
9448 /* To catch errors in encoding functions, the codes are all offset by
9449 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
9450 as 16-bit instructions. */
9451 #define X(a,b,c) T_MNEM##a
9452 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
/* NOTE(review): the "#undef X" lines between the three expansions are
   on lines omitted from this excerpt (e.g. 9453-9454, 9458-9459).  */
9455 #define X(a,b,c) 0x##b
9456 static const unsigned short thumb_op16[] = { T16_32_TAB };
9457 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
9460 #define X(a,b,c) 0x##c
9461 static const unsigned int thumb_op32[] = { T16_32_TAB };
9462 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
9463 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
9467 /* Thumb instruction encoders, in alphabetical order. */
/* ADDW/SUBW: Rd, Rn, #imm12.  Rn == PC gives the ADR form, Rn == SP
   the SP-relative form (see comment below); Rd may then be SP/PC in
   cases ordinary registers cannot (the branch is on omitted lines).  */
9472 do_t_add_sub_w (void)
9476 Rd = inst.operands[0].reg;
9477 Rn = inst.operands[1].reg;
9479 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
9480 is the SP-{plus,minus}-immediate form of the instruction. */
9482 constraint (Rd == REG_PC, BAD_PC);
9484 reject_bad_reg (Rd);
9486 inst.instruction |= (Rn << 16) | (Rd << 8);
9487 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9490 /* Parse an add or subtract instruction. We get here with inst.instruction
9491 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
/* NOTE(review): gap-sampled -- the function header, local declarations
   and many braces are on omitted lines; the unified- vs old-syntax
   split and several else branches are only partially visible.  */
9498 Rd = inst.operands[0].reg;
9499 Rs = (inst.operands[1].present
9500 ? inst.operands[1].reg /* Rd, Rs, foo */
9501 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9504 set_it_insn_type_last ();
/* Whether a narrow (16-bit) encoding is permissible depends on the
   flag-setting variant and the current IT-block state.  */
9512 flags = (inst.instruction == T_MNEM_adds
9513 || inst.instruction == T_MNEM_subs);
9515 narrow = !in_it_block ();
9517 narrow = in_it_block ();
9518 if (!inst.operands[2].isreg)
9522 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
9524 add = (inst.instruction == T_MNEM_add
9525 || inst.instruction == T_MNEM_adds);
9527 if (inst.size_req != 4)
9529 /* Attempt to use a narrow opcode, with relaxation if
9531 if (Rd == REG_SP && Rs == REG_SP && !flags)
9532 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
9533 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
9534 opcode = T_MNEM_add_sp;
9535 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
9536 opcode = T_MNEM_add_pc;
9537 else if (Rd <= 7 && Rs <= 7 && narrow)
9540 opcode = add ? T_MNEM_addis : T_MNEM_subis;
9542 opcode = add ? T_MNEM_addi : T_MNEM_subi;
9546 inst.instruction = THUMB_OP16(opcode);
9547 inst.instruction |= (Rd << 4) | Rs;
9548 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9549 if (inst.size_req != 2)
9550 inst.relax = opcode;
9553 constraint (inst.size_req == 2, BAD_HIREG);
9555 if (inst.size_req == 4
9556 || (inst.size_req != 2 && !opcode))
/* SUBS PC, LR, #const is the only PC-destination form allowed.  */
9560 constraint (add, BAD_PC);
9561 constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
9562 _("only SUBS PC, LR, #const allowed"));
9563 constraint (inst.reloc.exp.X_op != O_constant,
9564 _("expression too complex"));
9565 constraint (inst.reloc.exp.X_add_number < 0
9566 || inst.reloc.exp.X_add_number > 0xff,
9567 _("immediate value out of range"));
9568 inst.instruction = T2_SUBS_PC_LR
9569 | inst.reloc.exp.X_add_number;
9570 inst.reloc.type = BFD_RELOC_UNUSED;
9573 else if (Rs == REG_PC)
9575 /* Always use addw/subw. */
9576 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
9577 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
9581 inst.instruction = THUMB_OP32 (inst.instruction);
9582 inst.instruction = (inst.instruction & 0xe1ffffff)
9585 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9587 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
9589 inst.instruction |= Rd << 8;
9590 inst.instruction |= Rs << 16;
/* Register-operand (possibly shifted) case.  */
9595 unsigned int value = inst.reloc.exp.X_add_number;
9596 unsigned int shift = inst.operands[2].shift_kind;
9598 Rn = inst.operands[2].reg;
9599 /* See if we can do this with a 16-bit instruction. */
9600 if (!inst.operands[2].shifted && inst.size_req != 4)
9602 if (Rd > 7 || Rs > 7 || Rn > 7)
9607 inst.instruction = ((inst.instruction == T_MNEM_adds
9608 || inst.instruction == T_MNEM_add)
9611 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
9615 if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
9617 /* Thumb-1 cores (except v6-M) require at least one high
9618 register in a narrow non flag setting add. */
9619 if (Rd > 7 || Rn > 7
9620 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
9621 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
/* ADD (high registers): Rd split into bit 7 and bits 0-2.  */
9628 inst.instruction = T_OPCODE_ADD_HI;
9629 inst.instruction |= (Rd & 8) << 4;
9630 inst.instruction |= (Rd & 7);
9631 inst.instruction |= Rn << 3;
9637 constraint (Rd == REG_PC, BAD_PC);
9638 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
9639 constraint (Rs == REG_PC, BAD_PC);
9640 reject_bad_reg (Rn);
9642 /* If we get here, it can't be done in 16 bits. */
9643 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
9644 _("shift must be constant"));
9645 inst.instruction = THUMB_OP32 (inst.instruction);
9646 inst.instruction |= Rd << 8;
9647 inst.instruction |= Rs << 16;
9648 constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
9649 _("shift value over 3 not allowed in thumb mode"));
9650 constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
9651 _("only LSL shift allowed in thumb mode"));
9652 encode_thumb32_shifted_operand (2);
/* Non-unified (old Thumb) syntax from here down.  */
9657 constraint (inst.instruction == T_MNEM_adds
9658 || inst.instruction == T_MNEM_subs,
9661 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
9663 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
9664 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
9667 inst.instruction = (inst.instruction == T_MNEM_add
9669 inst.instruction |= (Rd << 4) | Rs;
9670 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9674 Rn = inst.operands[2].reg;
9675 constraint (inst.operands[2].shifted, _("unshifted register required"));
9677 /* We now have Rd, Rs, and Rn set to registers. */
9678 if (Rd > 7 || Rs > 7 || Rn > 7)
9680 /* Can't do this for SUB. */
9681 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
9682 inst.instruction = T_OPCODE_ADD_HI;
9683 inst.instruction |= (Rd & 8) << 4;
9684 inst.instruction |= (Rd & 7);
9686 inst.instruction |= Rn << 3;
9688 inst.instruction |= Rs << 3;
9690 constraint (1, _("dest must overlap one source register"));
9694 inst.instruction = (inst.instruction == T_MNEM_add
9695 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
9696 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
/* Body of the Thumb ADR encoder (header on omitted lines).  Picks a
   relaxable 16-bit form, a 32-bit ADR (BFD_RELOC_ARM_T32_ADD_PC12),
   or the plain 16-bit form depending on syntax and size_req.  */
9706 Rd = inst.operands[0].reg;
9707 reject_bad_reg (Rd);
9709 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
9711 /* Defer to section relaxation. */
9712 inst.relax = inst.instruction;
9713 inst.instruction = THUMB_OP16 (inst.instruction);
9714 inst.instruction |= Rd << 4;
9716 else if (unified_syntax && inst.size_req != 2)
9718 /* Generate a 32-bit opcode. */
9719 inst.instruction = THUMB_OP32 (inst.instruction);
9720 inst.instruction |= Rd << 8;
9721 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
9722 inst.reloc.pc_rel = 1;
9726 /* Generate a 16-bit opcode. */
9727 inst.instruction = THUMB_OP16 (inst.instruction);
9728 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
9729 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
9730 inst.reloc.pc_rel = 1;
9732 inst.instruction |= Rd << 4;
9736 /* Arithmetic instructions for which there is just one 16-bit
9737 instruction encoding, and it allows only two low registers.
9738 For maximal compatibility with ARM syntax, we allow three register
9739 operands even when Thumb-32 instructions are not available, as long
9740 as the first two are identical. For instance, both "sbc r0,r1" and
9741 "sbc r0,r0,r1" are allowed. */
/* NOTE(review): function header, locals and braces are on omitted
   lines in this gap-sampled excerpt.  */
9747 Rd = inst.operands[0].reg;
9748 Rs = (inst.operands[1].present
9749 ? inst.operands[1].reg /* Rd, Rs, foo */
9750 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9751 Rn = inst.operands[2].reg;
9753 reject_bad_reg (Rd);
9754 reject_bad_reg (Rs);
9755 if (inst.operands[2].isreg)
9756 reject_bad_reg (Rn);
/* Unified syntax path.  */
9760 if (!inst.operands[2].isreg)
9762 /* For an immediate, we always generate a 32-bit opcode;
9763 section relaxation will shrink it later if possible. */
9764 inst.instruction = THUMB_OP32 (inst.instruction);
9765 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9766 inst.instruction |= Rd << 8;
9767 inst.instruction |= Rs << 16;
9768 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9774 /* See if we can do this with a 16-bit instruction. */
9775 if (THUMB_SETS_FLAGS (inst.instruction))
9776 narrow = !in_it_block ();
9778 narrow = in_it_block ();
9780 if (Rd > 7 || Rn > 7 || Rs > 7)
9782 if (inst.operands[2].shifted)
9784 if (inst.size_req == 4)
/* Narrow form requires Rd == Rs (checked on omitted lines).  */
9790 inst.instruction = THUMB_OP16 (inst.instruction);
9791 inst.instruction |= Rd;
9792 inst.instruction |= Rn << 3;
9796 /* If we get here, it can't be done in 16 bits. */
9797 constraint (inst.operands[2].shifted
9798 && inst.operands[2].immisreg,
9799 _("shift must be constant"));
9800 inst.instruction = THUMB_OP32 (inst.instruction);
9801 inst.instruction |= Rd << 8;
9802 inst.instruction |= Rs << 16;
9803 encode_thumb32_shifted_operand (2);
/* Old (non-unified) Thumb syntax path.  */
9808 /* On its face this is a lie - the instruction does set the
9809 flags. However, the only supported mnemonic in this mode
9811 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9813 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9814 _("unshifted register required"));
9815 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9816 constraint (Rd != Rs,
9817 _("dest and source1 must be the same register"));
9819 inst.instruction = THUMB_OP16 (inst.instruction);
9820 inst.instruction |= Rd;
9821 inst.instruction |= Rn << 3;
9825 /* Similarly, but for instructions where the arithmetic operation is
9826 commutative, so we can allow either of them to be different from
9827 the destination operand in a 16-bit instruction. For instance, all
9828 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
/* NOTE(review): function header, locals and braces are on omitted
   lines in this gap-sampled excerpt.  */
9835 Rd = inst.operands[0].reg;
9836 Rs = (inst.operands[1].present
9837 ? inst.operands[1].reg /* Rd, Rs, foo */
9838 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9839 Rn = inst.operands[2].reg;
9841 reject_bad_reg (Rd);
9842 reject_bad_reg (Rs);
9843 if (inst.operands[2].isreg)
9844 reject_bad_reg (Rn);
/* Unified syntax path.  */
9848 if (!inst.operands[2].isreg)
9850 /* For an immediate, we always generate a 32-bit opcode;
9851 section relaxation will shrink it later if possible. */
9852 inst.instruction = THUMB_OP32 (inst.instruction);
9853 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9854 inst.instruction |= Rd << 8;
9855 inst.instruction |= Rs << 16;
9856 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9862 /* See if we can do this with a 16-bit instruction. */
9863 if (THUMB_SETS_FLAGS (inst.instruction))
9864 narrow = !in_it_block ();
9866 narrow = in_it_block ();
9868 if (Rd > 7 || Rn > 7 || Rs > 7)
9870 if (inst.operands[2].shifted)
9872 if (inst.size_req == 4)
/* Commutative: a narrow form works when Rd matches either source;
   swap which source supplies bits 3-5 accordingly.  */
9879 inst.instruction = THUMB_OP16 (inst.instruction);
9880 inst.instruction |= Rd;
9881 inst.instruction |= Rn << 3;
9886 inst.instruction = THUMB_OP16 (inst.instruction);
9887 inst.instruction |= Rd;
9888 inst.instruction |= Rs << 3;
9893 /* If we get here, it can't be done in 16 bits. */
9894 constraint (inst.operands[2].shifted
9895 && inst.operands[2].immisreg,
9896 _("shift must be constant"));
9897 inst.instruction = THUMB_OP32 (inst.instruction);
9898 inst.instruction |= Rd << 8;
9899 inst.instruction |= Rs << 16;
9900 encode_thumb32_shifted_operand (2);
/* Old (non-unified) Thumb syntax path.  */
9905 /* On its face this is a lie - the instruction does set the
9906 flags. However, the only supported mnemonic in this mode
9908 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9910 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
9911 _("unshifted register required"));
9912 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
9914 inst.instruction = THUMB_OP16 (inst.instruction);
9915 inst.instruction |= Rd;
9918 inst.instruction |= Rn << 3;
9920 inst.instruction |= Rs << 3;
9922 constraint (1, _("dest must overlap one source register"));
/* Fragment of the barrier-option encoder (DSB/DMB/ISB operand);
   defaults to SY (0xf) when no option is given.
   NOTE(review): the conjunction "imm > 0xf && imm < 0x0" can never be
   true, so this constraint as written never fires -- this looks like
   a latent bug (later binutils reworked this check); confirm against
   the current upstream tc-arm.c before relying on it.  */
9929 if (inst.operands[0].present)
9931 constraint ((inst.instruction & 0xf0) != 0x40
9932 && inst.operands[0].imm > 0xf
9933 && inst.operands[0].imm < 0x0,
9934 _("bad barrier type"));
9935 inst.instruction |= inst.operands[0].imm;
9938 inst.instruction |= 0xf;
/* Body of the Thumb BFC encoder (header on omitted lines).  The
   operands give LSB and width; the encoding stores LSB (split across
   bits 12-14 and 6-7) and MSB (msb - 1) instead.  */
9945 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
9946 constraint (msb > 32, _("bit-field extends past end of register"));
9947 /* The instruction encoding stores the LSB and MSB,
9948 not the LSB and width. */
9949 Rd = inst.operands[0].reg;
9950 reject_bad_reg (Rd);
9951 inst.instruction |= Rd << 8;
9952 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
9953 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
9954 inst.instruction |= msb - 1;
/* Fragment (header elided): presumably do_t_bfi, the Thumb-2 BFI
   (bit-field insert) encoder.  Operands: (Rd, Rn|#0, #lsb, #width).  */
9963 Rd = inst.operands[0].reg;
9964 reject_bad_reg (Rd);
9966 /* #0 in second position is alternative syntax for bfc, which is
9967 the same instruction but with REG_PC in the Rm field. */
9968 if (!inst.operands[1].isreg)
/* NOTE(review): lines elided here — presumably the branch that sets
   Rn = REG_PC for the BFC form; confirm against full source.  */
9972 Rn = inst.operands[1].reg;
9973 reject_bad_reg (Rn);
9976 msb = inst.operands[2].imm + inst.operands[3].imm;
9977 constraint (msb > 32, _("bit-field extends past end of register"));
9978 /* The instruction encoding stores the LSB and MSB,
9979 not the LSB and width. */
9980 inst.instruction |= Rd << 8;
9981 inst.instruction |= Rn << 16;
/* Split LSB: bits 4:2 -> imm3 field, bits 1:0 -> imm2 field.  */
9982 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
9983 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
9984 inst.instruction |= msb - 1;
/* Fragment (header elided): presumably do_t_bfx (SBFX/UBFX).
   Operands: (Rd, Rn, #lsb, #width); the encoding stores width-1.  */
9992 Rd = inst.operands[0].reg;
9993 Rn = inst.operands[1].reg;
9995 reject_bad_reg (Rd);
9996 reject_bad_reg (Rn);
9998 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
9999 _("bit-field extends past end of register"));
10000 inst.instruction |= Rd << 8;
10001 inst.instruction |= Rn << 16;
/* Split LSB: bits 4:2 -> imm3 field, bits 1:0 -> imm2 field.  */
10002 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10003 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10004 inst.instruction |= inst.operands[3].imm - 1;
10007 /* ARM V5 Thumb BLX (argument parse)
10008 BLX <target_addr> which is BLX(1)
10009 BLX <Rm> which is BLX(2)
10010 Unfortunately, there are two different opcodes for this mnemonic.
10011 So, the insns[].value is not used, and the code here zaps values
10012 into inst.instruction.
10014 ??? How to take advantage of the additional two bits of displacement
10015 available in Thumb32 mode? Need new relocation? */
/* Fragment (header elided): presumably do_t_blx.  */
/* BLX must be the last instruction in an IT block.  */
10020 set_it_insn_type_last ();
10022 if (inst.operands[0].isreg)
10024 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10025 /* We have a register, so this is BLX(2). */
10026 inst.instruction |= inst.operands[0].reg << 3;
10030 /* No register. This must be BLX(1). */
10031 inst.instruction = 0xf000e800;
10032 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
/* Fragment (header elided): presumably do_t_branch, the conditional /
   unconditional Thumb branch encoder.  Picks a 16- or 32-bit form and
   the matching PC-relative relocation.  */
10044 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
10046 if (in_it_block ())
10048 /* Conditional branches inside IT blocks are encoded as unconditional
10050 cond = COND_ALWAYS;
10055 if (cond != COND_ALWAYS)
10056 opcode = T_MNEM_bcond;
10058 opcode = inst.instruction;
/* NOTE(review): the condition guarding this branch is partially elided
   (line 10060 missing); the visible part selects the 32-bit encoding.  */
10061 && (inst.size_req == 4
10062 || (inst.size_req != 2
10063 && (inst.operands[0].hasreloc
10064 || inst.reloc.exp.X_op == O_constant))))
10066 inst.instruction = THUMB_OP32(opcode);
10067 if (cond == COND_ALWAYS)
10068 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
/* Conditional 32-bit branch: cond 0xF is not a valid condition here.  */
10071 gas_assert (cond != 0xF);
10072 inst.instruction |= cond << 22;
10073 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
10078 inst.instruction = THUMB_OP16(opcode);
10079 if (cond == COND_ALWAYS)
10080 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10083 inst.instruction |= cond << 8;
10084 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10086 /* Allow section relaxation. */
10087 if (unified_syntax && inst.size_req != 2)
10088 inst.relax = opcode;
10090 inst.reloc.type = reloc;
10091 inst.reloc.pc_rel = 1;
/* Fragment (header elided): presumably do_t_bkpt (BKPT with an
   optional 8-bit immediate).  */
10097 constraint (inst.cond != COND_ALWAYS,
10098 _("instruction is always unconditional"));
10099 if (inst.operands[0].present)
10101 constraint (inst.operands[0].imm > 255,
10102 _("immediate value out of range"));
10103 inst.instruction |= inst.operands[0].imm;
/* NEUTRAL: this insn neither requires nor conflicts with an IT block.  */
10104 set_it_insn_type (NEUTRAL_IT_INSN);
/* Encode BL/BLX with a 23-bit (BRANCH23) PC-relative target.
   (The 'static void' line above this fragment is elided.)  */
10109 do_t_branch23 (void)
10111 set_it_insn_type_last ();
10112 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
10114 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
10115 this file. We used to simply ignore the PLT reloc type here --
10116 the branch encoding is now needed to deal with TLSCALL relocs.
10117 So if we see a PLT reloc now, put it back to how it used to be to
10118 keep the preexisting behaviour. */
10119 if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
10120 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
10122 #if defined(OBJ_COFF)
10123 /* If the destination of the branch is a defined symbol which does not have
10124 the THUMB_FUNC attribute, then we must be calling a function which has
10125 the (interfacearm) attribute. We look for the Thumb entry point to that
10126 function and change the branch to refer to that function instead. */
10127 if ( inst.reloc.exp.X_op == O_symbol
10128 && inst.reloc.exp.X_add_symbol != NULL
10129 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
10130 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
10131 inst.reloc.exp.X_add_symbol =
10132 find_real_start (inst.reloc.exp.X_add_symbol);
/* Fragment (header elided): presumably do_t_bx — BX Rm, with Rm
   placed in bits 6:3 of the 16-bit encoding.  */
10139 set_it_insn_type_last ();
10140 inst.instruction |= inst.operands[0].reg << 3;
10141 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
10142 should cause the alignment to be checked once it is known. This is
10143 because BX PC only works if the instruction is word aligned. */
/* Fragment (header elided): presumably do_t_bxj — BXJ Rm (Thumb-2),
   Rm in bits 19:16; SP/PC rejected by reject_bad_reg.  */
10151 set_it_insn_type_last ();
10152 Rm = inst.operands[0].reg;
10153 reject_bad_reg (Rm);
10154 inst.instruction |= Rm << 16;
/* Fragment (header elided): presumably do_t_clz — CLZ Rd, Rm.
   Note Rm is encoded twice (bits 19:16 and 3:0), as the T32 CLZ
   encoding requires.  */
10163 Rd = inst.operands[0].reg;
10164 Rm = inst.operands[1].reg;
10166 reject_bad_reg (Rd);
10167 reject_bad_reg (Rm);
10169 inst.instruction |= Rd << 8;
10170 inst.instruction |= Rm << 16;
10171 inst.instruction |= Rm;
/* Fragment (header elided): presumably do_t_cps — not permitted
   inside an IT block; the operand is OR-ed straight into the opcode.  */
10177 set_it_insn_type (OUTSIDE_IT_INSN);
10178 inst.instruction |= inst.operands[0].imm;
/* Fragment (header elided): presumably do_t_cpsi (CPSIE/CPSID).
   Chooses the 32-bit form when a mode operand is present (or .w was
   requested) and the core supports it; otherwise the 16-bit form.  */
10184 set_it_insn_type (OUTSIDE_IT_INSN);
10186 && (inst.operands[1].present || inst.size_req == 4)
10187 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
/* 32-bit encoding: recover the imod field from the 16-bit template,
   then rebuild the T32 CPS opcode.  */
10189 unsigned int imod = (inst.instruction & 0x0030) >> 4;
10190 inst.instruction = 0xf3af8000;
10191 inst.instruction |= imod << 9;
10192 inst.instruction |= inst.operands[0].imm << 5;
10193 if (inst.operands[1].present)
10194 inst.instruction |= 0x100 | inst.operands[1].imm;
10198 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
10199 && (inst.operands[0].imm & 4),
10200 _("selected processor does not support 'A' form "
10201 "of this instruction"));
10202 constraint (inst.operands[1].present || inst.size_req == 4,
10203 _("Thumb does not support the 2-argument "
10204 "form of this instruction"));
10205 inst.instruction |= inst.operands[0].imm;
10209 /* THUMB CPY instruction (argument parse). */
/* Fragment (header elided): presumably do_t_cpy.  A 32-bit request is
   encoded as MOV.W; otherwise the 16-bit high-register MOV form with
   Rd split across bit 7 and bits 2:0.  */
10214 if (inst.size_req == 4)
10216 inst.instruction = THUMB_OP32 (T_MNEM_mov);
10217 inst.instruction |= inst.operands[0].reg << 8;
10218 inst.instruction |= inst.operands[1].reg;
10222 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
10223 inst.instruction |= (inst.operands[0].reg & 0x7);
10224 inst.instruction |= inst.operands[1].reg << 3;
/* Fragment (header elided): presumably do_t_cbz (CBZ/CBNZ).
   Only low registers; 7-bit forward PC-relative branch reloc.  */
10231 set_it_insn_type (OUTSIDE_IT_INSN);
10232 constraint (inst.operands[0].reg > 7, BAD_HIREG);
10233 inst.instruction |= inst.operands[0].reg;
10234 inst.reloc.pc_rel = 1;
10235 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
/* Fragment (header elided): presumably do_t_dbg — DBG #option; the
   immediate is OR-ed into the low bits of the opcode.  */
10241 inst.instruction |= inst.operands[0].imm;
/* Fragment (header elided): presumably do_t_div (SDIV/UDIV).
   Rn defaults to Rd when the middle operand is omitted.  */
10247 unsigned Rd, Rn, Rm;
10249 Rd = inst.operands[0].reg;
10250 Rn = (inst.operands[1].present
10251 ? inst.operands[1].reg : Rd);
10252 Rm = inst.operands[2].reg;
10254 reject_bad_reg (Rd);
10255 reject_bad_reg (Rn);
10256 reject_bad_reg (Rm);
10258 inst.instruction |= Rd << 8;
10259 inst.instruction |= Rn << 16;
10260 inst.instruction |= Rm;
/* Fragment (header elided): presumably do_t_hint (NOP/YIELD/WFE/...):
   pick the 32-bit form only for an explicit .w in unified syntax.  */
10266 if (unified_syntax && inst.size_req == 4)
10267 inst.instruction = THUMB_OP32 (inst.instruction);
10269 inst.instruction = THUMB_OP16 (inst.instruction);
/* Fragment (header elided): presumably do_t_it — the IT/ITT/ITE/...
   instruction.  The mask bits select then/else for up to three
   following instructions.  */
10275 unsigned int cond = inst.operands[0].imm;
10277 set_it_insn_type (IT_INSN);
10278 now_it.mask = (inst.instruction & 0xf) | 0x10;
10281 /* If the condition is a negative condition, invert the mask. */
10282 if ((cond & 0x1) == 0x0)
10284 unsigned int mask = inst.instruction & 0x000f;
/* The mask inversion depends on how many x/y/z slots are used; the
   transformation lines for each case are elided in this extraction.  */
10286 if ((mask & 0x7) == 0)
10287 /* no conversion needed */;
10288 else if ((mask & 0x3) == 0)
10290 else if ((mask & 0x1) == 0)
10295 inst.instruction &= 0xfff0;
10296 inst.instruction |= mask;
10299 inst.instruction |= cond << 4;
10302 /* Helper function used for both push/pop and ldm/stm. */
/* Encode a Thumb-2 LDM/STM.  BASE is the base register, MASK the
   register list, WRITEBACK whether '!' was given.  Single-register
   lists are rewritten as plain LDR/STR.  (The 'static void' line is
   elided by the extraction.)  */
10304 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
10308 load = (inst.instruction & (1 << 20)) != 0;
10310 if (mask & (1 << 13))
10311 inst.error = _("SP not allowed in register list");
10313 if ((mask & (1 << base)) != 0
10315 inst.error = _("having the base register in the register list when "
10316 "using write back is UNPREDICTABLE");
/* Load-specific checks: PC and LR must not both appear; a load of PC
   must be the last instruction in an IT block.  */
10320 if (mask & (1 << 15))
10322 if (mask & (1 << 14))
10323 inst.error = _("LR and PC should not both be in register list");
10325 set_it_insn_type_last ();
10330 if (mask & (1 << 15))
10331 inst.error = _("PC not allowed in register list");
10334 if ((mask & (mask - 1)) == 0)
10336 /* Single register transfers implemented as str/ldr. */
10339 if (inst.instruction & (1 << 23))
10340 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
10342 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
10346 if (inst.instruction & (1 << 23))
10347 inst.instruction = 0x00800000; /* ia -> [base] */
10349 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
10352 inst.instruction |= 0xf8400000;
10354 inst.instruction |= 0x00100000;
/* Replace the one-bit mask with the register number for the ldr/str.  */
10356 mask = ffs (mask) - 1;
10359 else if (writeback)
10360 inst.instruction |= WRITE_BACK;
10362 inst.instruction |= mask;
10363 inst.instruction |= base << 16;
/* Fragment (header elided): presumably do_t_ldmstm.  Tries hard to
   select a 16-bit LDM/STM (or a 16-bit LDR/STR/PUSH/POP equivalent)
   before falling back to the 32-bit encoding via
   encode_thumb2_ldmstm.  */
10369 /* This really doesn't seem worth it. */
10370 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
10371 _("expression too complex"));
10372 constraint (inst.operands[1].writeback,
10373 _("Thumb load/store multiple does not support {reglist}^"));
10375 if (unified_syntax)
10377 bfd_boolean narrow;
10381 /* See if we can use a 16-bit instruction. */
10382 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
10383 && inst.size_req != 4
10384 && !(inst.operands[1].imm & ~0xff))
10386 mask = 1 << inst.operands[0].reg;
10388 if (inst.operands[0].reg <= 7)
/* 16-bit stmia requires writeback; 16-bit ldmia's writeback must
   match whether the base register is absent from the list.  */
10390 if (inst.instruction == T_MNEM_stmia
10391 ? inst.operands[0].writeback
10392 : (inst.operands[0].writeback
10393 == !(inst.operands[1].imm & mask)))
10395 if (inst.instruction == T_MNEM_stmia
10396 && (inst.operands[1].imm & mask)
10397 && (inst.operands[1].imm & (mask - 1)))
10398 as_warn (_("value stored for r%d is UNKNOWN"),
10399 inst.operands[0].reg);
10401 inst.instruction = THUMB_OP16 (inst.instruction);
10402 inst.instruction |= inst.operands[0].reg << 8;
10403 inst.instruction |= inst.operands[1].imm;
10406 else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
10408 /* This means 1 register in reg list one of 3 situations:
10409 1. Instruction is stmia, but without writeback.
10410 2. lmdia without writeback, but with Rn not in
10412 3. ldmia with writeback, but with Rn in reglist.
10413 Case 3 is UNPREDICTABLE behaviour, so we handle
10414 case 1 and 2 which can be converted into a 16-bit
10415 str or ldr. The SP cases are handled below. */
10416 unsigned long opcode;
10417 /* First, record an error for Case 3. */
10418 if (inst.operands[1].imm & mask
10419 && inst.operands[0].writeback)
10421 _("having the base register in the register list when "
10422 "using write back is UNPREDICTABLE");
10424 opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
10426 inst.instruction = THUMB_OP16 (opcode);
10427 inst.instruction |= inst.operands[0].reg << 3;
10428 inst.instruction |= (ffs (inst.operands[1].imm)-1);
/* SP-based forms: convert to push/pop (with writeback) or
   str/ldr [sp, #imm] (single register, no writeback).  */
10432 else if (inst.operands[0] .reg == REG_SP)
10434 if (inst.operands[0].writeback)
10437 THUMB_OP16 (inst.instruction == T_MNEM_stmia
10438 ? T_MNEM_push : T_MNEM_pop);
10439 inst.instruction |= inst.operands[1].imm;
10442 else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
10445 THUMB_OP16 (inst.instruction == T_MNEM_stmia
10446 ? T_MNEM_str_sp : T_MNEM_ldr_sp);
10447 inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
/* No 16-bit form possible: emit the 32-bit encoding.  */
10455 if (inst.instruction < 0xffff)
10456 inst.instruction = THUMB_OP32 (inst.instruction);
10458 encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
10459 inst.operands[0].writeback);
/* Non-unified (classic Thumb) syntax: only ldmia/stmia with low
   registers is valid.  */
10464 constraint (inst.operands[0].reg > 7
10465 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
10466 constraint (inst.instruction != T_MNEM_ldmia
10467 && inst.instruction != T_MNEM_stmia,
10468 _("Thumb-2 instruction only valid in unified syntax"));
10469 if (inst.instruction == T_MNEM_stmia)
10471 if (!inst.operands[0].writeback)
10472 as_warn (_("this instruction will write back the base register"));
10473 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
10474 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
10475 as_warn (_("value stored for r%d is UNKNOWN"),
10476 inst.operands[0].reg);
10480 if (!inst.operands[0].writeback
10481 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
10482 as_warn (_("this instruction will write back the base register"));
10483 else if (inst.operands[0].writeback
10484 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
10485 as_warn (_("this instruction will not write back the base register"));
10488 inst.instruction = THUMB_OP16 (inst.instruction);
10489 inst.instruction |= inst.operands[0].reg << 8;
10490 inst.instruction |= inst.operands[1].imm;
/* Fragment (header elided): presumably do_t_ldrex — LDREX Rt, [Rn{,#imm}].
   Only a plain pre-indexed immediate address is allowed.  */
10497 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
10498 || inst.operands[1].postind || inst.operands[1].writeback
10499 || inst.operands[1].immisreg || inst.operands[1].shifted
10500 || inst.operands[1].negative,
10503 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
10505 inst.instruction |= inst.operands[0].reg << 12;
10506 inst.instruction |= inst.operands[1].reg << 16;
/* Offset is an unsigned 8-bit (word-scaled) field.  */
10507 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
/* Fragment (header elided): presumably do_t_ldrexd — LDREXD Rt, Rt2, [Rn].
   Rt2 defaults to Rt+1 when omitted.  */
10513 if (!inst.operands[1].present)
10515 constraint (inst.operands[0].reg == REG_LR,
10516 _("r14 not allowed as first register "
10517 "when second register is omitted"));
10518 inst.operands[1].reg = inst.operands[0].reg + 1;
10520 constraint (inst.operands[0].reg == inst.operands[1].reg,
10523 inst.instruction |= inst.operands[0].reg << 12;
10524 inst.instruction |= inst.operands[1].reg << 8;
10525 inst.instruction |= inst.operands[2].reg << 16;
/* Fragment (header elided): presumably do_t_ldst — the main Thumb
   load/store encoder (ldr/str and byte/halfword/signed variants).
   Tries a 16-bit encoding first in unified syntax, then the 32-bit
   form; non-unified syntax takes the classic-Thumb path at the end.  */
10531 unsigned long opcode;
/* A load into PC is a branch, so it must close any IT block.  */
10534 if (inst.operands[0].isreg
10535 && !inst.operands[0].preind
10536 && inst.operands[0].reg == REG_PC)
10537 set_it_insn_type_last ();
10539 opcode = inst.instruction;
10540 if (unified_syntax)
10542 if (!inst.operands[1].isreg)
10544 if (opcode <= 0xffff)
10545 inst.instruction = THUMB_OP32 (opcode);
10546 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10549 if (inst.operands[1].isreg
10550 && !inst.operands[1].writeback
10551 && !inst.operands[1].shifted && !inst.operands[1].postind
10552 && !inst.operands[1].negative && inst.operands[0].reg <= 7
10553 && opcode <= 0xffff
10554 && inst.size_req != 4)
10556 /* Insn may have a 16-bit form. */
10557 Rn = inst.operands[1].reg;
10558 if (inst.operands[1].immisreg)
10560 inst.instruction = THUMB_OP16 (opcode);
10562 if (Rn <= 7 && inst.operands[1].imm <= 7)
10564 else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
10565 reject_bad_reg (inst.operands[1].imm);
/* Immediate offset: low base reg, or PC/SP-relative ldr, or
   SP-relative str, all have 16-bit encodings.  */
10567 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
10568 && opcode != T_MNEM_ldrsb)
10569 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
10570 || (Rn == REG_SP && opcode == T_MNEM_str))
10577 if (inst.reloc.pc_rel)
10578 opcode = T_MNEM_ldr_pc2;
10580 opcode = T_MNEM_ldr_pc;
10584 if (opcode == T_MNEM_ldr)
10585 opcode = T_MNEM_ldr_sp;
10587 opcode = T_MNEM_str_sp;
10589 inst.instruction = inst.operands[0].reg << 8;
10593 inst.instruction = inst.operands[0].reg;
10594 inst.instruction |= inst.operands[1].reg << 3;
10596 inst.instruction |= THUMB_OP16 (opcode);
/* With an explicit .n use the 16-bit offset reloc; otherwise record
   the opcode so relaxation can widen it later if needed.  */
10597 if (inst.size_req == 2)
10598 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10600 inst.relax = opcode;
10604 /* Definitely a 32-bit variant. */
10606 /* Warning for Erratum 752419. */
10607 if (opcode == T_MNEM_ldr
10608 && inst.operands[0].reg == REG_SP
10609 && inst.operands[1].writeback == 1
10610 && !inst.operands[1].immisreg)
10612 if (no_cpu_selected ()
10613 || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
10614 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
10615 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
10616 as_warn (_("This instruction may be unpredictable "
10617 "if executed on M-profile cores "
10618 "with interrupts enabled."));
10621 /* Do some validations regarding addressing modes. */
10622 if (inst.operands[1].immisreg)
10623 reject_bad_reg (inst.operands[1].imm)
10625 constraint (inst.operands[1].writeback == 1
10626 && inst.operands[0].reg == inst.operands[1].reg,
10629 inst.instruction = THUMB_OP32 (opcode);
10630 inst.instruction |= inst.operands[0].reg << 12;
10631 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
10632 check_ldr_r15_aligned ();
/* Non-unified (classic Thumb) syntax from here on.  */
10636 constraint (inst.operands[0].reg > 7, BAD_HIREG);
10638 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
10640 /* Only [Rn,Rm] is acceptable. */
10641 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
10642 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
10643 || inst.operands[1].postind || inst.operands[1].shifted
10644 || inst.operands[1].negative,
10645 _("Thumb does not support this addressing mode"));
10646 inst.instruction = THUMB_OP16 (inst.instruction);
10650 inst.instruction = THUMB_OP16 (inst.instruction);
10651 if (!inst.operands[1].isreg)
10652 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
10655 constraint (!inst.operands[1].preind
10656 || inst.operands[1].shifted
10657 || inst.operands[1].writeback,
10658 _("Thumb does not support this addressing mode"));
10659 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
10661 constraint (inst.instruction & 0x0600,
10662 _("byte or halfword not valid for base register"));
10663 constraint (inst.operands[1].reg == REG_PC
10664 && !(inst.instruction & THUMB_LOAD_BIT),
10665 _("r15 based store not allowed"));
10666 constraint (inst.operands[1].immisreg,
10667 _("invalid base register for register offset"));
10669 if (inst.operands[1].reg == REG_PC)
10670 inst.instruction = T_OPCODE_LDR_PC;
10671 else if (inst.instruction & THUMB_LOAD_BIT)
10672 inst.instruction = T_OPCODE_LDR_SP;
10674 inst.instruction = T_OPCODE_STR_SP;
10676 inst.instruction |= inst.operands[0].reg << 8;
10677 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10681 constraint (inst.operands[1].reg > 7, BAD_HIREG);
10682 if (!inst.operands[1].immisreg)
10684 /* Immediate offset. */
10685 inst.instruction |= inst.operands[0].reg;
10686 inst.instruction |= inst.operands[1].reg << 3;
10687 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
10691 /* Register offset. */
10692 constraint (inst.operands[1].imm > 7, BAD_HIREG);
10693 constraint (inst.operands[1].negative,
10694 _("Thumb does not support this addressing mode"));
/* Switch from immediate-offset to register-offset opcodes.  */
10697 switch (inst.instruction)
10699 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
10700 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
10701 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
10702 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
10703 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
10704 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
10705 case 0x5600 /* ldrsb */:
10706 case 0x5e00 /* ldrsh */: break;
10710 inst.instruction |= inst.operands[0].reg;
10711 inst.instruction |= inst.operands[1].reg << 3;
10712 inst.instruction |= inst.operands[1].imm << 6;
/* Fragment (header elided): presumably do_t_ldstd (LDRD/STRD).
   Rt2 defaults to Rt+1; warns on base-register overlap with
   writeback.  */
10718 if (!inst.operands[1].present)
10720 inst.operands[1].reg = inst.operands[0].reg + 1;
10721 constraint (inst.operands[0].reg == REG_LR,
10722 _("r14 not allowed here"));
10723 constraint (inst.operands[0].reg == REG_R12,
10724 _("r12 not allowed here"));
10727 if (inst.operands[2].writeback
10728 && (inst.operands[0].reg == inst.operands[2].reg
10729 || inst.operands[1].reg == inst.operands[2].reg))
10730 as_warn (_("base register written back, and overlaps "
10731 "one of transfer registers"));
10733 inst.instruction |= inst.operands[0].reg << 12;
10734 inst.instruction |= inst.operands[1].reg << 8;
10735 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
/* Fragment (header elided): presumably do_t_ldstt (LDRT/STRT and
   friends) — unprivileged ("_t") addressing mode.  */
10741 inst.instruction |= inst.operands[0].reg << 12;
10742 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
/* Fragment (header elided): presumably do_t_mla (MLA/MLS):
   Rd = Ra +/- Rn*Rm; all four registers must not be SP/PC.  */
10748 unsigned Rd, Rn, Rm, Ra;
10750 Rd = inst.operands[0].reg;
10751 Rn = inst.operands[1].reg;
10752 Rm = inst.operands[2].reg;
10753 Ra = inst.operands[3].reg;
10755 reject_bad_reg (Rd);
10756 reject_bad_reg (Rn);
10757 reject_bad_reg (Rm);
10758 reject_bad_reg (Ra);
10760 inst.instruction |= Rd << 8;
10761 inst.instruction |= Rn << 16;
10762 inst.instruction |= Rm;
10763 inst.instruction |= Ra << 12;
/* Fragment (header elided): presumably do_t_mlal (SMLAL/UMLAL etc.):
   64-bit accumulate into RdHi:RdLo.  */
10769 unsigned RdLo, RdHi, Rn, Rm;
10771 RdLo = inst.operands[0].reg;
10772 RdHi = inst.operands[1].reg;
10773 Rn = inst.operands[2].reg;
10774 Rm = inst.operands[3].reg;
10776 reject_bad_reg (RdLo);
10777 reject_bad_reg (RdHi);
10778 reject_bad_reg (Rn);
10779 reject_bad_reg (Rm);
10781 inst.instruction |= RdLo << 12;
10782 inst.instruction |= RdHi << 8;
10783 inst.instruction |= Rn << 16;
10784 inst.instruction |= Rm;
/* Encode Thumb MOV/MOVS/CMP (register, immediate, or shifted forms).
   Chooses between 16- and 32-bit encodings, rewrites register-shifted
   MOVs as shift instructions, and enforces the various SP/PC rules.
   (The 'static void' line above this fragment is elided.)  */
10788 do_t_mov_cmp (void)
10792 Rn = inst.operands[0].reg;
10793 Rm = inst.operands[1].reg;
10796 set_it_insn_type_last ();
10798 if (unified_syntax)
/* mov/movs put Rd at bit 8; cmp puts Rn at bit 16 in the T32 form.  */
10800 int r0off = (inst.instruction == T_MNEM_mov
10801 || inst.instruction == T_MNEM_movs) ? 8 : 16;
10802 unsigned long opcode;
10803 bfd_boolean narrow;
10804 bfd_boolean low_regs;
10806 low_regs = (Rn <= 7 && Rm <= 7);
10807 opcode = inst.instruction;
10808 if (in_it_block ())
10809 narrow = opcode != T_MNEM_movs;
10811 narrow = opcode != T_MNEM_movs || low_regs;
10812 if (inst.size_req == 4
10813 || inst.operands[1].shifted)
10816 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
10817 if (opcode == T_MNEM_movs && inst.operands[1].isreg
10818 && !inst.operands[1].shifted
10822 inst.instruction = T2_SUBS_PC_LR;
10826 if (opcode == T_MNEM_cmp)
10828 constraint (Rn == REG_PC, BAD_PC);
10831 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
10833 warn_deprecated_sp (Rm);
10834 /* R15 was documented as a valid choice for Rm in ARMv6,
10835 but as UNPREDICTABLE in ARMv7. ARM's proprietary
10836 tools reject R15, so we do too. */
10837 constraint (Rm == REG_PC, BAD_PC);
10840 reject_bad_reg (Rm);
10842 else if (opcode == T_MNEM_mov
10843 || opcode == T_MNEM_movs)
10845 if (inst.operands[1].isreg)
10847 if (opcode == T_MNEM_movs)
10849 reject_bad_reg (Rn);
10850 reject_bad_reg (Rm);
10854 /* This is mov.n. */
10855 if ((Rn == REG_SP || Rn == REG_PC)
10856 && (Rm == REG_SP || Rm == REG_PC))
10858 as_warn (_("Use of r%u as a source register is "
10859 "deprecated when r%u is the destination "
10860 "register."), Rm, Rn);
10865 /* This is mov.w. */
10866 constraint (Rn == REG_PC, BAD_PC);
10867 constraint (Rm == REG_PC, BAD_PC);
10868 constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
10872 reject_bad_reg (Rn);
10875 if (!inst.operands[1].isreg)
10877 /* Immediate operand. */
10878 if (!in_it_block () && opcode == T_MNEM_mov)
10880 if (low_regs && narrow)
10882 inst.instruction = THUMB_OP16 (opcode);
10883 inst.instruction |= Rn << 8;
/* With explicit .n pin the 8-bit immediate reloc; otherwise let
   relaxation pick the final width.  */
10884 if (inst.size_req == 2)
10885 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
10887 inst.relax = opcode;
10891 inst.instruction = THUMB_OP32 (inst.instruction);
10892 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10893 inst.instruction |= Rn << r0off;
10894 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10897 else if (inst.operands[1].shifted && inst.operands[1].immisreg
10898 && (inst.instruction == T_MNEM_mov
10899 || inst.instruction == T_MNEM_movs))
10901 /* Register shifts are encoded as separate shift instructions. */
10902 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
10904 if (in_it_block ())
10909 if (inst.size_req == 4)
10912 if (!low_regs || inst.operands[1].imm > 7)
10918 switch (inst.operands[1].shift_kind)
10921 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
10924 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
10927 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
10930 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
10936 inst.instruction = opcode;
10939 inst.instruction |= Rn;
10940 inst.instruction |= inst.operands[1].imm << 3;
10945 inst.instruction |= CONDS_BIT;
10947 inst.instruction |= Rn << 8;
10948 inst.instruction |= Rm << 16;
10949 inst.instruction |= inst.operands[1].imm;
10954 /* Some mov with immediate shift have narrow variants.
10955 Register shifts are handled above. */
10956 if (low_regs && inst.operands[1].shifted
10957 && (inst.instruction == T_MNEM_mov
10958 || inst.instruction == T_MNEM_movs))
10960 if (in_it_block ())
10961 narrow = (inst.instruction == T_MNEM_mov);
10963 narrow = (inst.instruction == T_MNEM_movs);
10968 switch (inst.operands[1].shift_kind)
10970 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
10971 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
10972 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
10973 default: narrow = FALSE; break;
10979 inst.instruction |= Rn;
10980 inst.instruction |= Rm << 3;
10981 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10985 inst.instruction = THUMB_OP32 (inst.instruction);
10986 inst.instruction |= Rn << r0off;
10987 encode_thumb32_shifted_operand (1);
/* Non-unified (classic Thumb) syntax from here on.  */
10991 switch (inst.instruction)
10994 /* In v4t or v5t a move of two lowregs produces unpredictable
10995 results. Don't allow this. */
10998 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
10999 "MOV Rd, Rs with two low registers is not "
11000 "permitted on this architecture");
11001 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
11005 inst.instruction = T_OPCODE_MOV_HR;
11006 inst.instruction |= (Rn & 0x8) << 4;
11007 inst.instruction |= (Rn & 0x7);
11008 inst.instruction |= Rm << 3;
11012 /* We know we have low registers at this point.
11013 Generate LSLS Rd, Rs, #0. */
11014 inst.instruction = T_OPCODE_LSL_I;
11015 inst.instruction |= Rn;
11016 inst.instruction |= Rm << 3;
11022 inst.instruction = T_OPCODE_CMP_LR;
11023 inst.instruction |= Rn;
11024 inst.instruction |= Rm << 3;
11028 inst.instruction = T_OPCODE_CMP_HR;
11029 inst.instruction |= (Rn & 0x8) << 4;
11030 inst.instruction |= (Rn & 0x7);
11031 inst.instruction |= Rm << 3;
11038 inst.instruction = THUMB_OP16 (inst.instruction);
11040 /* PR 10443: Do not silently ignore shifted operands. */
11041 constraint (inst.operands[1].shifted,
11042 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
11044 if (inst.operands[1].isreg)
11046 if (Rn < 8 && Rm < 8)
11048 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
11049 since a MOV instruction produces unpredictable results. */
11050 if (inst.instruction == T_OPCODE_MOV_I8)
11051 inst.instruction = T_OPCODE_ADD_I3;
11053 inst.instruction = T_OPCODE_CMP_LR;
11055 inst.instruction |= Rn;
11056 inst.instruction |= Rm << 3;
11060 if (inst.instruction == T_OPCODE_MOV_I8)
11061 inst.instruction = T_OPCODE_MOV_HR;
11063 inst.instruction = T_OPCODE_CMP_HR;
11069 constraint (Rn > 7,
11070 _("only lo regs allowed with immediate"));
11071 inst.instruction |= Rn << 8;
11072 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
/* Fragment (header elided): presumably do_t_mov16 (MOVW/MOVT).
   Converts :lower16:/:upper16: relocs to the Thumb variants and
   scatters a resolved 16-bit immediate over the split fields.  */
11083 top = (inst.instruction & 0x00800000) != 0;
11084 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
11086 constraint (top, _(":lower16: not allowed this instruction"));
11087 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
11089 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
11091 constraint (!top, _(":upper16: not allowed this instruction"));
11092 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
11095 Rd = inst.operands[0].reg;
11096 reject_bad_reg (Rd);
11098 inst.instruction |= Rd << 8;
11099 if (inst.reloc.type == BFD_RELOC_UNUSED)
/* No reloc needed: encode imm16 as imm4:i:imm3:imm8 directly.  */
11101 imm = inst.reloc.exp.X_add_number;
11102 inst.instruction |= (imm & 0xf000) << 4;
11103 inst.instruction |= (imm & 0x0800) << 15;
11104 inst.instruction |= (imm & 0x0700) << 4;
11105 inst.instruction |= (imm & 0x00ff);
/* Encode MVN/MVNS/TST/TEQ/CMP/CMN two-operand forms, narrowing to
   16 bits where the operands allow.  (The 'static void' line above
   this fragment is elided.)  */
11110 do_t_mvn_tst (void)
11114 Rn = inst.operands[0].reg;
11115 Rm = inst.operands[1].reg;
11117 if (inst.instruction == T_MNEM_cmp
11118 || inst.instruction == T_MNEM_cmn)
11119 constraint (Rn == REG_PC, BAD_PC);
11121 reject_bad_reg (Rn);
11122 reject_bad_reg (Rm);
11124 if (unified_syntax)
/* mvn/mvns put Rd at bit 8; the compare/test forms put Rn at 16.  */
11126 int r0off = (inst.instruction == T_MNEM_mvn
11127 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
11128 bfd_boolean narrow;
11130 if (inst.size_req == 4
11131 || inst.instruction > 0xffff
11132 || inst.operands[1].shifted
11133 || Rn > 7 || Rm > 7)
11135 else if (inst.instruction == T_MNEM_cmn)
11137 else if (THUMB_SETS_FLAGS (inst.instruction))
11138 narrow = !in_it_block ();
11140 narrow = in_it_block ();
11142 if (!inst.operands[1].isreg)
11144 /* For an immediate, we always generate a 32-bit opcode;
11145 section relaxation will shrink it later if possible. */
11146 if (inst.instruction < 0xffff)
11147 inst.instruction = THUMB_OP32 (inst.instruction);
11148 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11149 inst.instruction |= Rn << r0off;
11150 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11154 /* See if we can do this with a 16-bit instruction. */
11157 inst.instruction = THUMB_OP16 (inst.instruction);
11158 inst.instruction |= Rn;
11159 inst.instruction |= Rm << 3;
11163 constraint (inst.operands[1].shifted
11164 && inst.operands[1].immisreg,
11165 _("shift must be constant"));
11166 if (inst.instruction < 0xffff)
11167 inst.instruction = THUMB_OP32 (inst.instruction);
11168 inst.instruction |= Rn << r0off;
11169 encode_thumb32_shifted_operand (1);
/* Non-unified syntax: only the plain low-register 16-bit form.  */
11175 constraint (inst.instruction > 0xffff
11176 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
11177 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
11178 _("unshifted register required"));
11179 constraint (Rn > 7 || Rm > 7,
11182 inst.instruction = THUMB_OP16 (inst.instruction);
11183 inst.instruction |= Rn;
11184 inst.instruction |= Rm << 3;
/* Fragment (header elided): presumably do_t_mrs — MRS Rd, <psr>.
   Defers to the VFP handler first (vmrs via mrs syntax).  */
11193 if (do_vfp_nsyn_mrs () == SUCCESS)
11196 Rd = inst.operands[0].reg;
11197 reject_bad_reg (Rd);
11198 inst.instruction |= Rd << 8;
11200 if (inst.operands[1].isreg)
/* Banked-register form: validate and scatter the register spec.  */
11202 unsigned br = inst.operands[1].reg;
11203 if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
11204 as_bad (_("bad register for mrs"));
11206 inst.instruction |= br & (0xf << 16);
11207 inst.instruction |= (br & 0x300) >> 4;
11208 inst.instruction |= (br & SPSR_BIT) >> 2;
11212 int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11214 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11216 /* PR gas/12698: The constraint is only applied for m_profile.
11217 If the user has specified -march=all, we want to ignore it as
11218 we are building for any CPU type, including non-m variants. */
11219 bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
11220 constraint ((flags != 0) && m_profile, _("selected processor does "
11221 "not support requested special purpose register"));
11224 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
11226 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
11227 _("'APSR', 'CPSR' or 'SPSR' expected"));
11229 inst.instruction |= (flags & SPSR_BIT) >> 2;
11230 inst.instruction |= inst.operands[1].imm & 0xff;
11231 inst.instruction |= 0xf0000;
/* Fragment (header elided): presumably do_t_msr — MSR <psr>, Rn.
   Thumb MSR takes no immediate form; flags come from either a banked
   register spec or a psr-fields immediate.  */
11241 if (do_vfp_nsyn_msr () == SUCCESS)
11244 constraint (!inst.operands[1].isreg,
11245 _("Thumb encoding does not support an immediate here"));
11247 if (inst.operands[0].isreg)
11248 flags = (int)(inst.operands[0].reg);
11250 flags = inst.operands[0].imm;
11252 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11254 int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11256 /* PR gas/12698: The constraint is only applied for m_profile.
11257 If the user has specified -march=all, we want to ignore it as
11258 we are building for any CPU type, including non-m variants. */
11259 bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
11260 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11261 && (bits & ~(PSR_s | PSR_f)) != 0)
11262 || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11263 && bits != PSR_f)) && m_profile,
11264 _("selected processor does not support requested special "
11265 "purpose register"));
11268 constraint ((flags & 0xff) != 0, _("selected processor does not support "
11269 "requested special purpose register"));
11271 Rn = inst.operands[1].reg;
11272 reject_bad_reg (Rn);
11274 inst.instruction |= (flags & SPSR_BIT) >> 2;
11275 inst.instruction |= (flags & 0xf0000) >> 8;
11276 inst.instruction |= (flags & 0x300) >> 4;
11277 inst.instruction |= (flags & 0xff);
11278 inst.instruction |= Rn << 16;
11284 bfd_boolean narrow;
11285 unsigned Rd, Rn, Rm;
11287 if (!inst.operands[2].present)
11288 inst.operands[2].reg = inst.operands[0].reg;
11290 Rd = inst.operands[0].reg;
11291 Rn = inst.operands[1].reg;
11292 Rm = inst.operands[2].reg;
11294 if (unified_syntax)
11296 if (inst.size_req == 4
11302 else if (inst.instruction == T_MNEM_muls)
11303 narrow = !in_it_block ();
11305 narrow = in_it_block ();
11309 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
11310 constraint (Rn > 7 || Rm > 7,
11317 /* 16-bit MULS/Conditional MUL. */
11318 inst.instruction = THUMB_OP16 (inst.instruction);
11319 inst.instruction |= Rd;
11322 inst.instruction |= Rm << 3;
11324 inst.instruction |= Rn << 3;
11326 constraint (1, _("dest must overlap one source register"));
11330 constraint (inst.instruction != T_MNEM_mul,
11331 _("Thumb-2 MUL must not set flags"));
11333 inst.instruction = THUMB_OP32 (inst.instruction);
11334 inst.instruction |= Rd << 8;
11335 inst.instruction |= Rn << 16;
11336 inst.instruction |= Rm << 0;
11338 reject_bad_reg (Rd);
11339 reject_bad_reg (Rn);
11340 reject_bad_reg (Rm);
11347 unsigned RdLo, RdHi, Rn, Rm;
11349 RdLo = inst.operands[0].reg;
11350 RdHi = inst.operands[1].reg;
11351 Rn = inst.operands[2].reg;
11352 Rm = inst.operands[3].reg;
11354 reject_bad_reg (RdLo);
11355 reject_bad_reg (RdHi);
11356 reject_bad_reg (Rn);
11357 reject_bad_reg (Rm);
11359 inst.instruction |= RdLo << 12;
11360 inst.instruction |= RdHi << 8;
11361 inst.instruction |= Rn << 16;
11362 inst.instruction |= Rm;
11365 as_tsktsk (_("rdhi and rdlo must be different"));
11371 set_it_insn_type (NEUTRAL_IT_INSN);
11373 if (unified_syntax)
11375 if (inst.size_req == 4 || inst.operands[0].imm > 15)
11377 inst.instruction = THUMB_OP32 (inst.instruction);
11378 inst.instruction |= inst.operands[0].imm;
11382 /* PR9722: Check for Thumb2 availability before
11383 generating a thumb2 nop instruction. */
11384 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
11386 inst.instruction = THUMB_OP16 (inst.instruction);
11387 inst.instruction |= inst.operands[0].imm << 4;
11390 inst.instruction = 0x46c0;
11395 constraint (inst.operands[0].present,
11396 _("Thumb does not support NOP with hints"));
11397 inst.instruction = 0x46c0;
11404 if (unified_syntax)
11406 bfd_boolean narrow;
11408 if (THUMB_SETS_FLAGS (inst.instruction))
11409 narrow = !in_it_block ();
11411 narrow = in_it_block ();
11412 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11414 if (inst.size_req == 4)
11419 inst.instruction = THUMB_OP32 (inst.instruction);
11420 inst.instruction |= inst.operands[0].reg << 8;
11421 inst.instruction |= inst.operands[1].reg << 16;
11425 inst.instruction = THUMB_OP16 (inst.instruction);
11426 inst.instruction |= inst.operands[0].reg;
11427 inst.instruction |= inst.operands[1].reg << 3;
11432 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
11434 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11436 inst.instruction = THUMB_OP16 (inst.instruction);
11437 inst.instruction |= inst.operands[0].reg;
11438 inst.instruction |= inst.operands[1].reg << 3;
11447 Rd = inst.operands[0].reg;
11448 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
11450 reject_bad_reg (Rd);
11451 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
11452 reject_bad_reg (Rn);
11454 inst.instruction |= Rd << 8;
11455 inst.instruction |= Rn << 16;
11457 if (!inst.operands[2].isreg)
11459 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11460 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11466 Rm = inst.operands[2].reg;
11467 reject_bad_reg (Rm);
11469 constraint (inst.operands[2].shifted
11470 && inst.operands[2].immisreg,
11471 _("shift must be constant"));
11472 encode_thumb32_shifted_operand (2);
11479 unsigned Rd, Rn, Rm;
11481 Rd = inst.operands[0].reg;
11482 Rn = inst.operands[1].reg;
11483 Rm = inst.operands[2].reg;
11485 reject_bad_reg (Rd);
11486 reject_bad_reg (Rn);
11487 reject_bad_reg (Rm);
11489 inst.instruction |= Rd << 8;
11490 inst.instruction |= Rn << 16;
11491 inst.instruction |= Rm;
11492 if (inst.operands[3].present)
11494 unsigned int val = inst.reloc.exp.X_add_number;
11495 constraint (inst.reloc.exp.X_op != O_constant,
11496 _("expression too complex"));
11497 inst.instruction |= (val & 0x1c) << 10;
11498 inst.instruction |= (val & 0x03) << 6;
11505 if (!inst.operands[3].present)
11509 inst.instruction &= ~0x00000020;
11511 /* PR 10168. Swap the Rm and Rn registers. */
11512 Rtmp = inst.operands[1].reg;
11513 inst.operands[1].reg = inst.operands[2].reg;
11514 inst.operands[2].reg = Rtmp;
11522 if (inst.operands[0].immisreg)
11523 reject_bad_reg (inst.operands[0].imm);
11525 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
11529 do_t_push_pop (void)
11533 constraint (inst.operands[0].writeback,
11534 _("push/pop do not support {reglist}^"));
11535 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
11536 _("expression too complex"));
11538 mask = inst.operands[0].imm;
11539 if ((mask & ~0xff) == 0)
11540 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
11541 else if ((inst.instruction == T_MNEM_push
11542 && (mask & ~0xff) == 1 << REG_LR)
11543 || (inst.instruction == T_MNEM_pop
11544 && (mask & ~0xff) == 1 << REG_PC))
11546 inst.instruction = THUMB_OP16 (inst.instruction);
11547 inst.instruction |= THUMB_PP_PC_LR;
11548 inst.instruction |= mask & 0xff;
11550 else if (unified_syntax)
11552 inst.instruction = THUMB_OP32 (inst.instruction);
11553 encode_thumb2_ldmstm (13, mask, TRUE);
11557 inst.error = _("invalid register list to push/pop instruction");
11567 Rd = inst.operands[0].reg;
11568 Rm = inst.operands[1].reg;
11570 reject_bad_reg (Rd);
11571 reject_bad_reg (Rm);
11573 inst.instruction |= Rd << 8;
11574 inst.instruction |= Rm << 16;
11575 inst.instruction |= Rm;
11583 Rd = inst.operands[0].reg;
11584 Rm = inst.operands[1].reg;
11586 reject_bad_reg (Rd);
11587 reject_bad_reg (Rm);
11589 if (Rd <= 7 && Rm <= 7
11590 && inst.size_req != 4)
11592 inst.instruction = THUMB_OP16 (inst.instruction);
11593 inst.instruction |= Rd;
11594 inst.instruction |= Rm << 3;
11596 else if (unified_syntax)
11598 inst.instruction = THUMB_OP32 (inst.instruction);
11599 inst.instruction |= Rd << 8;
11600 inst.instruction |= Rm << 16;
11601 inst.instruction |= Rm;
11604 inst.error = BAD_HIREG;
11612 Rd = inst.operands[0].reg;
11613 Rm = inst.operands[1].reg;
11615 reject_bad_reg (Rd);
11616 reject_bad_reg (Rm);
11618 inst.instruction |= Rd << 8;
11619 inst.instruction |= Rm;
11627 Rd = inst.operands[0].reg;
11628 Rs = (inst.operands[1].present
11629 ? inst.operands[1].reg /* Rd, Rs, foo */
11630 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
11632 reject_bad_reg (Rd);
11633 reject_bad_reg (Rs);
11634 if (inst.operands[2].isreg)
11635 reject_bad_reg (inst.operands[2].reg);
11637 inst.instruction |= Rd << 8;
11638 inst.instruction |= Rs << 16;
11639 if (!inst.operands[2].isreg)
11641 bfd_boolean narrow;
11643 if ((inst.instruction & 0x00100000) != 0)
11644 narrow = !in_it_block ();
11646 narrow = in_it_block ();
11648 if (Rd > 7 || Rs > 7)
11651 if (inst.size_req == 4 || !unified_syntax)
11654 if (inst.reloc.exp.X_op != O_constant
11655 || inst.reloc.exp.X_add_number != 0)
11658 /* Turn rsb #0 into 16-bit neg. We should probably do this via
11659 relaxation, but it doesn't seem worth the hassle. */
11662 inst.reloc.type = BFD_RELOC_UNUSED;
11663 inst.instruction = THUMB_OP16 (T_MNEM_negs);
11664 inst.instruction |= Rs << 3;
11665 inst.instruction |= Rd;
11669 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11670 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11674 encode_thumb32_shifted_operand (2);
11680 set_it_insn_type (OUTSIDE_IT_INSN);
11681 if (inst.operands[0].imm)
11682 inst.instruction |= 0x8;
11688 if (!inst.operands[1].present)
11689 inst.operands[1].reg = inst.operands[0].reg;
11691 if (unified_syntax)
11693 bfd_boolean narrow;
11696 switch (inst.instruction)
11699 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
11701 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
11703 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
11705 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
11709 if (THUMB_SETS_FLAGS (inst.instruction))
11710 narrow = !in_it_block ();
11712 narrow = in_it_block ();
11713 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
11715 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
11717 if (inst.operands[2].isreg
11718 && (inst.operands[1].reg != inst.operands[0].reg
11719 || inst.operands[2].reg > 7))
11721 if (inst.size_req == 4)
11724 reject_bad_reg (inst.operands[0].reg);
11725 reject_bad_reg (inst.operands[1].reg);
11729 if (inst.operands[2].isreg)
11731 reject_bad_reg (inst.operands[2].reg);
11732 inst.instruction = THUMB_OP32 (inst.instruction);
11733 inst.instruction |= inst.operands[0].reg << 8;
11734 inst.instruction |= inst.operands[1].reg << 16;
11735 inst.instruction |= inst.operands[2].reg;
11737 /* PR 12854: Error on extraneous shifts. */
11738 constraint (inst.operands[2].shifted,
11739 _("extraneous shift as part of operand to shift insn"));
11743 inst.operands[1].shifted = 1;
11744 inst.operands[1].shift_kind = shift_kind;
11745 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
11746 ? T_MNEM_movs : T_MNEM_mov);
11747 inst.instruction |= inst.operands[0].reg << 8;
11748 encode_thumb32_shifted_operand (1);
11749 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
11750 inst.reloc.type = BFD_RELOC_UNUSED;
11755 if (inst.operands[2].isreg)
11757 switch (shift_kind)
11759 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
11760 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
11761 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
11762 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
11766 inst.instruction |= inst.operands[0].reg;
11767 inst.instruction |= inst.operands[2].reg << 3;
11769 /* PR 12854: Error on extraneous shifts. */
11770 constraint (inst.operands[2].shifted,
11771 _("extraneous shift as part of operand to shift insn"));
11775 switch (shift_kind)
11777 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11778 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11779 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11782 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11783 inst.instruction |= inst.operands[0].reg;
11784 inst.instruction |= inst.operands[1].reg << 3;
11790 constraint (inst.operands[0].reg > 7
11791 || inst.operands[1].reg > 7, BAD_HIREG);
11792 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11794 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
11796 constraint (inst.operands[2].reg > 7, BAD_HIREG);
11797 constraint (inst.operands[0].reg != inst.operands[1].reg,
11798 _("source1 and dest must be same register"));
11800 switch (inst.instruction)
11802 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
11803 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
11804 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
11805 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
11809 inst.instruction |= inst.operands[0].reg;
11810 inst.instruction |= inst.operands[2].reg << 3;
11812 /* PR 12854: Error on extraneous shifts. */
11813 constraint (inst.operands[2].shifted,
11814 _("extraneous shift as part of operand to shift insn"));
11818 switch (inst.instruction)
11820 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
11821 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
11822 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
11823 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
11826 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11827 inst.instruction |= inst.operands[0].reg;
11828 inst.instruction |= inst.operands[1].reg << 3;
11836 unsigned Rd, Rn, Rm;
11838 Rd = inst.operands[0].reg;
11839 Rn = inst.operands[1].reg;
11840 Rm = inst.operands[2].reg;
11842 reject_bad_reg (Rd);
11843 reject_bad_reg (Rn);
11844 reject_bad_reg (Rm);
11846 inst.instruction |= Rd << 8;
11847 inst.instruction |= Rn << 16;
11848 inst.instruction |= Rm;
11854 unsigned Rd, Rn, Rm;
11856 Rd = inst.operands[0].reg;
11857 Rm = inst.operands[1].reg;
11858 Rn = inst.operands[2].reg;
11860 reject_bad_reg (Rd);
11861 reject_bad_reg (Rn);
11862 reject_bad_reg (Rm);
11864 inst.instruction |= Rd << 8;
11865 inst.instruction |= Rn << 16;
11866 inst.instruction |= Rm;
11872 unsigned int value = inst.reloc.exp.X_add_number;
11873 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
11874 _("SMC is not permitted on this architecture"));
11875 constraint (inst.reloc.exp.X_op != O_constant,
11876 _("expression too complex"));
11877 inst.reloc.type = BFD_RELOC_UNUSED;
11878 inst.instruction |= (value & 0xf000) >> 12;
11879 inst.instruction |= (value & 0x0ff0);
11880 inst.instruction |= (value & 0x000f) << 16;
11886 unsigned int value = inst.reloc.exp.X_add_number;
11888 inst.reloc.type = BFD_RELOC_UNUSED;
11889 inst.instruction |= (value & 0x0fff);
11890 inst.instruction |= (value & 0xf000) << 4;
11894 do_t_ssat_usat (int bias)
11898 Rd = inst.operands[0].reg;
11899 Rn = inst.operands[2].reg;
11901 reject_bad_reg (Rd);
11902 reject_bad_reg (Rn);
11904 inst.instruction |= Rd << 8;
11905 inst.instruction |= inst.operands[1].imm - bias;
11906 inst.instruction |= Rn << 16;
11908 if (inst.operands[3].present)
11910 offsetT shift_amount = inst.reloc.exp.X_add_number;
11912 inst.reloc.type = BFD_RELOC_UNUSED;
11914 constraint (inst.reloc.exp.X_op != O_constant,
11915 _("expression too complex"));
11917 if (shift_amount != 0)
11919 constraint (shift_amount > 31,
11920 _("shift expression is too large"));
11922 if (inst.operands[3].shift_kind == SHIFT_ASR)
11923 inst.instruction |= 0x00200000; /* sh bit. */
11925 inst.instruction |= (shift_amount & 0x1c) << 10;
11926 inst.instruction |= (shift_amount & 0x03) << 6;
/* Encode Thumb-2 SSAT: signed saturate, position encoded biased by 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
11942 Rd = inst.operands[0].reg;
11943 Rn = inst.operands[2].reg;
11945 reject_bad_reg (Rd);
11946 reject_bad_reg (Rn);
11948 inst.instruction |= Rd << 8;
11949 inst.instruction |= inst.operands[1].imm - 1;
11950 inst.instruction |= Rn << 16;
11956 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
11957 || inst.operands[2].postind || inst.operands[2].writeback
11958 || inst.operands[2].immisreg || inst.operands[2].shifted
11959 || inst.operands[2].negative,
11962 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
11964 inst.instruction |= inst.operands[0].reg << 8;
11965 inst.instruction |= inst.operands[1].reg << 12;
11966 inst.instruction |= inst.operands[2].reg << 16;
11967 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11973 if (!inst.operands[2].present)
11974 inst.operands[2].reg = inst.operands[1].reg + 1;
11976 constraint (inst.operands[0].reg == inst.operands[1].reg
11977 || inst.operands[0].reg == inst.operands[2].reg
11978 || inst.operands[0].reg == inst.operands[3].reg,
11981 inst.instruction |= inst.operands[0].reg;
11982 inst.instruction |= inst.operands[1].reg << 12;
11983 inst.instruction |= inst.operands[2].reg << 8;
11984 inst.instruction |= inst.operands[3].reg << 16;
11990 unsigned Rd, Rn, Rm;
11992 Rd = inst.operands[0].reg;
11993 Rn = inst.operands[1].reg;
11994 Rm = inst.operands[2].reg;
11996 reject_bad_reg (Rd);
11997 reject_bad_reg (Rn);
11998 reject_bad_reg (Rm);
12000 inst.instruction |= Rd << 8;
12001 inst.instruction |= Rn << 16;
12002 inst.instruction |= Rm;
12003 inst.instruction |= inst.operands[3].imm << 4;
12011 Rd = inst.operands[0].reg;
12012 Rm = inst.operands[1].reg;
12014 reject_bad_reg (Rd);
12015 reject_bad_reg (Rm);
12017 if (inst.instruction <= 0xffff
12018 && inst.size_req != 4
12019 && Rd <= 7 && Rm <= 7
12020 && (!inst.operands[2].present || inst.operands[2].imm == 0))
12022 inst.instruction = THUMB_OP16 (inst.instruction);
12023 inst.instruction |= Rd;
12024 inst.instruction |= Rm << 3;
12026 else if (unified_syntax)
12028 if (inst.instruction <= 0xffff)
12029 inst.instruction = THUMB_OP32 (inst.instruction);
12030 inst.instruction |= Rd << 8;
12031 inst.instruction |= Rm;
12032 inst.instruction |= inst.operands[2].imm << 4;
12036 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
12037 _("Thumb encoding does not support rotation"));
12038 constraint (1, BAD_HIREG);
12045 /* We have to do the following check manually as ARM_EXT_OS only applies
12047 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
12049 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
12050 /* This only applies to the v6m howver, not later architectures. */
12051 && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
12052 as_bad (_("SVC is not permitted on this architecture"));
12053 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
12056 inst.reloc.type = BFD_RELOC_ARM_SWI;
12065 half = (inst.instruction & 0x10) != 0;
12066 set_it_insn_type_last ();
12067 constraint (inst.operands[0].immisreg,
12068 _("instruction requires register index"));
12070 Rn = inst.operands[0].reg;
12071 Rm = inst.operands[0].imm;
12073 constraint (Rn == REG_SP, BAD_SP);
12074 reject_bad_reg (Rm);
12076 constraint (!half && inst.operands[0].shifted,
12077 _("instruction does not allow shifted index"));
12078 inst.instruction |= (Rn << 16) | Rm;
/* Encode Thumb-2 USAT: unsigned saturate, position encoded unbiased.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
12092 Rd = inst.operands[0].reg;
12093 Rn = inst.operands[2].reg;
12095 reject_bad_reg (Rd);
12096 reject_bad_reg (Rn);
12098 inst.instruction |= Rd << 8;
12099 inst.instruction |= inst.operands[1].imm;
12100 inst.instruction |= Rn << 16;
12103 /* Neon instruction encoder helpers. */
12105 /* Encodings for the different types for various Neon opcodes. */
12107 /* An "invalid" code for the following tables. */
12110 struct neon_tab_entry
12113 unsigned float_or_poly;
12114 unsigned scalar_or_imm;
12117 /* Map overloaded Neon opcodes to their respective encodings. */
12118 #define NEON_ENC_TAB \
12119 X(vabd, 0x0000700, 0x1200d00, N_INV), \
12120 X(vmax, 0x0000600, 0x0000f00, N_INV), \
12121 X(vmin, 0x0000610, 0x0200f00, N_INV), \
12122 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
12123 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
12124 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
12125 X(vadd, 0x0000800, 0x0000d00, N_INV), \
12126 X(vsub, 0x1000800, 0x0200d00, N_INV), \
12127 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
12128 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
12129 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
12130 /* Register variants of the following two instructions are encoded as
12131 vcge / vcgt with the operands reversed. */ \
12132 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
12133 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
12134 X(vfma, N_INV, 0x0000c10, N_INV), \
12135 X(vfms, N_INV, 0x0200c10, N_INV), \
12136 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
12137 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
12138 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
12139 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
12140 X(vmlal, 0x0800800, N_INV, 0x0800240), \
12141 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
12142 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
12143 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
12144 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
12145 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
12146 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
12147 X(vshl, 0x0000400, N_INV, 0x0800510), \
12148 X(vqshl, 0x0000410, N_INV, 0x0800710), \
12149 X(vand, 0x0000110, N_INV, 0x0800030), \
12150 X(vbic, 0x0100110, N_INV, 0x0800030), \
12151 X(veor, 0x1000110, N_INV, N_INV), \
12152 X(vorn, 0x0300110, N_INV, 0x0800010), \
12153 X(vorr, 0x0200110, N_INV, 0x0800010), \
12154 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
12155 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
12156 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
12157 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
12158 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
12159 X(vst1, 0x0000000, 0x0800000, N_INV), \
12160 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
12161 X(vst2, 0x0000100, 0x0800100, N_INV), \
12162 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
12163 X(vst3, 0x0000200, 0x0800200, N_INV), \
12164 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
12165 X(vst4, 0x0000300, 0x0800300, N_INV), \
12166 X(vmovn, 0x1b20200, N_INV, N_INV), \
12167 X(vtrn, 0x1b20080, N_INV, N_INV), \
12168 X(vqmovn, 0x1b20200, N_INV, N_INV), \
12169 X(vqmovun, 0x1b20240, N_INV, N_INV), \
12170 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
12171 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
12172 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
12173 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
12174 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
12175 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
12176 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
12177 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
12178 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
12182 #define X(OPC,I,F,S) N_MNEM_##OPC
12187 static const struct neon_tab_entry neon_enc_tab[] =
12189 #define X(OPC,I,F,S) { (I), (F), (S) }
12194 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
12195 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12196 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12197 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12198 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12199 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12200 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12201 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
12202 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
12203 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
12204 #define NEON_ENC_SINGLE_(X) \
12205 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
12206 #define NEON_ENC_DOUBLE_(X) \
12207 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
12209 #define NEON_ENCODE(type, inst) \
12212 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
12213 inst.is_neon = 1; \
12217 #define check_neon_suffixes \
12220 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
12222 as_bad (_("invalid neon suffix for non neon instruction")); \
12228 /* Define shapes for instruction operands. The following mnemonic characters
12229 are used in this table:
12231 F - VFP S<n> register
12232 D - Neon D<n> register
12233 Q - Neon Q<n> register
12237 L - D<n> register list
12239 This table is used to generate various data:
12240 - enumerations of the form NS_DDR to be used as arguments to
12242 - a table classifying shapes into single, double, quad, mixed.
12243 - a table used to drive neon_select_shape. */
12245 #define NEON_SHAPE_DEF \
12246 X(3, (D, D, D), DOUBLE), \
12247 X(3, (Q, Q, Q), QUAD), \
12248 X(3, (D, D, I), DOUBLE), \
12249 X(3, (Q, Q, I), QUAD), \
12250 X(3, (D, D, S), DOUBLE), \
12251 X(3, (Q, Q, S), QUAD), \
12252 X(2, (D, D), DOUBLE), \
12253 X(2, (Q, Q), QUAD), \
12254 X(2, (D, S), DOUBLE), \
12255 X(2, (Q, S), QUAD), \
12256 X(2, (D, R), DOUBLE), \
12257 X(2, (Q, R), QUAD), \
12258 X(2, (D, I), DOUBLE), \
12259 X(2, (Q, I), QUAD), \
12260 X(3, (D, L, D), DOUBLE), \
12261 X(2, (D, Q), MIXED), \
12262 X(2, (Q, D), MIXED), \
12263 X(3, (D, Q, I), MIXED), \
12264 X(3, (Q, D, I), MIXED), \
12265 X(3, (Q, D, D), MIXED), \
12266 X(3, (D, Q, Q), MIXED), \
12267 X(3, (Q, Q, D), MIXED), \
12268 X(3, (Q, D, S), MIXED), \
12269 X(3, (D, Q, S), MIXED), \
12270 X(4, (D, D, D, I), DOUBLE), \
12271 X(4, (Q, Q, Q, I), QUAD), \
12272 X(2, (F, F), SINGLE), \
12273 X(3, (F, F, F), SINGLE), \
12274 X(2, (F, I), SINGLE), \
12275 X(2, (F, D), MIXED), \
12276 X(2, (D, F), MIXED), \
12277 X(3, (F, F, I), MIXED), \
12278 X(4, (R, R, F, F), SINGLE), \
12279 X(4, (F, F, R, R), SINGLE), \
12280 X(3, (D, R, R), DOUBLE), \
12281 X(3, (R, R, D), DOUBLE), \
12282 X(2, (S, R), SINGLE), \
12283 X(2, (R, S), SINGLE), \
12284 X(2, (F, R), SINGLE), \
12285 X(2, (R, F), SINGLE)
12287 #define S2(A,B) NS_##A##B
12288 #define S3(A,B,C) NS_##A##B##C
12289 #define S4(A,B,C,D) NS_##A##B##C##D
12291 #define X(N, L, C) S##N L
12304 enum neon_shape_class
12312 #define X(N, L, C) SC_##C
12314 static enum neon_shape_class neon_shape_class[] =
12332 /* Register widths of above. */
12333 static unsigned neon_shape_el_size[] =
12344 struct neon_shape_info
12347 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
12350 #define S2(A,B) { SE_##A, SE_##B }
12351 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
12352 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
12354 #define X(N, L, C) { N, S##N L }
12356 static struct neon_shape_info neon_shape_tab[] =
12366 /* Bit masks used in type checking given instructions.
12367 'N_EQK' means the type must be the same as (or based on in some way) the key
12368 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
12369 set, various other bits can be set as well in order to modify the meaning of
12370 the type constraint. */
12372 enum neon_type_mask
12395 N_KEY = 0x1000000, /* Key element (main type specifier). */
12396 N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
12397 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
12398 N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
12399 N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
12400 N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
12401 N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
12402 N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer. */
12403 N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
12404 N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
12406 N_MAX_NONSPECIAL = N_F64
12409 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
12411 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
12412 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
12413 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
12414 #define N_SUF_32 (N_SU_32 | N_F32)
12415 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
12416 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
12418 /* Pass this as the first type argument to neon_check_type to ignore types
12420 #define N_IGNORE_TYPE (N_KEY | N_EQK)
12422 /* Select a "shape" for the current instruction (describing register types or
12423 sizes) from a list of alternatives. Return NS_NULL if the current instruction
12424 doesn't fit. For non-polymorphic shapes, checking is usually done as a
12425 function of operand parsing, so this function doesn't need to be called.
12426 Shapes should be listed in order of decreasing length. */
12428 static enum neon_shape
12429 neon_select_shape (enum neon_shape shape, ...)
12432 enum neon_shape first_shape = shape;
12434 /* Fix missing optional operands. FIXME: we don't know at this point how
12435 many arguments we should have, so this makes the assumption that we have
12436 > 1. This is true of all current Neon opcodes, I think, but may not be
12437 true in the future. */
12438 if (!inst.operands[1].present)
12439 inst.operands[1] = inst.operands[0];
12441 va_start (ap, shape);
12443 for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
12448 for (j = 0; j < neon_shape_tab[shape].els; j++)
12450 if (!inst.operands[j].present)
12456 switch (neon_shape_tab[shape].el[j])
12459 if (!(inst.operands[j].isreg
12460 && inst.operands[j].isvec
12461 && inst.operands[j].issingle
12462 && !inst.operands[j].isquad))
12467 if (!(inst.operands[j].isreg
12468 && inst.operands[j].isvec
12469 && !inst.operands[j].isquad
12470 && !inst.operands[j].issingle))
12475 if (!(inst.operands[j].isreg
12476 && !inst.operands[j].isvec))
12481 if (!(inst.operands[j].isreg
12482 && inst.operands[j].isvec
12483 && inst.operands[j].isquad
12484 && !inst.operands[j].issingle))
12489 if (!(!inst.operands[j].isreg
12490 && !inst.operands[j].isscalar))
12495 if (!(!inst.operands[j].isreg
12496 && inst.operands[j].isscalar))
12506 if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
12507 /* We've matched all the entries in the shape table, and we don't
12508 have any left over operands which have not been matched. */
12514 if (shape == NS_NULL && first_shape != NS_NULL)
12515 first_error (_("invalid instruction shape"));
12520 /* True if SHAPE is predominantly a quadword operation (most of the time, this
12521 means the Q bit should be set). */
12524 neon_quad (enum neon_shape shape)
12526 return neon_shape_class[shape] == SC_QUAD;
12530 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
12533 /* Allow modification to be made to types which are constrained to be
12534 based on the key element, based on bits set alongside N_EQK. */
12535 if ((typebits & N_EQK) != 0)
12537 if ((typebits & N_HLF) != 0)
12539 else if ((typebits & N_DBL) != 0)
12541 if ((typebits & N_SGN) != 0)
12542 *g_type = NT_signed;
12543 else if ((typebits & N_UNS) != 0)
12544 *g_type = NT_unsigned;
12545 else if ((typebits & N_INT) != 0)
12546 *g_type = NT_integer;
12547 else if ((typebits & N_FLT) != 0)
12548 *g_type = NT_float;
12549 else if ((typebits & N_SIZ) != 0)
12550 *g_type = NT_untyped;
12554 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
12555 operand type, i.e. the single type specified in a Neon instruction when it
12556 is the only one given. */
12558 static struct neon_type_el
12559 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
12561 struct neon_type_el dest = *key;
12563 gas_assert ((thisarg & N_EQK) != 0);
12565 neon_modify_type_size (thisarg, &dest.type, &dest.size);
12570 /* Convert Neon type and size into compact bitmask representation. */
12572 static enum neon_type_mask
12573 type_chk_of_el_type (enum neon_el_type type, unsigned size)
12580 case 8: return N_8;
12581 case 16: return N_16;
12582 case 32: return N_32;
12583 case 64: return N_64;
12591 case 8: return N_I8;
12592 case 16: return N_I16;
12593 case 32: return N_I32;
12594 case 64: return N_I64;
12602 case 16: return N_F16;
12603 case 32: return N_F32;
12604 case 64: return N_F64;
12612 case 8: return N_P8;
12613 case 16: return N_P16;
12621 case 8: return N_S8;
12622 case 16: return N_S16;
12623 case 32: return N_S32;
12624 case 64: return N_S64;
12632 case 8: return N_U8;
12633 case 16: return N_U16;
12634 case 32: return N_U32;
12635 case 64: return N_U64;
12646 /* Convert compact Neon bitmask type representation to a type and size. Only
12647 handles the case where a single bit is set in the mask. */
12650 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
12651 enum neon_type_mask mask)
/* N_EQK is not a concrete type; it cannot be decoded here (the failing
   return is not visible in this listing).  */
12653 if ((mask & N_EQK) != 0)
/* First derive the element size from which size-group the bit is in...  */
12656 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
12658 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
12660 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
12662 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
/* ...then derive the element type from which type-group it is in.  */
12667 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
12669 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
12670 *type = NT_unsigned;
12671 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
12672 *type = NT_integer;
12673 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
12674 *type = NT_untyped;
12675 else if ((mask & (N_P8 | N_P16)) != 0)
12676 else if ((mask & (N_F32 | N_F64)) != 0)
12685 /* Modify a bitmask of allowed types. This is only needed for type
12689 modify_types_allowed (unsigned allowed, unsigned mods)
12692 enum neon_el_type type;
/* Walk every single-bit type in the non-special range; for each bit that
   is both decodable and present in ALLOWED, apply MODS and accumulate the
   re-encoded result into the destination mask.  */
12698 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
12700 if (el_type_of_type_chk (&type, &size,
12701 (enum neon_type_mask) (allowed & i)) == SUCCESS)
12703 neon_modify_type_size (mods, &type, &size);
12704 destmask |= type_chk_of_el_type (type, size);
12711 /* Check type and return type classification.
12712 The manual states (paraphrase): If one datatype is given, it indicates the
12714 - the second operand, if there is one
12715 - the operand, if there is no second operand
12716 - the result, if there are no operands.
12717 This isn't quite good enough though, so we use a concept of a "key" datatype
12718 which is set on a per-instruction basis, which is the one which matters when
12719 only one data type is written.
12720 Note: this function has side-effects (e.g. filling in missing operands). All
12721 Neon instructions should call it before performing bit encoding. */
/* NOTE(review): many lines are absent from this listing (line-number gaps);
   comments below describe only the statements that remain visible.  */
12723 static struct neon_type_el
12724 neon_check_type (unsigned els, enum neon_shape ns, ...)
12727 unsigned i, pass, key_el = 0;
12728 unsigned types[NEON_MAX_TYPE_ELS];
12729 enum neon_el_type k_type = NT_invtype;
12730 unsigned k_size = -1u;
12731 struct neon_type_el badtype = {NT_invtype, -1};
12732 unsigned key_allowed = 0;
12734 /* Optional registers in Neon instructions are always (not) in operand 1.
12735 Fill in the missing operand here, if it was omitted. */
12736 if (els > 1 && !inst.operands[1].present)
12737 inst.operands[1] = inst.operands[0];
12739 /* Suck up all the varargs. */
12741 for (i = 0; i < els; i++)
12743 unsigned thisarg = va_arg (ap, unsigned);
12744 if (thisarg == N_IGNORE_TYPE)
12749 types[i] = thisarg;
/* Remember which operand carries the N_KEY flag: it is the one whose
   written type governs all the others.  */
12750 if ((thisarg & N_KEY) != 0)
/* Reject a type written both on the mnemonic and on an operand.  */
12755 if (inst.vectype.elems > 0)
12756 for (i = 0; i < els; i++)
12757 if (inst.operands[i].vectype.type != NT_invtype)
12759 first_error (_("types specified in both the mnemonic and operands"));
12763 /* Duplicate inst.vectype elements here as necessary.
12764 FIXME: No idea if this is exactly the same as the ARM assembler,
12765 particularly when an insn takes one register and one non-register
/* One type given but several operands: promote the single type to all
   element slots via the key operand.  */
12767 if (inst.vectype.elems == 1 && els > 1)
12770 inst.vectype.elems = els;
12771 inst.vectype.el[key_el] = inst.vectype.el[0];
12772 for (j = 0; j < els; j++)
12774 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
12777 else if (inst.vectype.elems == 0 && els > 0)
12780 /* No types were given after the mnemonic, so look for types specified
12781 after each operand. We allow some flexibility here; as long as the
12782 "key" operand has a type, we can infer the others. */
12783 for (j = 0; j < els; j++)
12784 if (inst.operands[j].vectype.type != NT_invtype)
12785 inst.vectype.el[j] = inst.operands[j].vectype;
12787 if (inst.operands[key_el].vectype.type != NT_invtype)
12789 for (j = 0; j < els; j++)
12790 if (inst.operands[j].vectype.type == NT_invtype)
12791 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
12796 first_error (_("operand types can't be inferred"));
12800 else if (inst.vectype.elems != els)
12802 first_error (_("type specifier has the wrong number of parts"));
/* Two passes: pass 0 records the key operand's type; pass 1 can then
   check N_EQK operands against the (possibly modified) key type.  */
12806 for (pass = 0; pass < 2; pass++)
12808 for (i = 0; i < els; i++)
12810 unsigned thisarg = types[i];
12811 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
12812 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
12813 enum neon_el_type g_type = inst.vectype.el[i].type;
12814 unsigned g_size = inst.vectype.el[i].size;
12816 /* Decay more-specific signed & unsigned types to sign-insensitive
12817 integer types if sign-specific variants are unavailable. */
12818 if ((g_type == NT_signed || g_type == NT_unsigned)
12819 && (types_allowed & N_SU_ALL) == 0)
12820 g_type = NT_integer;
12822 /* If only untyped args are allowed, decay any more specific types to
12823 them. Some instructions only care about signs for some element
12824 sizes, so handle that properly. */
12825 if ((g_size == 8 && (types_allowed & N_8) != 0)
12826 || (g_size == 16 && (types_allowed & N_16) != 0)
12827 || (g_size == 32 && (types_allowed & N_32) != 0)
12828 || (g_size == 64 && (types_allowed & N_64) != 0))
12829 g_type = NT_untyped;
/* On the key operand, record the permissible set for pass 1.  */
12833 if ((thisarg & N_KEY) != 0)
12837 key_allowed = thisarg & ~N_KEY;
12842 if ((thisarg & N_VFP) != 0)
12844 enum neon_shape_el regshape;
12845 unsigned regwidth, match;
12847 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
12850 first_error (_("invalid instruction shape"));
12853 regshape = neon_shape_tab[ns].el[i];
12854 regwidth = neon_shape_el_size[regshape];
12856 /* In VFP mode, operands must match register widths. If we
12857 have a key operand, use its width, else use the width of
12858 the current operand. */
12864 if (regwidth != match)
12866 first_error (_("operand size must match register width"));
/* Non-key, non-EQK operands: the written type must be in the allowed
   set.  EQK operands instead must equal the (modified) key type.  */
12871 if ((thisarg & N_EQK) == 0)
12873 unsigned given_type = type_chk_of_el_type (g_type, g_size);
12875 if ((given_type & types_allowed) == 0)
12877 first_error (_("bad type in Neon instruction"));
12883 enum neon_el_type mod_k_type = k_type;
12884 unsigned mod_k_size = k_size;
12885 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
12886 if (g_type != mod_k_type || g_size != mod_k_size)
12888 first_error (_("inconsistent types in Neon instruction"));
/* On success, hand back the key element's resolved type/size.  */
12896 return inst.vectype.el[key_el];
12899 /* Neon-style VFP instruction forwarding. */
12901 /* Thumb VFP instructions have 0xE in the condition field. */
12904 do_vfp_cond_or_thumb (void)
/* Thumb encoding: fixed 0xE condition.  ARM encoding: use the parsed
   condition (the selecting if/else is not visible in this listing).  */
12909 inst.instruction |= 0xe0000000;
12911 inst.instruction |= inst.cond << 28;
12914 /* Look up and encode a simple mnemonic, for use as a helper function for the
12915 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
12916 etc. It is assumed that operand parsing has already been done, and that the
12917 operands are in the form expected by the given opcode (this isn't necessarily
12918 the same as the form in which they were parsed, hence some massaging must
12919 take place before this function is called).
12920 Checks current arch version against that in the looked-up opcode. */
12923 do_vfp_nsyn_opcode (const char *opname)
12925 const struct asm_opcode *opcode;
/* Look the helper mnemonic up in the main opcode hash table.  */
12927 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
/* Verify the selected CPU actually has this opcode's feature set,
   checking the Thumb or ARM variant as appropriate.  */
12932 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
12933 thumb_mode ? *opcode->tvariant : *opcode->avariant),
/* Thumb path: take the Thumb value/encoder...  */
12940 inst.instruction = opcode->tvalue;
12941 opcode->tencode ();
/* ...ARM path: fold in the condition and use the ARM encoder.  */
12945 inst.instruction = (inst.cond << 28) | opcode->avalue;
12946 opcode->aencode ();
/* Forward vadd/vsub with VFP operands to the classic fadds/faddd etc.
   mnemonics, single- or double-precision (selection logic between the
   visible calls is missing from this listing).  */
12951 do_vfp_nsyn_add_sub (enum neon_shape rs)
/* Distinguish vadd from vsub by the mnemonic id stored in the low bits.  */
12953 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
12958 do_vfp_nsyn_opcode ("fadds");
12960 do_vfp_nsyn_opcode ("fsubs");
12965 do_vfp_nsyn_opcode ("faddd");
12967 do_vfp_nsyn_opcode ("fsubd");
12971 /* Check operand types to see if this is a VFP instruction, and if so call
12975 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
12977 enum neon_shape rs;
12978 struct neon_type_el et;
/* Two-operand form: F32/F64 scalar shapes.  */
12983 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
12984 et = neon_check_type (2, rs,
12985 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
/* Three-operand form.  */
12989 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
12990 et = neon_check_type (3, rs,
12991 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
/* A valid type means this really is VFP; PFN is then invoked
   (invocation not visible in this listing).  */
12998 if (et.type != NT_invtype)
/* vmla/vmls with VFP operands -> fmacs/fnmacs (single) or
   fmacd/fnmacd (double).  */
13009 do_vfp_nsyn_mla_mls (enum neon_shape rs)
13011 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13016 do_vfp_nsyn_opcode ("fmacs");
13018 do_vfp_nsyn_opcode ("fnmacs");
13023 do_vfp_nsyn_opcode ("fmacd");
13025 do_vfp_nsyn_opcode ("fnmacd");
/* vfma/vfms -> ffmas/ffnmas or ffmad/ffnmad (fused multiply-add).  */
13030 do_vfp_nsyn_fma_fms (enum neon_shape rs)
13032 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
13037 do_vfp_nsyn_opcode ("ffmas");
13039 do_vfp_nsyn_opcode ("ffnmas");
13044 do_vfp_nsyn_opcode ("ffmad");
13046 do_vfp_nsyn_opcode ("ffnmad");
/* vmul with VFP operands -> fmuls/fmuld.  */
13051 do_vfp_nsyn_mul (enum neon_shape rs)
13054 do_vfp_nsyn_opcode ("fmuls");
13056 do_vfp_nsyn_opcode ("fmuld");
/* vabs/vneg with VFP operands -> fabss/fnegs or fabsd/fnegd.  */
13060 do_vfp_nsyn_abs_neg (enum neon_shape rs)
/* Bit 7 distinguishes neg from abs in the pseudo-opcode value.  */
13062 int is_neg = (inst.instruction & 0x80) != 0;
13063 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
13068 do_vfp_nsyn_opcode ("fnegs");
13070 do_vfp_nsyn_opcode ("fabss");
13075 do_vfp_nsyn_opcode ("fnegd");
13077 do_vfp_nsyn_opcode ("fabsd");
13081 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
13082 insns belong to Neon, and are handled elsewhere. */
13085 do_vfp_nsyn_ldm_stm (int is_dbmode)
/* Bit 20 is the load/store (L) bit in the encoding.  */
13087 int is_ldm = (inst.instruction & (1 << 20)) != 0;
13091 do_vfp_nsyn_opcode ("fldmdbs");
13093 do_vfp_nsyn_opcode ("fldmias");
13098 do_vfp_nsyn_opcode ("fstmdbs");
13100 do_vfp_nsyn_opcode ("fstmias");
/* vsqrt -> fsqrts/fsqrtd.  */
13105 do_vfp_nsyn_sqrt (void)
13107 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13108 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13111 do_vfp_nsyn_opcode ("fsqrts");
13113 do_vfp_nsyn_opcode ("fsqrtd");
/* vdiv -> fdivs/fdivd.  */
13117 do_vfp_nsyn_div (void)
13119 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13120 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13121 N_F32 | N_F64 | N_KEY | N_VFP);
13124 do_vfp_nsyn_opcode ("fdivs");
13126 do_vfp_nsyn_opcode ("fdivd");
/* vnmul: encoded directly via the dyadic VFP encoders rather than a
   forwarded mnemonic.  */
13130 do_vfp_nsyn_nmul (void)
13132 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13133 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13134 N_F32 | N_F64 | N_KEY | N_VFP);
13138 NEON_ENCODE (SINGLE, inst);
13139 do_vfp_sp_dyadic ();
13143 NEON_ENCODE (DOUBLE, inst);
13144 do_vfp_dp_rd_rn_rm ();
13146 do_vfp_cond_or_thumb ();
/* vcmp/vcmpe with VFP operands.  Register form compares two registers;
   otherwise the second operand is an immediate zero and the mnemonic is
   rewritten to the compare-with-zero variant.  */
13150 do_vfp_nsyn_cmp (void)
13152 if (inst.operands[1].isreg)
13154 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13155 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13159 NEON_ENCODE (SINGLE, inst);
13160 do_vfp_sp_monadic ();
13164 NEON_ENCODE (DOUBLE, inst);
13165 do_vfp_dp_rd_rm ();
/* Compare-against-immediate form (shape has an 'I' element).  */
13170 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
13171 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
/* Map vcmp -> vcmpz and vcmpe -> vcmpez by adjusting the mnemonic id.  */
13173 switch (inst.instruction & 0x0fffffff)
13176 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
13179 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
13187 NEON_ENCODE (SINGLE, inst);
13188 do_vfp_sp_compare_z ();
13192 NEON_ENCODE (DOUBLE, inst);
13196 do_vfp_cond_or_thumb ();
/* Shift the operands up one slot and synthesise "sp!" as operand 0,
   so vpush/vpop can reuse the fldm/fstm encodings.  */
13200 nsyn_insert_sp (void)
13202 inst.operands[1] = inst.operands[0];
13203 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
13204 inst.operands[0].reg = REG_SP;
13205 inst.operands[0].isreg = 1;
13206 inst.operands[0].writeback = 1;
13207 inst.operands[0].present = 1;
/* vpush -> fstmdbs/fstmdbd on sp! (single vs double by register type).  */
13211 do_vfp_nsyn_push (void)
13214 if (inst.operands[1].issingle)
13215 do_vfp_nsyn_opcode ("fstmdbs");
13217 do_vfp_nsyn_opcode ("fstmdbd");
/* vpop -> fldmias/fldmiad on sp!.  */
13221 do_vfp_nsyn_pop (void)
13224 if (inst.operands[1].issingle)
13225 do_vfp_nsyn_opcode ("fldmias");
13227 do_vfp_nsyn_opcode ("fldmiad");
13230 /* Fix up Neon data-processing instructions, ORing in the correct bits for
13231 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
13234 neon_dp_fixup (struct arm_it* insn)
13236 unsigned int i = insn->instruction;
13241 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
13252 insn->instruction = i;
13255 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
13259 neon_logbits (unsigned x)
/* ffs() is 1-based, so 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3.  */
13261 return ffs (x) - 4;
/* Split a Neon register number into its 4 low bits and high bit, as the
   encodings store them in separate fields.  */
13264 #define LOW4(R) ((R) & 0xf)
13265 #define HI1(R) (((R) >> 4) & 1)
13267 /* Encode insns with bit pattern:
13269 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
13270 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
13272 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
13273 different meaning for some instruction. */
13276 neon_three_same (int isquad, int ubit, int size)
/* Rd into bits 15-12 plus D bit 22; Rn into 19-16 plus N bit 7;
   Rm into 3-0 plus M bit 5.  */
13278 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13279 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13280 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13281 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13282 inst.instruction |= LOW4 (inst.operands[2].reg);
13283 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13284 inst.instruction |= (isquad != 0) << 6;
13285 inst.instruction |= (ubit != 0) << 24;
/* Size field at bits 21-20 (only written when SIZE != -1; the guard is
   not visible in this listing).  */
13287 inst.instruction |= neon_logbits (size) << 20;
13289 neon_dp_fixup (&inst);
13292 /* Encode instructions of the form:
13294 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
13295 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
13297 Don't write size if SIZE == -1. */
13300 neon_two_same (int qbit, int ubit, int size)
13302 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13303 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13304 inst.instruction |= LOW4 (inst.operands[1].reg);
13305 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13306 inst.instruction |= (qbit != 0) << 6;
13307 inst.instruction |= (ubit != 0) << 24;
/* Two-operand form keeps its size field at bits 19-18.  */
13310 inst.instruction |= neon_logbits (size) << 18;
13312 neon_dp_fixup (&inst);
13315 /* Neon instruction encoders, in approximate order of appearance. */
/* Three-same-register ops over signed/unsigned 8/16/32-bit elements;
   the U bit reflects unsignedness.  */
13318 do_neon_dyadic_i_su (void)
13320 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13321 struct neon_type_el et = neon_check_type (3, rs,
13322 N_EQK, N_EQK, N_SU_32 | N_KEY);
13323 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
/* Same, but also allowing 64-bit elements (N_SU_ALL).  */
13327 do_neon_dyadic_i64_su (void)
13329 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13330 struct neon_type_el et = neon_check_type (3, rs,
13331 N_EQK, N_EQK, N_SU_ALL | N_KEY);
13332 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
/* Encode an immediate-shift instruction: IMMBITS into bits 21-16, and
   the element size encoded one-hot into bit 7 / bits 21-19 via SIZE.  */
13336 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
13339 unsigned size = et.size >> 3;
13340 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13341 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13342 inst.instruction |= LOW4 (inst.operands[1].reg);
13343 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13344 inst.instruction |= (isquad != 0) << 6;
13345 inst.instruction |= immbits << 16;
13346 inst.instruction |= (size >> 3) << 7;
13347 inst.instruction |= (size & 0x7) << 19;
/* U bit only written when the caller asks (guard not visible here).  */
13349 inst.instruction |= (uval != 0) << 24;
13351 neon_dp_fixup (&inst);
/* vshl: immediate form if operand 2 is not a register, else the
   3-register form (with operands swapped, see comment below).  */
13355 do_neon_shl_imm (void)
13357 if (!inst.operands[2].isreg)
13359 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13360 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
13361 NEON_ENCODE (IMMED, inst);
13362 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
13366 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13367 struct neon_type_el et = neon_check_type (3, rs,
13368 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13371 /* VSHL/VQSHL 3-register variants have syntax such as:
13373 whereas other 3-register operations encoded by neon_three_same have
13376 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
13378 tmp = inst.operands[2].reg;
13379 inst.operands[2].reg = inst.operands[1].reg;
13380 inst.operands[1].reg = tmp;
13381 NEON_ENCODE (INTEGER, inst);
13382 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
/* vqshl: same structure as vshl, but the immediate form writes the U
   bit from the element signedness.  */
13387 do_neon_qshl_imm (void)
13389 if (!inst.operands[2].isreg)
13391 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13392 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13394 NEON_ENCODE (IMMED, inst);
13395 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13396 inst.operands[2].imm);
13400 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13401 struct neon_type_el et = neon_check_type (3, rs,
13402 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
13405 /* See note in do_neon_shl_imm. */
13406 tmp = inst.operands[2].reg;
13407 inst.operands[2].reg = inst.operands[1].reg;
13408 inst.operands[1].reg = tmp;
13409 NEON_ENCODE (INTEGER, inst);
13410 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
/* vrshl: register form only; same Dn/Dm swap as vshl.  */
13415 do_neon_rshl (void)
13417 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13418 struct neon_type_el et = neon_check_type (3, rs,
13419 N_EQK, N_EQK, N_SU_ALL | N_KEY);
13422 tmp = inst.operands[2].reg;
13423 inst.operands[2].reg = inst.operands[1].reg;
13424 inst.operands[1].reg = tmp;
13425 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
/* Pick the cmode encoding for a logic-immediate (VBIC/VORR-style)
   constant, writing the 8 payload bits to *IMMBITS.  Failure paths and
   the returned cmode values fall in listing gaps and are not visible.  */
13429 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
13431 /* Handle .I8 pseudo-instructions. */
13434 /* Unfortunately, this will make everything apart from zero out-of-range.
13435 FIXME is this the intended semantics? There doesn't seem much point in
13436 accepting .I8 if so. */
/* Widen the 8-bit pattern to 16 bits so the .I16 cases below apply.  */
13437 immediate |= immediate << 8;
/* 32-bit element: the constant must occupy exactly one byte lane.  */
13443 if (immediate == (immediate & 0x000000ff))
13445 *immbits = immediate;
13448 else if (immediate == (immediate & 0x0000ff00))
13450 *immbits = immediate >> 8;
13453 else if (immediate == (immediate & 0x00ff0000))
13455 *immbits = immediate >> 16;
13458 else if (immediate == (immediate & 0xff000000))
13460 *immbits = immediate >> 24;
/* 16-bit element: both halves must match, then one of two byte lanes.  */
13463 if ((immediate & 0xffff) != (immediate >> 16))
13464 goto bad_immediate;
13465 immediate &= 0xffff;
13468 if (immediate == (immediate & 0x000000ff))
13470 *immbits = immediate;
13473 else if (immediate == (immediate & 0x0000ff00))
13475 *immbits = immediate >> 8;
13480 first_error (_("immediate value out of range"));
13484 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
13488 neon_bits_same_in_bytes (unsigned imm)
13490 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
13491 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
13492 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
13493 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
13496 /* For immediate of above form, return 0bABCD. */
13499 neon_squash_bits (unsigned imm)
/* Take bit 0 of each byte lane and pack them into the low nibble.  */
13501 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
13502 | ((imm & 0x01000000) >> 21);
13505 /* Compress quarter-float representation to 0b...000 abcdefgh. */
13508 neon_qfloat_bits (unsigned imm)
/* Sign bit from bit 31 into bit 7; exponent/mantissa bits from 25-19.  */
13510 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
13513 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
13514 the instruction. *OP is passed as the initial value of the op field, and
13515 may be set to a different value depending on the constant (i.e.
13516 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
13517 MVN). If the immediate looks like a repeated pattern then also
13518 try smaller element sizes. */
/* NOTE(review): the returned cmode constants for most branches fall in
   listing gaps; only the payload computations remain visible.  */
13521 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
13522 unsigned *immbits, int *op, int size,
13523 enum neon_el_type type)
13525 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
13527 if (type == NT_float && !float_p)
/* Quarter-precision float immediate: 32-bit only, MOV form only.  */
13530 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
13532 if (size != 32 || *op == 1)
13534 *immbits = neon_qfloat_bits (immlo);
/* 64-bit "each byte all-ones or all-zeros" pattern.  */
13540 if (neon_bits_same_in_bytes (immhi)
13541 && neon_bits_same_in_bytes (immlo))
13545 *immbits = (neon_squash_bits (immhi) << 4)
13546 | neon_squash_bits (immlo);
/* Below here only 32-bit-repeating constants can be encoded.  */
13551 if (immhi != immlo)
/* 32-bit element: single byte lane, or the two "0xff-filled" cmodes.  */
13557 if (immlo == (immlo & 0x000000ff))
13562 else if (immlo == (immlo & 0x0000ff00))
13564 *immbits = immlo >> 8;
13567 else if (immlo == (immlo & 0x00ff0000))
13569 *immbits = immlo >> 16;
13572 else if (immlo == (immlo & 0xff000000))
13574 *immbits = immlo >> 24;
13577 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
13579 *immbits = (immlo >> 8) & 0xff;
13582 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
13584 *immbits = (immlo >> 16) & 0xff;
/* 16-bit element: both halves must repeat.  */
13588 if ((immlo & 0xffff) != (immlo >> 16))
13595 if (immlo == (immlo & 0x000000ff))
13600 else if (immlo == (immlo & 0x0000ff00))
13602 *immbits = immlo >> 8;
/* 8-bit element: all four bytes must repeat.  */
13606 if ((immlo & 0xff) != (immlo >> 8))
13611 if (immlo == (immlo & 0x000000ff))
13613 /* Don't allow MVN with 8-bit immediate. */
13623 /* Write immediate bits [7:0] to the following locations:
13625 |28/24|23 19|18 16|15 4|3 0|
13626 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
13628 This function is used by VMOV/VMVN/VORR/VBIC. */
13631 neon_write_immbits (unsigned immbits)
13633 inst.instruction |= immbits & 0xf;
13634 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
13635 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
13638 /* Invert low-order SIZE bits of XHI:XLO. */
13641 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
/* NULL pointers are treated as zero inputs.  */
13643 unsigned immlo = xlo ? *xlo : 0;
13644 unsigned immhi = xhi ? *xhi : 0;
/* Per-size inversion (the switch scaffolding is not visible): 8-, 16-,
   64- (high word then fall through) and 32-bit cases.  */
13649 immlo = (~immlo) & 0xff;
13653 immlo = (~immlo) & 0xffff;
13657 immhi = (~immhi) & 0xffffffff;
13658 /* fall through. */
13661 immlo = (~immlo) & 0xffffffff;
/* VAND/VBIC/VORR/VORN/VEOR and the immediate pseudo-forms.  Register
   form is a plain three-same encoding; otherwise an immediate variant
   is selected by cmode, with VBIC/VORR immediates realised by inverting
   the constant when needed.  */
13676 do_neon_logic (void)
13678 if (inst.operands[2].present && inst.operands[2].isreg)
13680 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13681 neon_check_type (3, rs, N_IGNORE_TYPE);
13682 /* U bit and size field were set as part of the bitmask. */
13683 NEON_ENCODE (INTEGER, inst);
13684 neon_three_same (neon_quad (rs), 0, -1);
/* Immediate form: 2- or 3-operand syntax.  */
13688 const int three_ops_form = (inst.operands[2].present
13689 && !inst.operands[2].isreg);
13690 const int immoperand = (three_ops_form ? 2 : 1);
13691 enum neon_shape rs = (three_ops_form
13692 ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
13693 : neon_select_shape (NS_DI, NS_QI, NS_NULL));
13694 struct neon_type_el et = neon_check_type (2, rs,
13695 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
13696 enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
13700 if (et.type == NT_invtype)
13703 if (three_ops_form)
13704 constraint (inst.operands[0].reg != inst.operands[1].reg,
13705 _("first and second operands shall be the same register"));
13707 NEON_ENCODE (IMMED, inst);
13709 immbits = inst.operands[immoperand].imm;
13712 /* .i64 is a pseudo-op, so the immediate must be a repeating
13714 if (immbits != (inst.operands[immoperand].regisimm ?
13715 inst.operands[immoperand].reg : 0))
13717 /* Set immbits to an invalid constant. */
13718 immbits = 0xdeadbeef;
/* Per-opcode cmode selection (the switch labels sit in listing gaps).  */
13725 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13729 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13733 /* Pseudo-instruction for VBIC. */
13734 neon_invert_size (&immbits, 0, et.size);
13735 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
13739 /* Pseudo-instruction for VORR. */
13740 neon_invert_size (&immbits, 0, et.size);
13741 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
/* Assemble the final immediate encoding.  */
13751 inst.instruction |= neon_quad (rs) << 6;
13752 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13753 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13754 inst.instruction |= cmode << 8;
13755 neon_write_immbits (immbits);
13757 neon_dp_fixup (&inst);
/* VBIF/VBIT/VBSL: untyped three-same encoding, size field untouched.  */
13762 do_neon_bitfield (void)
13764 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13765 neon_check_type (3, rs, N_IGNORE_TYPE);
13766 neon_three_same (neon_quad (rs), 0, -1);
/* Shared encoder for dyadic ops that may be integer or float.  Float
   types use the FLOAT encoding; for integers, the U bit is set when the
   element type equals UBIT_MEANING.  */
13770 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
13773 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
13774 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
13776 if (et.type == NT_float)
13778 NEON_ENCODE (FLOAT, inst);
13779 neon_three_same (neon_quad (rs), 0, -1);
13783 NEON_ENCODE (INTEGER, inst);
13784 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
/* Signed/unsigned/float 32-bit dyadic ops.  */
13789 do_neon_dyadic_if_su (void)
13791 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13795 do_neon_dyadic_if_su_d (void)
13797 /* This version only allow D registers, but that constraint is enforced during
13798 operand parsing so we don't need to do anything extra here. */
13799 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
13803 do_neon_dyadic_if_i_d (void)
13805 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13806 affected if we specify unsigned args. */
13807 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
/* Flags selecting which checks vfp_or_neon_is_neon performs
   (NEON_CHECK_CC presumably occupies bit 1 — its enumerator line is not
   visible in this listing).  */
13810 enum vfp_or_neon_is_neon_bits
13813 NEON_CHECK_ARCH = 2
13816 /* Call this function if an instruction which may have belonged to the VFP or
13817 Neon instruction sets, but turned out to be a Neon instruction (due to the
13818 operand types involved, etc.). We have to check and/or fix-up a couple of
13821 - Make sure the user hasn't attempted to make a Neon instruction
13823 - Alter the value in the condition code field if necessary.
13824 - Make sure that the arch supports Neon instructions.
13826 Which of these operations take place depends on bits from enum
13827 vfp_or_neon_is_neon_bits.
13829 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
13830 current instruction's condition is COND_ALWAYS, the condition field is
13831 changed to inst.uncond_value. This is necessary because instructions shared
13832 between VFP and Neon may be conditional for the VFP variants only, and the
13833 unconditional Neon version must have, e.g., 0xF in the condition field. */
13836 vfp_or_neon_is_neon (unsigned check)
13838 /* Conditions are always legal in Thumb mode (IT blocks). */
13839 if (!thumb_mode && (check & NEON_CHECK_CC))
13841 if (inst.cond != COND_ALWAYS)
13843 first_error (_(BAD_COND));
/* Force the unconditional encoding (e.g. 0xF) where one is defined.  */
13846 if (inst.uncond_value != -1)
13847 inst.instruction |= inst.uncond_value << 28;
13850 if ((check & NEON_CHECK_ARCH)
13851 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
13853 first_error (_(BAD_FPU));
/* vadd/vsub: try VFP forwarding first, else encode as Neon.  */
13861 do_neon_addsub_if_i (void)
13863 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
13866 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
13869 /* The "untyped" case can't happen. Do this to stop the "U" bit being
13870 affected if we specify unsigned args. */
13871 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
13874 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
13876 V<op> A,B (A is operand 0, B is operand 2)
13881 so handle that case specially. */
13884 neon_exchange_operands (void)
/* alloca scratch: per-instruction lifetime, no free needed.  */
13886 void *scratch = alloca (sizeof (inst.operands[0]));
13887 if (inst.operands[1].present)
13889 /* Swap operands[1] and operands[2]. */
13890 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
13891 inst.operands[1] = inst.operands[2];
13892 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
/* Two-operand form: shuffle so the compare reads A,B as 1,2.  */
13896 inst.operands[1] = inst.operands[2];
13897 inst.operands[2] = inst.operands[0];
/* Comparisons: register form (optionally operand-exchanged for the
   inverted condition) or compare-against-immediate form.  */
13902 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
13904 if (inst.operands[2].isreg)
13907 neon_exchange_operands ();
13908 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
13912 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13913 struct neon_type_el et = neon_check_type (2, rs,
13914 N_EQK | N_SIZ, immtypes | N_KEY);
13916 NEON_ENCODE (IMMED, inst);
13917 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13918 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13919 inst.instruction |= LOW4 (inst.operands[1].reg);
13920 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
13921 inst.instruction |= neon_quad (rs) << 6;
13922 inst.instruction |= (et.type == NT_float) << 10;
13923 inst.instruction |= neon_logbits (et.size) << 18;
13925 neon_dp_fixup (&inst);
/* vcge/vcgt-style wrapper (enclosing function name lost to a gap).  */
13932 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
/* Inverted-condition comparisons (vcle/vclt) exchange operands.  */
13936 do_neon_cmp_inv (void)
13938 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
/* Equality comparison wrapper (name lost to a gap; presumably vceq).  */
13944 neon_compare (N_IF_32, N_IF_32, FALSE);
13947 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
13948 scalars, which are encoded in 5 bits, M : Rm.
13949 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
13950 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
13954 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
13956 unsigned regno = NEON_SCALAR_REG (scalar);
13957 unsigned elno = NEON_SCALAR_INDEX (scalar);
/* 16-bit case: register 0-7, index 0-3.  */
13962 if (regno > 7 || elno > 3)
13964 return regno | (elno << 3);
/* 32-bit case: register 0-15, index 0-1.  */
13967 if (regno > 15 || elno > 1)
13969 return regno | (elno << 4);
13973 first_error (_("scalar out of range for multiply instruction"));
13979 /* Encode multiply / multiply-accumulate scalar instructions. */
13982 neon_mul_mac (struct neon_type_el et, int ubit)
13986 /* Give a more helpful error message if we have an invalid type. */
13987 if (et.type == NT_invtype)
13990 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
13991 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13992 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13993 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13994 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13995 inst.instruction |= LOW4 (scalar);
13996 inst.instruction |= HI1 (scalar) << 5;
13997 inst.instruction |= (et.type == NT_float) << 8;
13998 inst.instruction |= neon_logbits (et.size) << 20;
13999 inst.instruction |= (ubit != 0) << 24;
14001 neon_dp_fixup (&inst);
/* vmla/vmls: VFP forwarding first; then Neon scalar form if operand 2
   is a scalar, else the plain three-register form.  */
14005 do_neon_mac_maybe_scalar (void)
14007 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
14010 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14013 if (inst.operands[2].isscalar)
14015 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14016 struct neon_type_el et = neon_check_type (3, rs,
14017 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
14018 NEON_ENCODE (SCALAR, inst);
14019 neon_mul_mac (et, neon_quad (rs));
14023 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14024 affected if we specify unsigned args. */
14025 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
/* vfma/vfms: VFP forwarding first, else Neon dyadic encoding.  */
14030 do_neon_fmac (void)
14032 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
14035 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14038 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
/* Three-same encoder over untyped 8/16/32 elements (the enclosing
   function's name line is missing from this listing).  */
14044 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14045 struct neon_type_el et = neon_check_type (3, rs,
14046 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
14047 neon_three_same (neon_quad (rs), 0, et.size);
14050 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14051 same types as the MAC equivalents. The polynomial type for this instruction
14052 is encoded the same as the integer type. */
/* vmul entry point (name line missing; presumably do_neon_mul).  */
14057 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
14060 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14063 if (inst.operands[2].isscalar)
14064 do_neon_mac_maybe_scalar ();
14066 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
/* vqdmulh/vqrdmulh: scalar or three-register form over S16/S32.  */
14070 do_neon_qdmulh (void)
14072 if (inst.operands[2].isscalar)
14074 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14075 struct neon_type_el et = neon_check_type (3, rs,
14076 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14077 NEON_ENCODE (SCALAR, inst);
14078 neon_mul_mac (et, neon_quad (rs));
14082 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14083 struct neon_type_el et = neon_check_type (3, rs,
14084 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14085 NEON_ENCODE (INTEGER, inst);
14086 /* The U bit (rounding) comes from bit mask. */
14087 neon_three_same (neon_quad (rs), 0, et.size);
/* vacge/vacgt: absolute compare, F32 only, U bit forced on.  */
14092 do_neon_fcmp_absolute (void)
14094 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14095 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14096 /* Size field comes from bit mask. */
14097 neon_three_same (neon_quad (rs), 1, -1);
/* vacle/vaclt: operand-exchanged form of the above.  */
14101 do_neon_fcmp_absolute_inv (void)
14103 neon_exchange_operands ();
14104 do_neon_fcmp_absolute ();
/* vrecps/vrsqrts step instructions, F32 only.  */
14108 do_neon_step (void)
14110 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14111 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14112 neon_three_same (neon_quad (rs), 0, -1);
/* Encode VABS/VNEG.  VFP-syntax single/double forms are tried first; the
   Neon form accepts signed integer or F32 elements and is encoded here
   directly (no shared two-same helper).  */
14116 do_neon_abs_neg (void)
14118 enum neon_shape rs;
14119 struct neon_type_el et;
14121 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
14124 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14127 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14128 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
      /* D/M register fields, Q bit, float flag (bit 10) and size field.  */
14130 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14131 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14132 inst.instruction |= LOW4 (inst.operands[1].reg);
14133 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14134 inst.instruction |= neon_quad (rs) << 6;
14135 inst.instruction |= (et.type == NT_float) << 10;
14136 inst.instruction |= neon_logbits (et.size) << 18;
14138 neon_dp_fixup (&inst);
/* Body of do_neon_sli (VSLI — shift left and insert); the header line is
   outside this extract.  Immediate must be in [0, element size).  */
14144 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14145 struct neon_type_el et = neon_check_type (2, rs,
14146 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14147 int imm = inst.operands[2].imm;
14148 constraint (imm < 0 || (unsigned)imm >= et.size,
14149 _("immediate out of range for insert"));
14150 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
/* Body of do_neon_sri (VSRI — shift right and insert); header outside this
   extract.  Immediate must be in [1, element size]; note the encoded shift
   amount is et.size - imm (right shifts encode as size minus immediate).  */
14156 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14157 struct neon_type_el et = neon_check_type (2, rs,
14158 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14159 int imm = inst.operands[2].imm;
14160 constraint (imm < 1 || (unsigned)imm > et.size,
14161 _("immediate out of range for insert"));
14162 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
/* Encode VQSHLU (saturating shift left, unsigned result) with immediate
   shift count in [0, element size).  */
14166 do_neon_qshlu_imm (void)
14168 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14169 struct neon_type_el et = neon_check_type (2, rs,
14170 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
14171 int imm = inst.operands[2].imm;
14172 constraint (imm < 0 || (unsigned)imm >= et.size,
14173 _("immediate out of range for shift"));
14174 /* Only encodes the 'U present' variant of the instruction.
14175 In this case, signed types have OP (bit 8) set to 0.
14176 Unsigned types have OP set to 1. */
14177 inst.instruction |= (et.type == NT_unsigned) << 8;
14178 /* The rest of the bits are the same as other immediate shifts. */
14179 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
/* Encode VQMOVN (saturating narrowing move, Dd <- Qm).  Source elements may
   be signed or unsigned; result keeps the same signedness.  */
14183 do_neon_qmovn (void)
14185 struct neon_type_el et = neon_check_type (2, NS_DQ,
14186 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14187 /* Saturating move where operands can be signed or unsigned, and the
14188 destination has the same signedness. */
14189 NEON_ENCODE (INTEGER, inst);
      /* op field: 0xc0 selects the unsigned variant, 0x80 the signed one.  */
14190 if (et.type == NT_unsigned)
14191 inst.instruction |= 0xc0;
14193 inst.instruction |= 0x80;
14194 neon_two_same (0, 1, et.size / 2);
/* Encode VQMOVUN: saturating narrowing move producing unsigned results
   from signed sources (S16/S32/S64 only).  */
14198 do_neon_qmovun (void)
14200 struct neon_type_el et = neon_check_type (2, NS_DQ,
14201 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14202 /* Saturating move with unsigned results. Operands must be signed. */
14203 NEON_ENCODE (INTEGER, inst);
14204 neon_two_same (0, 1, et.size / 2);
/* Encode VQSHRN/VQRSHRN (saturating shift right and narrow).  A zero shift
   count is rewritten to the equivalent VQMOVN pseudo-op (gapped lines hide
   the rewrite's control flow here).  */
14208 do_neon_rshift_sat_narrow (void)
14210 /* FIXME: Types for narrowing. If operands are signed, results can be signed
14211 or unsigned. If operands are unsigned, results must also be unsigned. */
14212 struct neon_type_el et = neon_check_type (2, NS_DQI,
14213 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14214 int imm = inst.operands[2].imm;
14215 /* This gets the bounds check, size encoding and immediate bits calculation
14219 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
14220 VQMOVN.I<size> <Dd>, <Qm>. */
14223 inst.operands[2].present = 0;
14224 inst.instruction = N_MNEM_vqmovn;
      /* Right shifts encode as (element size - immediate).  */
14229 constraint (imm < 1 || (unsigned)imm > et.size,
14230 _("immediate out of range"));
14231 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
/* Encode VQSHRUN/VQRSHRUN (saturating shift right, narrow, unsigned
   result from signed source).  Zero shift count becomes VQMOVUN.  */
14235 do_neon_rshift_sat_narrow_u (void)
14237 /* FIXME: Types for narrowing. If operands are signed, results can be signed
14238 or unsigned. If operands are unsigned, results must also be unsigned. */
14239 struct neon_type_el et = neon_check_type (2, NS_DQI,
14240 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14241 int imm = inst.operands[2].imm;
14242 /* This gets the bounds check, size encoding and immediate bits calculation
14246 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
14247 VQMOVUN.I<size> <Dd>, <Qm>. */
14250 inst.operands[2].present = 0;
14251 inst.instruction = N_MNEM_vqmovun;
14256 constraint (imm < 1 || (unsigned)imm > et.size,
14257 _("immediate out of range"));
14258 /* FIXME: The manual is kind of unclear about what value U should have in
14259 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
14261 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
/* Encode VMOVN (narrowing move, Dd <- Qm) for I16/I32/I64 sources; the
   result element is half the source width.  */
14265 do_neon_movn (void)
14267 struct neon_type_el et = neon_check_type (2, NS_DQ,
14268 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14269 NEON_ENCODE (INTEGER, inst);
14270 neon_two_same (0, 1, et.size / 2);
/* Encode VSHRN/VRSHRN (shift right and narrow).  A zero immediate is a
   pseudo-op for VMOVN (rewrite path partially hidden by extract gaps).  */
14274 do_neon_rshift_narrow (void)
14276 struct neon_type_el et = neon_check_type (2, NS_DQI,
14277 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14278 int imm = inst.operands[2].imm;
14279 /* This gets the bounds check, size encoding and immediate bits calculation
14283 /* If immediate is zero then we are a pseudo-instruction for
14284 VMOVN.I<size> <Dd>, <Qm> */
14287 inst.operands[2].present = 0;
14288 inst.instruction = N_MNEM_vmovn;
14293 constraint (imm < 1 || (unsigned)imm > et.size,
14294 _("immediate out of range for narrowing operation"));
14295 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
/* Encode VSHLL (shift left long, Qd <- Dm).  The maximum-shift form
   (imm == element size) has its own hard-wired encoding; other shift
   amounts use the generic immediate-shift encoder with a stricter type
   check (S/U 8/16/32 only).  */
14299 do_neon_shll (void)
14301 /* FIXME: Type checking when lengthening. */
14302 struct neon_type_el et = neon_check_type (2, NS_QDI,
14303 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
14304 unsigned imm = inst.operands[2].imm;
14306 if (imm == et.size)
14308 /* Maximum shift variant. */
14309 NEON_ENCODE (INTEGER, inst);
14310 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14311 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14312 inst.instruction |= LOW4 (inst.operands[1].reg);
14313 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14314 inst.instruction |= neon_logbits (et.size) << 18;
14316 neon_dp_fixup (&inst);
14320 /* A more-specific type check for non-max versions. */
14321 et = neon_check_type (2, NS_QDI,
14322 N_EQK | N_DBL, N_SU_32 | N_KEY);
14323 NEON_ENCODE (IMMED, inst);
14324 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
14328 /* Check the various types for the VCVT instruction, and return which version
14329 the current instruction is. */
/* Each CVT_VAR trial re-runs the type check for one (dest, src) type pair;
   on a match it clears any pending type error and (presumably, in the gapped
   part of the macro) returns the flavour index C.  Indices 0-5 are Neon,
   6-19 are VFP — callers index encoding tables by this value.  */
14332 neon_cvt_flavour (enum neon_shape rs)
14334 #define CVT_VAR(C,X,Y) \
14335 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
14336 if (et.type != NT_invtype) \
14338 inst.error = NULL; \
14341 struct neon_type_el et;
14342 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
14343 || rs == NS_FF) ? N_VFP : 0;
14344 /* The instruction versions which take an immediate take one register
14345 argument, which is extended to the width of the full register. Thus the
14346 "source" and "destination" registers must have the same width. Hack that
14347 here by making the size equal to the key (wider, in this case) operand. */
14348 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
14350 CVT_VAR (0, N_S32, N_F32);
14351 CVT_VAR (1, N_U32, N_F32);
14352 CVT_VAR (2, N_F32, N_S32);
14353 CVT_VAR (3, N_F32, N_U32);
14354 /* Half-precision conversions. */
14355 CVT_VAR (4, N_F32, N_F16);
14356 CVT_VAR (5, N_F16, N_F32);
14360 /* VFP instructions. */
14361 CVT_VAR (6, N_F32, N_F64);
14362 CVT_VAR (7, N_F64, N_F32);
14363 CVT_VAR (8, N_S32, N_F64 | key);
14364 CVT_VAR (9, N_U32, N_F64 | key);
14365 CVT_VAR (10, N_F64 | key, N_S32);
14366 CVT_VAR (11, N_F64 | key, N_U32);
14367 /* VFP instructions with bitshift. */
14368 CVT_VAR (12, N_F32 | key, N_S16);
14369 CVT_VAR (13, N_F32 | key, N_U16);
14370 CVT_VAR (14, N_F64 | key, N_S16);
14371 CVT_VAR (15, N_F64 | key, N_U16);
14372 CVT_VAR (16, N_S16, N_F32 | key);
14373 CVT_VAR (17, N_U16, N_F32 | key);
14374 CVT_VAR (18, N_S16, N_F64 | key);
14375 CVT_VAR (19, N_U16, N_F64 | key);
14381 /* Neon-syntax VFP conversions. */
/* Translate a Neon-syntax VCVT into the old VFP mnemonic selected by
   FLAVOUR and emit it via do_vfp_nsyn_opcode.  The immediate-bitshift
   shapes collapse operands 1 and 2 because those VFP encoders expect only
   two operands (the opname tables themselves fall in extract gaps).  */
14384 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
14386 const char *opname = 0;
14388 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
14390 /* Conversions with immediate bitshift. */
14391 const char *enc[] =
14415 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
14417 opname = enc[flavour];
14418 constraint (inst.operands[0].reg != inst.operands[1].reg,
14419 _("operands 0 and 1 must be the same register"));
14420 inst.operands[1] = inst.operands[2];
14421 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
14426 /* Conversions without bitshift. */
14427 const char *enc[] =
14443 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
14444 opname = enc[flavour];
14448 do_vfp_nsyn_opcode (opname);
/* Emit the round-to-zero VFP conversion (ftosizs etc.) selected by the
   detected flavour; the mnemonic table body is in an extract gap.  */
14452 do_vfp_nsyn_cvtz (void)
14454 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
14455 int flavour = neon_cvt_flavour (rs);
14456 const char *enc[] =
      /* Some flavours have no Z-variant; a NULL entry means "not valid".  */
14470 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
14471 do_vfp_nsyn_opcode (enc[flavour]);
/* Core VCVT encoder shared by the VCVT/VCVTR entry points.  Dispatches on
   the operand shape: VFP round-to-zero (PR11109), VFP register forms,
   Neon fixed-point immediate, Neon integer<->float, and the Advanced SIMD
   half-precision forms.  Several branch heads fall in extract gaps.  */
14475 do_neon_cvt_1 (bfd_boolean round_to_zero ATTRIBUTE_UNUSED)
14477 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
14478 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
14479 int flavour = neon_cvt_flavour (rs);
14481 /* PR11109: Handle round-to-zero for VCVT conversions. */
14483 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
14484 && (flavour == 0 || flavour == 1 || flavour == 8 || flavour == 9)
14485 && (rs == NS_FD || rs == NS_FF))
14487 do_vfp_nsyn_cvtz ();
14491 /* VFP rather than Neon conversions. */
14494 do_vfp_nsyn_cvt (rs, flavour);
      /* Neon fixed-point conversion (shapes with an immediate operand).  */
14504 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
14506 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14509 /* Fixed-point conversion with #0 immediate is encoded as an
14510 integer conversion. */
14511 if (inst.operands[2].present && inst.operands[2].imm == 0)
      /* Fraction bits field holds 32 minus the requested fraction size.  */
14513 immbits = 32 - inst.operands[2].imm;
14514 NEON_ENCODE (IMMED, inst);
14516 inst.instruction |= enctab[flavour];
14517 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14518 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14519 inst.instruction |= LOW4 (inst.operands[1].reg);
14520 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14521 inst.instruction |= neon_quad (rs) << 6;
14522 inst.instruction |= 1 << 21;
14523 inst.instruction |= immbits << 16;
14525 neon_dp_fixup (&inst);
      /* Neon integer <-> float conversion (NS_DD / NS_QQ shapes).  */
14533 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
14535 NEON_ENCODE (INTEGER, inst);
14537 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14541 inst.instruction |= enctab[flavour];
14543 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14544 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14545 inst.instruction |= LOW4 (inst.operands[1].reg);
14546 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14547 inst.instruction |= neon_quad (rs) << 6;
14548 inst.instruction |= 2 << 18;
14550 neon_dp_fixup (&inst);
14554 /* Half-precision conversions for Advanced SIMD -- neon. */
      /* Operand-size sanity checks for f16<->f32 (VCVT Qd,Dm / Dd,Qm).  */
14559 && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
14561 as_bad (_("operand size must match register width"));
14566 && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
14568 as_bad (_("operand size must match register width"));
      /* 0x3b60600 = f32->f16 (narrow), 0x3b60700 = f16->f32 (widen).
	 NOTE(review): which shape selects which falls in a gap — confirm.  */
14573 inst.instruction = 0x3b60600;
14575 inst.instruction = 0x3b60700;
14577 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14578 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14579 inst.instruction |= LOW4 (inst.operands[1].reg);
14580 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14581 neon_dp_fixup (&inst);
14585 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
14586 do_vfp_nsyn_cvt (rs, flavour);
/* VCVTR entry point: shared encoder with round_to_zero = FALSE.  */
14591 do_neon_cvtr (void)
14593 do_neon_cvt_1 (FALSE);
/* VCVT entry point (header line lost in extract): round_to_zero = TRUE.  */
14599 do_neon_cvt_1 (TRUE);
/* Encode VCVTB (half-precision <-> single, bottom half).  Bit 16 selects
   the f16 direction when a 16-bit size is attached to either the mnemonic
   or the first operand.  */
14603 do_neon_cvtb (void)
14605 inst.instruction = 0xeb20a40;
14607 /* The sizes are attached to the mnemonic. */
14608 if (inst.vectype.el[0].type != NT_invtype
14609 && inst.vectype.el[0].size == 16)
14610 inst.instruction |= 0x00010000;
14612 /* Programmer's syntax: the sizes are attached to the operands. */
14613 else if (inst.operands[0].vectype.type != NT_invtype
14614 && inst.operands[0].vectype.size == 16)
14615 inst.instruction |= 0x00010000;
14617 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
14618 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
14619 do_vfp_cond_or_thumb ();
/* Encode VCVTT: identical to VCVTB except for the T bit (0x80).  The call
   into do_neon_cvtb falls in an extract gap.  */
14624 do_neon_cvtt (void)
14627 inst.instruction |= 0x80;
/* Encode the immediate forms of VMOV/VMVN.  Finds a cmode/op encoding for
   the (possibly 64-bit) immediate; if none exists for the current
   polarity, inverts the immediate and flips MOV<->MVN before retrying.  */
14631 neon_move_immediate (void)
14633 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
14634 struct neon_type_el et = neon_check_type (2, rs,
14635 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14636 unsigned immlo, immhi = 0, immbits;
14637 int op, cmode, float_p;
14639 constraint (et.type == NT_invtype,
14640 _("operand size must be specified for immediate VMOV"));
14642 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
14643 op = (inst.instruction & (1 << 5)) != 0;
      /* 64-bit immediates arrive split across imm (low) and reg (high).  */
14645 immlo = inst.operands[1].imm;
14646 if (inst.operands[1].regisimm)
14647 immhi = inst.operands[1].reg;
14649 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
14650 _("immediate has bits set outside the operand size"));
14652 float_p = inst.operands[1].immisfloat;
14654 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
14655 et.size, et.type)) == FAIL)
14657 /* Invert relevant bits only. */
14658 neon_invert_size (&immlo, &immhi, et.size);
14659 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
14660 with one or the other; those cases are caught by
14661 neon_cmode_for_move_imm. */
14663 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
14664 &op, et.size, et.type)) == FAIL)
14666 first_error (_("immediate out of range"));
      /* Rewrite the OP bit with the (possibly flipped) polarity.  */
14671 inst.instruction &= ~(1 << 5);
14672 inst.instruction |= op << 5;
14674 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14675 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14676 inst.instruction |= neon_quad (rs) << 6;
14677 inst.instruction |= cmode << 8;
14679 neon_write_immbits (immbits);
/* Body of do_neon_mvn (presumed from context; header line is outside this
   extract): register form encodes directly, immediate form defers to
   neon_move_immediate.  */
14685 if (inst.operands[1].isreg)
14687 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14689 NEON_ENCODE (INTEGER, inst);
14690 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14691 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14692 inst.instruction |= LOW4 (inst.operands[1].reg);
14693 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14694 inst.instruction |= neon_quad (rs) << 6;
14698 NEON_ENCODE (IMMED, inst);
14699 neon_move_immediate ();
14702 neon_dp_fixup (&inst);
14705 /* Encode instructions of form:
14707 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14708 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
/* Shared encoder for long/wide/narrow three-operand forms: fills the
   D/N/M register fields, sets U from the element signedness and the size
   field from SIZE (log2-encoded).  */
14711 neon_mixed_length (struct neon_type_el et, unsigned size)
14713 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14714 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14715 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14716 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14717 inst.instruction |= LOW4 (inst.operands[2].reg);
14718 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14719 inst.instruction |= (et.type == NT_unsigned) << 24;
14720 inst.instruction |= neon_logbits (size) << 20;
14722 neon_dp_fixup (&inst);
/* Encode long dyadic ops (Qd = Dn op Dm) with S/U 8/16/32 elements.  */
14726 do_neon_dyadic_long (void)
14728 /* FIXME: Type checking for lengthening op. */
14729 struct neon_type_el et = neon_check_type (3, NS_QDD,
14730 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
14731 neon_mixed_length (et, et.size);
/* Encode VABAL (absolute difference and accumulate, long).  */
14735 do_neon_abal (void)
14737 struct neon_type_el et = neon_check_type (3, NS_QDD,
14738 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
14739 neon_mixed_length (et, et.size);
/* Shared helper for long multiply(-accumulate): scalar second source uses
   the scalar MAC encoder with REGTYPES, the plain register form uses the
   mixed-length encoder with SCALARTYPES.  (The two masks name which
   element types each form accepts.)  */
14743 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
14745 if (inst.operands[2].isscalar)
14747 struct neon_type_el et = neon_check_type (3, NS_QDS,
14748 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
14749 NEON_ENCODE (SCALAR, inst);
14750 neon_mul_mac (et, et.type == NT_unsigned);
14754 struct neon_type_el et = neon_check_type (3, NS_QDD,
14755 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
14756 NEON_ENCODE (INTEGER, inst);
14757 neon_mixed_length (et, et.size);
/* VMLAL/VMLSL-style ops: scalar form allows S/U 16/32, register form
   allows any S/U 8/16/32 element type.  */
14762 do_neon_mac_maybe_scalar_long (void)
14764 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
/* Encode wide dyadic ops (Qd = Qn op Dm), e.g. VADDW/VSUBW.  */
14768 do_neon_dyadic_wide (void)
14770 struct neon_type_el et = neon_check_type (3, NS_QQD,
14771 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
14772 neon_mixed_length (et, et.size);
/* Encode narrowing dyadic ops (Dd = Qn op Qm), e.g. VADDHN/VSUBHN.  */
14776 do_neon_dyadic_narrow (void)
14778 struct neon_type_el et = neon_check_type (3, NS_QDD,
14779 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
14780 /* Operand sign is unimportant, and the U bit is part of the opcode,
14781 so force the operand type to integer. */
14782 et.type = NT_integer;
14783 neon_mixed_length (et, et.size / 2);
/* VQDMULL/VQDMLAL-style long saturating multiplies: signed 16/32 only,
   both scalar and register forms.  */
14787 do_neon_mul_sat_scalar_long (void)
14789 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
/* Encode VMULL.  Scalar second operand reuses the long MAC path; the
   register form additionally allows the polynomial P8 type.  */
14793 do_neon_vmull (void)
14795 if (inst.operands[2].isscalar)
14796 do_neon_mac_maybe_scalar_long ();
14799 struct neon_type_el et = neon_check_type (3, NS_QDD,
14800 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
14801 if (et.type == NT_poly)
14802 NEON_ENCODE (POLY, inst);
14804 NEON_ENCODE (INTEGER, inst);
14805 /* For polynomial encoding, size field must be 0b00 and the U bit must be
14806 zero. Should be OK as-is. */
14807 neon_mixed_length (et, et.size);
/* Body of do_neon_ext (VEXT — byte-wise extract; header outside extract).
   The element-typed immediate is converted to a byte offset, bounded by
   the register width (16 bytes for Q, 8 for D).  */
14814 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
14815 struct neon_type_el et = neon_check_type (3, rs,
14816 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14817 unsigned imm = (inst.operands[3].imm * et.size) / 8;
14819 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
14820 _("shift out of range"));
14821 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14822 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14823 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14824 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14825 inst.instruction |= LOW4 (inst.operands[2].reg);
14826 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14827 inst.instruction |= neon_quad (rs) << 6;
14828 inst.instruction |= imm << 8;
14830 neon_dp_fixup (&inst);
/* Body of do_neon_rev (VREV16/32/64; header outside extract).  op encodes
   which reversal width, and elements must be strictly smaller than the
   reversal region.  */
14836 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14837 struct neon_type_el et = neon_check_type (2, rs,
14838 N_EQK, N_8 | N_16 | N_32 | N_KEY);
14839 unsigned op = (inst.instruction >> 7) & 3;
14840 /* N (width of reversed regions) is encoded as part of the bitmask. We
14841 extract it here to check the elements to be reversed are smaller.
14842 Otherwise we'd get a reserved instruction. */
14843 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
14844 gas_assert (elsize != 0);
14845 constraint (et.size >= elsize,
14846 _("elements must be smaller than reversal region"));
14847 neon_two_same (neon_quad (rs), 1, et.size);
/* Body of do_neon_dup (VDUP; header outside extract).  Two forms: scalar
   source (Dd/Qd <- Dm[x]) encoded as a Neon DP instruction, and ARM-core
   register source encoded as a VFP-style transfer.  */
14853 if (inst.operands[1].isscalar)
14855 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
14856 struct neon_type_el et = neon_check_type (2, rs,
14857 N_EQK, N_8 | N_16 | N_32 | N_KEY);
14858 unsigned sizebits = et.size >> 3;
14859 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
14860 int logsize = neon_logbits (et.size);
14861 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
14863 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
14866 NEON_ENCODE (SCALAR, inst);
14867 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14868 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14869 inst.instruction |= LOW4 (dm);
14870 inst.instruction |= HI1 (dm) << 5;
14871 inst.instruction |= neon_quad (rs) << 6;
      /* imm4 field: scalar index shifted past the size bits.  */
14872 inst.instruction |= x << 17;
14873 inst.instruction |= sizebits << 16;
14875 neon_dp_fixup (&inst);
14879 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
14880 struct neon_type_el et = neon_check_type (2, rs,
14881 N_8 | N_16 | N_32 | N_KEY, N_EQK);
14882 /* Duplicate ARM register to lanes of vector. */
14883 NEON_ENCODE (ARMREG, inst);
      /* B/E bits select the element size for the core-register form.  */
14886 case 8: inst.instruction |= 0x400000; break;
14887 case 16: inst.instruction |= 0x000020; break;
14888 case 32: inst.instruction |= 0x000000; break;
14891 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
14892 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
14893 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
14894 inst.instruction |= neon_quad (rs) << 21;
14895 /* The encoding for this instruction is identical for the ARM and Thumb
14896 variants, except for the condition field. */
14897 do_vfp_cond_or_thumb ();
14901 /* VMOV has particularly many variations. It can be one of:
14902 0. VMOV<c><q> <Qd>, <Qm>
14903 1. VMOV<c><q> <Dd>, <Dm>
14904 (Register operations, which are VORR with Rm = Rn.)
14905 2. VMOV<c><q>.<dt> <Qd>, #<imm>
14906 3. VMOV<c><q>.<dt> <Dd>, #<imm>
14908 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
14909 (ARM register to scalar.)
14910 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
14911 (Two ARM registers to vector.)
14912 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
14913 (Scalar to ARM register.)
14914 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
14915 (Vector to two ARM registers.)
14916 8. VMOV.F32 <Sd>, <Sm>
14917 9. VMOV.F64 <Dd>, <Dm>
14918 (VFP register moves.)
14919 10. VMOV.F32 <Sd>, #imm
14920 11. VMOV.F64 <Dd>, #imm
14921 (VFP float immediate load.)
14922 12. VMOV <Rd>, <Sm>
14923 (VFP single to ARM reg.)
14924 13. VMOV <Sd>, <Rm>
14925 (ARM reg to VFP single.)
14926 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
14927 (Two ARM regs to two VFP singles.)
14928 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
14929 (Two VFP singles to two ARM regs.)
14931 These cases can be disambiguated using neon_select_shape, except cases 1/9
14932 and 3/11 which depend on the operand type too.
14934 All the encoded bits are hardcoded by this function.
14936 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
14937 Cases 5, 7 may be used with VFPv2 and above.
14939 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
14940 can specify a type where it doesn't make sense to, and is ignored). */
/* Body of do_neon_mov (header outside extract).  Disambiguates the many
   VMOV variants by operand shape — see the case-list comment above — then
   encodes each case either directly or via the legacy VFP mnemonics.  */
14945 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
14946 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
14948 struct neon_type_el et;
14949 const char *ldconst = 0;
14953 case NS_DD: /* case 1/9. */
14954 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
14955 /* It is not an error here if no type is given. */
14957 if (et.type == NT_float && et.size == 64)
14959 do_vfp_nsyn_opcode ("fcpyd");
14962 /* fall through. */
14964 case NS_QQ: /* case 0/1. */
14966 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14968 /* The architecture manual I have doesn't explicitly state which
14969 value the U bit should have for register->register moves, but
14970 the equivalent VORR instruction has U = 0, so do that. */
14971 inst.instruction = 0x0200110;
14972 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14973 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14974 inst.instruction |= LOW4 (inst.operands[1].reg);
14975 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      /* VORR Dd, Dm, Dm — source register appears in both Rn and Rm.  */
14976 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14977 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14978 inst.instruction |= neon_quad (rs) << 6;
14980 neon_dp_fixup (&inst);
14984 case NS_DI: /* case 3/11. */
14985 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
14987 if (et.type == NT_float && et.size == 64)
14989 /* case 11 (fconstd). */
14990 ldconst = "fconstd";
14991 goto encode_fconstd;
14993 /* fall through. */
14995 case NS_QI: /* case 2/3. */
14996 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14998 inst.instruction = 0x0800010;
14999 neon_move_immediate ();
15000 neon_dp_fixup (&inst);
15003 case NS_SR: /* case 4. */
15005 unsigned bcdebits = 0;
15007 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
15008 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
15010 et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
15011 logsize = neon_logbits (et.size);
15013 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
15015 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
15016 && et.size != 32, _(BAD_FPU));
15017 constraint (et.type == NT_invtype, _("bad type for scalar"));
15018 constraint (x >= 64 / et.size, _("scalar index out of range"));
      /* Opc fields select the element size; index bits are OR-ed on top.  */
15022 case 8: bcdebits = 0x8; break;
15023 case 16: bcdebits = 0x1; break;
15024 case 32: bcdebits = 0x0; break;
15028 bcdebits |= x << logsize;
15030 inst.instruction = 0xe000b10;
15031 do_vfp_cond_or_thumb ();
15032 inst.instruction |= LOW4 (dn) << 16;
15033 inst.instruction |= HI1 (dn) << 7;
15034 inst.instruction |= inst.operands[1].reg << 12;
15035 inst.instruction |= (bcdebits & 3) << 5;
15036 inst.instruction |= (bcdebits >> 2) << 21;
15040 case NS_DRR: /* case 5 (fmdrr). */
15041 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
15044 inst.instruction = 0xc400b10;
15045 do_vfp_cond_or_thumb ();
15046 inst.instruction |= LOW4 (inst.operands[0].reg);
15047 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
15048 inst.instruction |= inst.operands[1].reg << 12;
15049 inst.instruction |= inst.operands[2].reg << 16;
15052 case NS_RS: /* case 6. */
15055 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
15056 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
15057 unsigned abcdebits = 0;
15059 et = neon_check_type (2, NS_NULL,
15060 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
15061 logsize = neon_logbits (et.size);
15063 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
15065 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
15066 && et.size != 32, _(BAD_FPU));
15067 constraint (et.type == NT_invtype, _("bad type for scalar"));
15068 constraint (x >= 64 / et.size, _("scalar index out of range"));
      /* U bit distinguishes signed/unsigned sub-word extraction.  */
15072 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
15073 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
15074 case 32: abcdebits = 0x00; break;
15078 abcdebits |= x << logsize;
15079 inst.instruction = 0xe100b10;
15080 do_vfp_cond_or_thumb ();
15081 inst.instruction |= LOW4 (dn) << 16;
15082 inst.instruction |= HI1 (dn) << 7;
15083 inst.instruction |= inst.operands[0].reg << 12;
15084 inst.instruction |= (abcdebits & 3) << 5;
15085 inst.instruction |= (abcdebits >> 2) << 21;
15089 case NS_RRD: /* case 7 (fmrrd). */
15090 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
15093 inst.instruction = 0xc500b10;
15094 do_vfp_cond_or_thumb ();
15095 inst.instruction |= inst.operands[0].reg << 12;
15096 inst.instruction |= inst.operands[1].reg << 16;
15097 inst.instruction |= LOW4 (inst.operands[2].reg);
15098 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15101 case NS_FF: /* case 8 (fcpys). */
15102 do_vfp_nsyn_opcode ("fcpys");
15105 case NS_FI: /* case 10 (fconsts). */
15106 ldconst = "fconsts";
      /* encode_fconstd label presumably lands here (gapped).  Only
	 quarter-precision-representable immediates are encodable.  */
15108 if (is_quarter_float (inst.operands[1].imm))
15110 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
15111 do_vfp_nsyn_opcode (ldconst);
15114 first_error (_("immediate out of range"));
15117 case NS_RF: /* case 12 (fmrs). */
15118 do_vfp_nsyn_opcode ("fmrs");
15121 case NS_FR: /* case 13 (fmsr). */
15122 do_vfp_nsyn_opcode ("fmsr");
15125 /* The encoders for the fmrrs and fmsrr instructions expect three operands
15126 (one of which is a list), but we have parsed four. Do some fiddling to
15127 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
15129 case NS_RRFF: /* case 14 (fmrrs). */
15130 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
15131 _("VFP registers must be adjacent"));
15132 inst.operands[2].imm = 2;
15133 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15134 do_vfp_nsyn_opcode ("fmrrs");
15137 case NS_FFRR: /* case 15 (fmsrr). */
15138 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
15139 _("VFP registers must be adjacent"));
15140 inst.operands[1] = inst.operands[2];
15141 inst.operands[2] = inst.operands[3];
15142 inst.operands[0].imm = 2;
15143 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15144 do_vfp_nsyn_opcode ("fmsrr");
/* Encode V{R}SHR and friends (shift right by immediate).  A zero shift is
   rewritten as VMOV (rewrite path partially hidden by gaps); otherwise the
   shift amount encodes as (element size - imm).  */
15153 do_neon_rshift_round_imm (void)
15155 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15156 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
15157 int imm = inst.operands[2].imm;
15159 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
15162 inst.operands[2].present = 0;
15167 constraint (imm < 1 || (unsigned)imm > et.size,
15168 _("immediate out of range for shift"));
15169 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
/* Encode VMOVL (lengthening move, Qd <- Dm): the source element size in
   bytes goes in the shift-immediate field (bits 19+).  */
15174 do_neon_movl (void)
15176 struct neon_type_el et = neon_check_type (2, NS_QD,
15177 N_EQK | N_DBL, N_SU_32 | N_KEY);
15178 unsigned sizebits = et.size >> 3;
15179 inst.instruction |= sizebits << 19;
15180 neon_two_same (0, et.type == NT_unsigned, -1);
/* Body of do_neon_trn (VTRN, presumed from the VZIP/VUZP fallback below;
   header outside extract).  */
15186 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15187 struct neon_type_el et = neon_check_type (2, rs,
15188 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15189 NEON_ENCODE (INTEGER, inst);
15190 neon_two_same (neon_quad (rs), 1, et.size);
/* Encode VZIP/VUZP.  The 32-bit D-register case is architecturally the
   same operation as VTRN.32 and is re-dispatched as such.  */
15194 do_neon_zip_uzp (void)
15196 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15197 struct neon_type_el et = neon_check_type (2, rs,
15198 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15199 if (rs == NS_DD && et.size == 32)
15201 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
15202 inst.instruction = N_MNEM_vtrn;
15206 neon_two_same (neon_quad (rs), 1, et.size);
/* Encode VQABS/VQNEG: signed element types only.  */
15210 do_neon_sat_abs_neg (void)
15212 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15213 struct neon_type_el et = neon_check_type (2, rs,
15214 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15215 neon_two_same (neon_quad (rs), 1, et.size);
/* Encode VPADDL/VPADAL (pairwise add long): signedness goes in the OP
   field (bit 7), not the usual U bit.  */
15219 do_neon_pair_long (void)
15221 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15222 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
15223 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
15224 inst.instruction |= (et.type == NT_unsigned) << 7;
15225 neon_two_same (neon_quad (rs), 1, et.size);
/* Encode VRECPE/VRSQRTE (reciprocal estimates): F32 or U32 elements; the
   float variant is selected via bit 8.  */
15229 do_neon_recip_est (void)
15231 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15232 struct neon_type_el et = neon_check_type (2, rs,
15233 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
15234 inst.instruction |= (et.type == NT_float) << 8;
15235 neon_two_same (neon_quad (rs), 1, et.size);
/* Body of a two-same unary op on signed 8/16/32 elements — presumably
   do_neon_cls (VCLS); the header line is outside this extract.  */
15241 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15242 struct neon_type_el et = neon_check_type (2, rs,
15243 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15244 neon_two_same (neon_quad (rs), 1, et.size);
/* Body of a two-same unary op on I8/I16/I32 elements — presumably
   do_neon_clz (VCLZ); header outside this extract.  */
15250 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15251 struct neon_type_el et = neon_check_type (2, rs,
15252 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
15253 neon_two_same (neon_quad (rs), 1, et.size);
/* Body of a two-same unary op on 8-bit integer elements — presumably
   do_neon_cnt (VCNT); header outside this extract.  */
15259 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15260 struct neon_type_el et = neon_check_type (2, rs,
15261 N_EQK | N_INT, N_8 | N_KEY);
15262 neon_two_same (neon_quad (rs), 1, et.size);
/* Body of a typeless two-same op (size field suppressed with -1) —
   presumably do_neon_swp; header outside this extract.  */
15268 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15269 neon_two_same (neon_quad (rs), 1, -1);
/* Encode VTBL/VTBX (table lookup): the list operand supplies 1-4 source
   registers, encoded as length-1 in bits 8-9.  */
15273 do_neon_tbl_tbx (void)
15275 unsigned listlenbits;
15276 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
15278 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
15280 first_error (_("bad list length for table lookup"));
15284 listlenbits = inst.operands[1].imm - 1;
15285 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15286 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15287 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15288 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15289 inst.instruction |= LOW4 (inst.operands[2].reg);
15290 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15291 inst.instruction |= listlenbits << 8;
15293 neon_dp_fixup (&inst);
/* Encode VLDM/VSTM.  Single-precision lists defer to the VFP encoder;
   double-precision lists encode the register count in words (imm * 2).  */
15297 do_neon_ldm_stm (void)
15299 /* P, U and L bits are part of bitmask. */
15300 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
15301 unsigned offsetbits = inst.operands[1].imm * 2;
15303 if (inst.operands[1].issingle)
15305 do_vfp_nsyn_ldm_stm (is_dbmode);
15309 constraint (is_dbmode && !inst.operands[0].writeback,
15310 _("writeback (!) must be used for VLDMDB and VSTMDB"));
15312 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
15313 _("register list must contain at least 1 and at most 16 "
15316 inst.instruction |= inst.operands[0].reg << 16;
15317 inst.instruction |= inst.operands[0].writeback << 21;
15318 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
15319 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
15321 inst.instruction |= offsetbits;
15323 do_vfp_cond_or_thumb ();
/* Encode VLDR/VSTR by dispatching to the appropriate VFP pseudo-opcode
   (flds/fsts for single precision, fldd/fstd for double).  Also
   diagnoses deprecated/unpredictable use of PC as the base register.
   NOTE(review): the conditions guarding the four do_vfp_nsyn_opcode
   calls are partly elided in this excerpt.  */
15327 do_neon_ldr_str (void)
15329 int is_ldr = (inst.instruction & (1 << 20)) != 0;
15331 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
15332 And is UNPREDICTABLE in thumb mode. */
15334 && inst.operands[1].reg == REG_PC
15335 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
15337 if (!thumb_mode && warn_on_deprecated)
15338 as_warn (_("Use of PC here is deprecated"));
15340 inst.error = _("Use of PC here is UNPREDICTABLE");
15343 if (inst.operands[0].issingle)
15346 do_vfp_nsyn_opcode ("flds");
15348 do_vfp_nsyn_opcode ("fsts");
15353 do_vfp_nsyn_opcode ("fldd");
15355 do_vfp_nsyn_opcode ("fstd");
15359 /* "interleave" version also handles non-interleaving register VLD1/VST1
/* Encode the interleaved forms of VLD<n>/VST<n> (and non-interleaving
   VLD1/VST1).  Validates the alignment specifier against the register
   list, then looks up the 4-bit "type" field from TYPETABLE using the
   list style bits and the <n> of the mnemonic.  */
15363 do_neon_ld_st_interleave (void)
15365 struct neon_type_el et = neon_check_type (1, NS_NULL,
15366 N_8 | N_16 | N_32 | N_64);
15367 unsigned alignbits = 0;
15369 /* The bits in this table go:
15370 0: register stride of one (0) or two (1)
15371 1,2: register list length, minus one (1, 2, 3, 4).
15372 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
15373 We use -1 for invalid entries. */
15374 const int typetable[] =
15376 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
15377 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
15378 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
15379 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
15383 if (et.type == NT_invtype)
/* Alignment is given as bits [15:8] of operand 1's imm; larger
   alignments require longer register lists.  NOTE(review): case
   labels for the 128/256 alignments are elided in this excerpt.  */
15386 if (inst.operands[1].immisalign)
15387 switch (inst.operands[1].imm >> 8)
15389 case 64: alignbits = 1; break;
15391 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
15392 && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
15393 goto bad_alignment;
15397 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
15398 goto bad_alignment;
15403 first_error (_("bad alignment"));
15407 inst.instruction |= alignbits << 4;
15408 inst.instruction |= neon_logbits (et.size) << 6;
15410 /* Bits [4:6] of the immediate in a list specifier encode register stride
15411 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
15412 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
15413 up the right value for "type" in a table based on this value and the given
15414 list style, then stick it back. */
15415 idx = ((inst.operands[0].imm >> 4) & 7)
15416 | (((inst.instruction >> 8) & 3) << 3);
15418 typebits = typetable[idx];
15420 constraint (typebits == -1, _("bad list type for instruction"));
15422 inst.instruction &= ~0xf00;
15423 inst.instruction |= typebits << 8;
15426 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
15427 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
15428 otherwise. The variable arguments are a list of pairs of legal (size, align)
15429 values, terminated with -1. */
/* Check SIZE/ALIGN against the varargs list of legal (size, align)
   pairs, terminated by -1.  Returns SUCCESS when a pair matches (or
   no alignment was written), FAIL otherwise; *DO_ALIGN is set when the
   alignment bit should be emitted.
   NOTE(review): the early return for !immisalign and the *do_align
   assignment are on lines elided from this excerpt.  */
15432 neon_alignment_bit (int size, int align, int *do_align, ...)
15435 int result = FAIL, thissize, thisalign;
15437 if (!inst.operands[1].immisalign)
15443 va_start (ap, do_align);
15447 thissize = va_arg (ap, int);
/* -1 terminates the (size, align) pair list.  */
15448 if (thissize == -1)
15450 thisalign = va_arg (ap, int);
15452 if (size == thissize && align == thisalign)
15455 while (result != SUCCESS);
15459 if (result == SUCCESS)
15462 first_error (_("unsupported alignment for instruction"));
/* Encode single-lane VLD<n>/VST<n> ("to/from one lane") forms.
   Validates list length, lane index and stride, then emits the
   per-<n> alignment/index bits.  */
15468 do_neon_ld_st_lane (void)
15470 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
15471 int align_good, do_align = 0;
15472 int logsize = neon_logbits (et.size);
15473 int align = inst.operands[1].imm >> 8;
/* <n> of VLD<n>/VST<n>, minus one, lives in bits [9:8].  */
15474 int n = (inst.instruction >> 8) & 3;
15475 int max_el = 64 / et.size;
15477 if (et.type == NT_invtype)
15480 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
15481 _("bad list length"));
15482 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
15483 _("scalar index out of range"));
15484 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
15486 _("stride of 2 unavailable when element size is 8"));
15490 case 0: /* VLD1 / VST1. */
15491 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
15493 if (align_good == FAIL)
15497 unsigned alignbits = 0;
15500 case 16: alignbits = 0x1; break;
15501 case 32: alignbits = 0x3; break;
15504 inst.instruction |= alignbits << 4;
15508 case 1: /* VLD2 / VST2. */
15509 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
15511 if (align_good == FAIL)
15514 inst.instruction |= 1 << 4;
15517 case 2: /* VLD3 / VST3. */
/* Three-element lane transfers never take an alignment specifier.  */
15518 constraint (inst.operands[1].immisalign,
15519 _("can't use alignment with this instruction"));
15522 case 3: /* VLD4 / VST4. */
15523 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
15524 16, 64, 32, 64, 32, 128, -1);
15525 if (align_good == FAIL)
15529 unsigned alignbits = 0;
15532 case 8: alignbits = 0x1; break;
15533 case 16: alignbits = 0x1; break;
/* For size 32, 64-bit and 128-bit alignments encode differently.  */
15534 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
15537 inst.instruction |= alignbits << 4;
15544 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
15545 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15546 inst.instruction |= 1 << (4 + logsize);
15548 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
15549 inst.instruction |= logsize << 10;
15552 /* Encode single n-element structure to all lanes VLD<n> instructions. */
/* Encode single n-element structure to all lanes VLD<n> ("load and
   duplicate") instructions, switching on <n> from bits [9:8].  */
15555 do_neon_ld_dup (void)
15557 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
15558 int align_good, do_align = 0;
15560 if (et.type == NT_invtype)
15563 switch ((inst.instruction >> 8) & 3)
15565 case 0: /* VLD1. */
15566 gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
15567 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
15568 &do_align, 16, 16, 32, 32, -1);
15569 if (align_good == FAIL)
/* VLD1 to all lanes may name one or two registers; bit 5 is the
   list-length ("T") bit.  */
15571 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
15574 case 2: inst.instruction |= 1 << 5; break;
15575 default: first_error (_("bad list length")); return;
15577 inst.instruction |= neon_logbits (et.size) << 6;
15580 case 1: /* VLD2. */
15581 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
15582 &do_align, 8, 16, 16, 32, 32, 64, -1);
15583 if (align_good == FAIL)
15585 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
15586 _("bad list length"));
/* Bit 5 encodes a register stride of two.  */
15587 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15588 inst.instruction |= 1 << 5;
15589 inst.instruction |= neon_logbits (et.size) << 6;
15592 case 2: /* VLD3. */
15593 constraint (inst.operands[1].immisalign,
15594 _("can't use alignment with this instruction"));
15595 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
15596 _("bad list length"));
15597 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15598 inst.instruction |= 1 << 5;
15599 inst.instruction |= neon_logbits (et.size) << 6;
15602 case 3: /* VLD4. */
15604 int align = inst.operands[1].imm >> 8;
15605 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
15606 16, 64, 32, 64, 32, 128, -1);
15607 if (align_good == FAIL)
15609 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
15610 _("bad list length"));
15611 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
15612 inst.instruction |= 1 << 5;
/* 32-bit elements at 128-bit alignment get the special 0b11 size.  */
15613 if (et.size == 32 && align == 128)
15614 inst.instruction |= 0x3 << 6;
15616 inst.instruction |= neon_logbits (et.size) << 6;
15623 inst.instruction |= do_align << 4;
15626 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
15627 apart from bits [11:4]. */
/* Top-level encoder for VLD<n>/VST<n>: dispatch to the interleave,
   all-lanes (dup) or single-lane encoder based on the lane specifier,
   then fill in the bits common to all three forms (registers, base,
   post-index/writeback addressing, and the fixed opcode prefix).  */
15630 do_neon_ldx_stx (void)
15632 if (inst.operands[1].isreg)
15633 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
15635 switch (NEON_LANE (inst.operands[0].imm))
15637 case NEON_INTERLEAVE_LANES:
15638 NEON_ENCODE (INTERLV, inst);
15639 do_neon_ld_st_interleave ();
15642 case NEON_ALL_LANES:
15643 NEON_ENCODE (DUP, inst);
15648 NEON_ENCODE (LANE, inst);
15649 do_neon_ld_st_lane ();
15652 /* L bit comes from bit mask. */
15653 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15654 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15655 inst.instruction |= inst.operands[1].reg << 16;
15657 if (inst.operands[1].postind)
15659 int postreg = inst.operands[1].imm & 0xf;
15660 constraint (!inst.operands[1].immisreg,
15661 _("post-index must be a register"));
/* 0xd and 0xf in the Rm field are the writeback/no-writeback
   sentinels, so they cannot name a post-index register.  */
15662 constraint (postreg == 0xd || postreg == 0xf,
15663 _("bad register for post-index"));
15664 inst.instruction |= postreg;
15666 else if (inst.operands[1].writeback)
15668 inst.instruction |= 0xd;
15671 inst.instruction |= 0xf;
/* Thumb encodings use the 0xf9 prefix, ARM ones 0xf4.
   NOTE(review): the selecting condition is elided in this excerpt.  */
15674 inst.instruction |= 0xf9000000;
15676 inst.instruction |= 0xf4000000;
15679 /* Overall per-instruction processing. */
15681 /* We need to be able to fix up arbitrary expressions in some statements.
15682 This is so that we can handle symbols that are an arbitrary distance from
15683 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
15684 which returns part of an address in a form which will be valid for
15685 a data instruction. We do this by pushing the expression into a symbol
15686 in the expr_section, and creating a fix for that. */
/* Create a fixup for EXP at FRAG/WHERE.  Constant expressions are
   first wrapped in an absolute-valued symbol so the object file has
   something to relocate against; other non-symbol expressions go
   through make_expr_symbol.  The fix is tagged with the current
   thumb_mode so md_apply_fix later knows the instruction set.
   NOTE(review): the remaining parameters and the switch dispatching
   on exp->X_op are on lines elided from this excerpt.  */
15689 fix_new_arm (fragS * frag,
15703 /* Create an absolute valued symbol, so we have something to
15704 refer to in the object file. Unfortunately for us, gas's
15705 generic expression parsing will already have folded out
15706 any use of .set foo/.type foo %function that may have
15707 been used to set type information of the target location,
15708 that's being specified symbolically. We have to presume
15709 the user knows what they are doing. */
15713 sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
15715 symbol = symbol_find_or_make (name);
15716 S_SET_SEGMENT (symbol, absolute_section);
15717 symbol_set_frag (symbol, &zero_address_frag);
15718 S_SET_VALUE (symbol, exp->X_add_number);
/* Rewrite the expression to reference the new symbol.  */
15719 exp->X_op = O_symbol;
15720 exp->X_add_symbol = symbol;
15721 exp->X_add_number = 0;
15727 new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
15728 (enum bfd_reloc_code_real) reloc);
15732 new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
15733 pc_rel, (enum bfd_reloc_code_real) reloc);
15737 /* Mark whether the fix is to a THUMB instruction, or an ARM
15739 new_fix->tc_fix_data = thumb_mode;
15742 /* Create a frag for an instruction requiring relaxation. */
/* Emit a frag for an instruction whose final size is not yet known
   (a relaxable Thumb instruction): extract symbol/offset from the
   relocation expression, allocate an rs_machine_dependent frag, and
   write the initial (narrow) encoding.  */
15744 output_relax_insn (void)
15750 /* The size of the instruction is unknown, so tie the debug info to the
15751 start of the instruction. */
15752 dwarf2_emit_insn (0);
15754 switch (inst.reloc.exp.X_op)
15757 sym = inst.reloc.exp.X_add_symbol;
15758 offset = inst.reloc.exp.X_add_number;
15762 offset = inst.reloc.exp.X_add_number;
/* Anything more complex becomes an expression symbol.  */
15765 sym = make_expr_symbol (&inst.reloc.exp);
15769 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
15770 inst.relax, sym, offset, NULL/*offset, opcode*/);
15771 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
15774 /* Write a 32-bit thumb instruction to buf. */
/* Write a 32-bit Thumb instruction to BUF as two halfwords,
   high halfword first (Thumb-2 memory order).  */
15776 put_thumb32_insn (char * buf, unsigned long insn)
15778 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
15779 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
/* Emit the fully-encoded instruction in `inst' to the current frag:
   report any accumulated error, defer to output_relax_insn for
   relaxable instructions, otherwise write the bytes, create any
   pending fixup, and record debug line info.  STR is the source text,
   used only for diagnostics.  */
15783 output_inst (const char * str)
15789 as_bad ("%s -- `%s'", inst.error, str);
15794 output_relax_insn ();
15797 if (inst.size == 0)
15800 to = frag_more (inst.size);
15801 /* PR 9814: Record the thumb mode into the current frag so that we know
15802 what type of NOP padding to use, if necessary. We override any previous
15803 setting so that if the mode has changed then the NOPS that we use will
15804 match the encoding of the last instruction in the frag. */
15805 frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
15807 if (thumb_mode && (inst.size > THUMB_SIZE))
15809 gas_assert (inst.size == (2 * THUMB_SIZE));
15810 put_thumb32_insn (to, inst.instruction);
/* 64-bit (two-word) ARM encodings, e.g. Neon immediates, are written
   as two identical-endian words.  */
15812 else if (inst.size > INSN_SIZE)
15814 gas_assert (inst.size == (2 * INSN_SIZE));
15815 md_number_to_chars (to, inst.instruction, INSN_SIZE);
15816 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
15819 md_number_to_chars (to, inst.instruction, inst.size);
15821 if (inst.reloc.type != BFD_RELOC_UNUSED)
15822 fix_new_arm (frag_now, to - frag_now->fr_literal,
15823 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
15826 dwarf2_emit_insn (inst.size);
/* Emit (or re-emit) an IT instruction with the given COND and MASK.
   If TO is NULL a new location is allocated in the current frag;
   otherwise the instruction at TO is overwritten in place (used when
   the mask of an open automatic IT block is updated).  */
15830 output_it_inst (int cond, int mask, char * to)
15832 unsigned long instruction = 0xbf00;
15835 instruction |= mask;
15836 instruction |= cond << 4;
15840 to = frag_more (2);
15842 dwarf2_emit_insn (2);
15846 md_number_to_chars (to, instruction, 2);
15851 /* Tag values used in struct asm_opcode's tag field. */
15854 OT_unconditional, /* Instruction cannot be conditionalized.
15855 The ARM condition field is still 0xE. */
15856 OT_unconditionalF, /* Instruction cannot be conditionalized
15857 and carries 0xF in its ARM condition field. */
15858 OT_csuffix, /* Instruction takes a conditional suffix. */
15859 OT_csuffixF, /* Some forms of the instruction take a conditional
15860 suffix, others place 0xF where the condition field
15862 OT_cinfix3, /* Instruction takes a conditional infix,
15863 beginning at character index 3. (In
15864 unified mode, it becomes a suffix.) */
15865 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
15866 tsts, cmps, cmns, and teqs. */
15867 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
15868 character index 3, even in unified mode. Used for
15869 legacy instructions where suffix and infix forms
15870 may be ambiguous. */
15871 OT_csuf_or_in3, /* Instruction takes either a conditional
15872 suffix or an infix at character index 3. */
15873 OT_odd_infix_unc, /* This is the unconditional variant of an
15874 instruction that takes a conditional infix
15875 at an unusual position. In unified mode,
15876 this variant will accept a suffix. */
15877 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
15878 are the conditional variants of instructions that
15879 take conditional infixes in unusual positions.
15880 The infix appears at character index
15881 (tag - OT_odd_infix_0). These are not accepted
15882 in unified mode. */
15885 /* Subroutine of md_assemble, responsible for looking up the primary
15886 opcode from the mnemonic the user wrote. STR points to the
15887 beginning of the mnemonic.
15889 This is not simply a hash table lookup, because of conditional
15890 variants. Most instructions have conditional variants, which are
15891 expressed with a _conditional affix_ to the mnemonic. If we were
15892 to encode each conditional variant as a literal string in the opcode
15893 table, it would have approximately 20,000 entries.
15895 Most mnemonics take this affix as a suffix, and in unified syntax,
15896 'most' is upgraded to 'all'. However, in the divided syntax, some
15897 instructions take the affix as an infix, notably the s-variants of
15898 the arithmetic instructions. Of those instructions, all but six
15899 have the infix appear after the third character of the mnemonic.
15901 Accordingly, the algorithm for looking up primary opcodes given
15904 1. Look up the identifier in the opcode table.
15905 If we find a match, go to step U.
15907 2. Look up the last two characters of the identifier in the
15908 conditions table. If we find a match, look up the first N-2
15909 characters of the identifier in the opcode table. If we
15910 find a match, go to step CE.
15912 3. Look up the fourth and fifth characters of the identifier in
15913 the conditions table. If we find a match, extract those
15914 characters from the identifier, and look up the remaining
15915 characters in the opcode table. If we find a match, go
15920 U. Examine the tag field of the opcode structure, in case this is
15921 one of the six instructions with its conditional infix in an
15922 unusual place. If it is, the tag tells us where to find the
15923 infix; look it up in the conditions table and set inst.cond
15924 accordingly. Otherwise, this is an unconditional instruction.
15925 Again set inst.cond accordingly. Return the opcode structure.
15927 CE. Examine the tag field to make sure this is an instruction that
15928 should receive a conditional suffix. If it is not, fail.
15929 Otherwise, set inst.cond from the suffix we already looked up,
15930 and return the opcode structure.
15932 CM. Examine the tag field to make sure this is an instruction that
15933 should receive a conditional infix after the third character.
15934 If it is not, fail. Otherwise, undo the edits to the current
15935 line of input and proceed as for case CE. */
/* Look up the opcode for the mnemonic at *STR, stripping any
   conditional affix and .w/.n width or Neon type suffix (see the large
   algorithm comment above: steps 1-3 and U/CE/CM).  Sets inst.cond and
   advances *STR past the mnemonic; returns NULL on failure.  */
static const struct asm_opcode *
15938 opcode_lookup (char **str)
15942 const struct asm_opcode *opcode;
15943 const struct asm_cond *cond;
15946 /* Scan up to the end of the mnemonic, which must end in white space,
15947 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
15948 for (base = end = *str; *end != '\0'; end++)
15949 if (*end == ' ' || *end == '.')
15955 /* Handle a possible width suffix and/or Neon type suffix. */
15960 /* The .w and .n suffixes are only valid if the unified syntax is in
15962 if (unified_syntax && end[1] == 'w')
15964 else if (unified_syntax && end[1] == 'n')
15969 inst.vectype.elems = 0;
15971 *str = end + offset;
15973 if (end[offset] == '.')
15975 /* See if we have a Neon type suffix (possible in either unified or
15976 non-unified ARM syntax mode). */
15977 if (parse_neon_type (&inst.vectype, str) == FAIL)
15980 else if (end[offset] != '\0' && end[offset] != ' ')
15986 /* Look for unaffixed or special-case affixed mnemonic. */
15987 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
/* Step U: exact match.  Tags >= OT_odd_infix_0 carry an infix at an
   unusual position; extract it to get the condition.  */
15992 if (opcode->tag < OT_odd_infix_0)
15994 inst.cond = COND_ALWAYS;
15998 if (warn_on_deprecated && unified_syntax)
15999 as_warn (_("conditional infixes are deprecated in unified syntax"));
16000 affix = base + (opcode->tag - OT_odd_infix_0);
16001 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
16004 inst.cond = cond->value;
16008 /* Cannot have a conditional suffix on a mnemonic of less than two
16010 if (end - base < 3)
16013 /* Look for suffixed mnemonic. */
16015 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
16016 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
16018 if (opcode && cond)
/* Step CE: matched base mnemonic + 2-char condition suffix.  Check
   the tag permits a suffix here.  */
16021 switch (opcode->tag)
16023 case OT_cinfix3_legacy:
16024 /* Ignore conditional suffixes matched on infix only mnemonics. */
16028 case OT_cinfix3_deprecated:
16029 case OT_odd_infix_unc:
16030 if (!unified_syntax)
16032 /* else fall through */
16036 case OT_csuf_or_in3:
16037 inst.cond = cond->value;
16040 case OT_unconditional:
16041 case OT_unconditionalF:
16043 inst.cond = cond->value;
16046 /* Delayed diagnostic. */
16047 inst.error = BAD_COND;
16048 inst.cond = COND_ALWAYS;
16057 /* Cannot have a usual-position infix on a mnemonic of less than
16058 six characters (five would be a suffix). */
16059 if (end - base < 6)
16062 /* Look for infixed mnemonic in the usual position. */
16064 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
/* Step CM: temporarily splice the 2-char infix out of the buffer,
   look up the remainder, then restore the input line.  */
16068 memcpy (save, affix, 2);
16069 memmove (affix, affix + 2, (end - affix) - 2);
16070 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
16072 memmove (affix + 2, affix, (end - affix) - 2);
16073 memcpy (affix, save, 2);
16076 && (opcode->tag == OT_cinfix3
16077 || opcode->tag == OT_cinfix3_deprecated
16078 || opcode->tag == OT_csuf_or_in3
16079 || opcode->tag == OT_cinfix3_legacy))
16082 if (warn_on_deprecated && unified_syntax
16083 && (opcode->tag == OT_cinfix3
16084 || opcode->tag == OT_cinfix3_deprecated))
16085 as_warn (_("conditional infixes are deprecated in unified syntax"));
16087 inst.cond = cond->value;
16094 /* This function generates an initial IT instruction, leaving its block
16095 virtually open for the new instructions. Eventually,
16096 the mask will be updated by now_it_add_mask () each time
16097 a new instruction needs to be included in the IT block.
16098 Finally, the block is closed with close_automatic_it_block ().
16099 The block closure can be requested either from md_assemble (),
16100 a tencode (), or due to a label hook. */
/* Open a new automatically-generated IT block for COND: emit an
   initial IT instruction (mask 0x18 = one-slot block) and remember its
   location so now_it_add_mask can rewrite it as the block grows.  */
16103 new_automatic_it_block (int cond)
16105 now_it.state = AUTOMATIC_IT_BLOCK;
16106 now_it.mask = 0x18;
16108 now_it.block_length = 1;
16109 mapping_state (MAP_THUMB);
16110 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
16113 /* Close an automatic IT block.
16114 See comments in new_automatic_it_block (). */
/* Close an automatic IT block.
   See comments in new_automatic_it_block ().  Mask 0x10 marks the
   block as complete.  */
16117 close_automatic_it_block (void)
16119 now_it.mask = 0x10;
16120 now_it.block_length = 0;
16123 /* Update the mask of the current automatically-generated IT
16124 instruction. See comments in new_automatic_it_block (). */
/* Extend the current automatic IT block by one slot: fold COND's
   then/else polarity into the mask (bit position depends on the
   block length) and rewrite the already-emitted IT instruction.  */
16127 now_it_add_mask (int cond)
16129 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
16130 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
16131 | ((bitvalue) << (nbit)))
/* Low bit of the condition selects then (matching cc) vs else.  */
16132 const int resulting_bit = (cond & 1);
16134 now_it.mask &= 0xf;
16135 now_it.mask = SET_BIT_VALUE (now_it.mask,
16137 (5 - now_it.block_length));
16138 now_it.mask = SET_BIT_VALUE (now_it.mask,
16140 ((5 - now_it.block_length) - 1) );
/* Re-emit the IT instruction in place with the updated mask.  */
16141 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
16147 /* The IT blocks handling machinery is accessed through the these functions:
16148 it_fsm_pre_encode () from md_assemble ()
16149 set_it_insn_type () optional, from the tencode functions
16150 set_it_insn_type_last () ditto
16151 in_it_block () ditto
16152 it_fsm_post_encode () from md_assemble ()
16153 force_automatic_it_block_close () from label handling functions
16156 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
16157 initializing the IT insn type with a generic initial value depending
16158 on the inst.condition.
16159 2) During the tencode function, two things may happen:
16160 a) The tencode function overrides the IT insn type by
16161 calling either set_it_insn_type (type) or set_it_insn_type_last ().
16162 b) The tencode function queries the IT block state by
16163 calling in_it_block () (i.e. to determine narrow/not narrow mode).
16165 Both set_it_insn_type and in_it_block run the internal FSM state
16166 handling function (handle_it_state), because: a) setting the IT insn
16167 type may incur in an invalid state (exiting the function),
16168 and b) querying the state requires the FSM to be updated.
16169 Specifically we want to avoid creating an IT block for conditional
16170 branches, so it_fsm_pre_encode is actually a guess and we can't
16171 determine whether an IT block is required until the tencode () routine
16172 has decided what type of instruction this actually is.
16173 Because of this, if set_it_insn_type and in_it_block have to be used,
16174 set_it_insn_type has to be called first.
16176 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
16177 determines the insn IT type depending on the inst.cond code.
16178 When a tencode () routine encodes an instruction that can be
16179 either outside an IT block, or, in the case of being inside, has to be
16180 the last one, set_it_insn_type_last () will determine the proper
16181 IT instruction type based on the inst.cond code. Otherwise,
16182 set_it_insn_type can be called for overriding that logic or
16183 for covering other cases.
16185 Calling handle_it_state () may not transition the IT block state to
16186 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
16187 still queried. Instead, if the FSM determines that the state should
16188 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
16189 after the tencode () function: that's what it_fsm_post_encode () does.
16191 Since in_it_block () calls the state handling function to get an
16192 updated state, an error may occur (due to invalid insns combination).
16193 In that case, inst.error is set.
16194 Therefore, inst.error has to be checked after the execution of
16195 the tencode () routine.
16197 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
16198 any pending state change (if any) that didn't take place in
16199 handle_it_state () as explained above. */
/* Initialize the IT-insn type guess before calling the encoder; a
   tencode function may override it (see the FSM comment above).  */
16202 it_fsm_pre_encode (void)
16204 if (inst.cond != COND_ALWAYS)
16205 inst.it_insn_type = INSIDE_IT_INSN;
16207 inst.it_insn_type = OUTSIDE_IT_INSN;
16209 now_it.state_handled = 0;
16212 /* IT state FSM handling function. */
/* IT state FSM handling function.  Advances now_it.state according to
   the current instruction's it_insn_type: opens/extends/closes
   automatic IT blocks and validates instructions against manually
   written IT blocks.  Sets inst.error on invalid combinations.  */
16215 handle_it_state (void)
16217 now_it.state_handled = 1;
16219 switch (now_it.state)
16221 case OUTSIDE_IT_BLOCK:
16222 switch (inst.it_insn_type)
16224 case OUTSIDE_IT_INSN:
16227 case INSIDE_IT_INSN:
16228 case INSIDE_IT_LAST_INSN:
/* Conditional instruction in ARM mode: warn unless implicit-IT
   covers ARM mode.  */
16229 if (thumb_mode == 0)
16232 && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
16233 as_tsktsk (_("Warning: conditional outside an IT block"\
16238 if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
16239 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
16241 /* Automatically generate the IT instruction. */
16242 new_automatic_it_block (inst.cond);
16243 if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
16244 close_automatic_it_block ();
16248 inst.error = BAD_OUT_IT;
16254 case IF_INSIDE_IT_LAST_INSN:
16255 case NEUTRAL_IT_INSN:
/* An explicit IT instruction starts a manual block.  */
16259 now_it.state = MANUAL_IT_BLOCK;
16260 now_it.block_length = 0;
16265 case AUTOMATIC_IT_BLOCK:
16266 /* Three things may happen now:
16267 a) We should increment current it block size;
16268 b) We should close current it block (closing insn or 4 insns);
16269 c) We should close current it block and start a new one (due
16270 to incompatible conditions or
16271 4 insns-length block reached). */
16273 switch (inst.it_insn_type)
16275 case OUTSIDE_IT_INSN:
16276 /* The closure of the block shall happen immediately,
16277 so any in_it_block () call reports the block as closed. */
16278 force_automatic_it_block_close ();
16281 case INSIDE_IT_INSN:
16282 case INSIDE_IT_LAST_INSN:
16283 case IF_INSIDE_IT_LAST_INSN:
16284 now_it.block_length++;
/* Over-long block or incompatible condition: close and, unless this
   was an if-inside-last insn, reopen a fresh block.  */
16286 if (now_it.block_length > 4
16287 || !now_it_compatible (inst.cond))
16289 force_automatic_it_block_close ();
16290 if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
16291 new_automatic_it_block (inst.cond);
16295 now_it_add_mask (inst.cond);
16298 if (now_it.state == AUTOMATIC_IT_BLOCK
16299 && (inst.it_insn_type == INSIDE_IT_LAST_INSN
16300 || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
16301 close_automatic_it_block ();
16304 case NEUTRAL_IT_INSN:
16305 now_it.block_length++;
16307 if (now_it.block_length > 4)
16308 force_automatic_it_block_close ();
16310 now_it_add_mask (now_it.cc & 1);
16314 close_automatic_it_block ();
16315 now_it.state = MANUAL_IT_BLOCK;
16320 case MANUAL_IT_BLOCK:
16322 /* Check conditional suffixes. */
/* Recover the condition the next slot of the manual IT block
   demands from the block's cc and the top mask bit.  */
16323 const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
16326 now_it.mask &= 0x1f;
16327 is_last = (now_it.mask == 0x10);
16329 switch (inst.it_insn_type)
16331 case OUTSIDE_IT_INSN:
16332 inst.error = BAD_NOT_IT;
16335 case INSIDE_IT_INSN:
16336 if (cond != inst.cond)
16338 inst.error = BAD_IT_COND;
16343 case INSIDE_IT_LAST_INSN:
16344 case IF_INSIDE_IT_LAST_INSN:
16345 if (cond != inst.cond)
16347 inst.error = BAD_IT_COND;
16352 inst.error = BAD_BRANCH;
16357 case NEUTRAL_IT_INSN:
16358 /* The BKPT instruction is unconditional even in an IT block. */
16362 inst.error = BAD_IT_IT;
/* Commit any IT state change deferred by handle_it_state (): run the
   FSM if the encoder didn't, and leave a finished block.  */
16373 it_fsm_post_encode (void)
16377 if (!now_it.state_handled)
16378 handle_it_state ();
/* Mask 0x10 marks the last slot of the block.  */
16380 is_last = (now_it.mask == 0x10);
16383 now_it.state = OUTSIDE_IT_BLOCK;
/* Immediately close any open automatic IT block (no-op for manual
   blocks or when no block is open).  */
16389 force_automatic_it_block_close (void)
16391 if (now_it.state == AUTOMATIC_IT_BLOCK)
16393 close_automatic_it_block ();
16394 now_it.state = OUTSIDE_IT_BLOCK;
/* NOTE(review): the function header is elided in this excerpt;
   presumably this is in_it_block () — verify against the full file.
   Reports whether the current instruction lies inside an IT block,
   running the FSM first so the answer reflects this instruction.  */
16402 if (!now_it.state_handled)
16403 handle_it_state ();
16405 return now_it.state != OUTSIDE_IT_BLOCK;
/* Main per-statement entry point: assemble the instruction in STR.
   Looks up the opcode, validates it against the selected CPU and
   current instruction set (Thumb vs ARM), parses operands, runs the
   IT-block FSM around the encoder, and records architecture-feature
   usage for the build attributes.  */
16409 md_assemble (char *str)
16412 const struct asm_opcode * opcode;
16414 /* Align the previous label if needed. */
16415 if (last_label_seen != NULL)
16417 symbol_set_frag (last_label_seen, frag_now);
16418 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
16419 S_SET_SEGMENT (last_label_seen, now_seg);
16422 memset (&inst, '\0', sizeof (inst));
16423 inst.reloc.type = BFD_RELOC_UNUSED;
16425 opcode = opcode_lookup (&p);
16428 /* It wasn't an instruction, but it might be a register alias of
16429 the form alias .req reg, or a Neon .dn/.qn directive. */
16430 if (! create_register_alias (str, p)
16431 && ! create_neon_reg_alias (str, p))
16432 as_bad (_("bad instruction `%s'"), str);
16437 if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
16438 as_warn (_("s suffix on comparison instruction is deprecated"));
16440 /* The value which unconditional instructions should have in place of the
16441 condition field. */
16442 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
/* --- Thumb assembly path.  NOTE(review): the enclosing `if
   (thumb_mode)' condition is on a line elided from this excerpt. --- */
16446 arm_feature_set variant;
16448 variant = cpu_variant;
16449 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
16450 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
16451 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
16452 /* Check that this instruction is supported for this CPU. */
16453 if (!opcode->tvariant
16454 || (thumb_mode == 1
16455 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
16457 as_bad (_("selected processor does not support Thumb mode `%s'"), str);
16460 if (inst.cond != COND_ALWAYS && !unified_syntax
16461 && opcode->tencode != do_t_branch)
16463 as_bad (_("Thumb does not support conditional execution"));
16467 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
16469 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
16470 && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
16471 || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
16473 /* Two things are addressed here.
16474 1) Implicit require narrow instructions on Thumb-1.
16475 This avoids relaxation accidentally introducing Thumb-2
16477 2) Reject wide instructions in non Thumb-2 cores. */
16478 if (inst.size_req == 0)
16480 else if (inst.size_req == 4)
16482 as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
16488 inst.instruction = opcode->tvalue;
16490 if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
16492 /* Prepare the it_insn_type for those encodings that don't set
16494 it_fsm_pre_encode ();
16496 opcode->tencode ();
16498 it_fsm_post_encode ();
16501 if (!(inst.error || inst.relax))
/* 0xe800-0xffff is the 32-bit encoding space's first halfword;
   a value there would be an incomplete 32-bit encoding.  */
16503 gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
16504 inst.size = (inst.instruction > 0xffff ? 4 : 2);
16505 if (inst.size_req && inst.size_req != inst.size)
16507 as_bad (_("cannot honor width suffix -- `%s'"), str);
16512 /* Something has gone badly wrong if we try to relax a fixed size
16514 gas_assert (inst.size_req == 0 || !inst.relax);
16516 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
16517 *opcode->tvariant);
16518 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
16519 set those bits when Thumb-2 32-bit instructions are seen. ie.
16520 anything other than bl/blx and v6-M instructions.
16521 This is overly pessimistic for relaxable instructions. */
16522 if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
16524 && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
16525 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
16526 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
16529 check_neon_suffixes;
16533 mapping_state (MAP_THUMB);
/* --- ARM assembly path. --- */
16536 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
16540 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
16541 is_bx = (opcode->aencode == do_bx);
16543 /* Check that this instruction is supported for this CPU. */
16544 if (!(is_bx && fix_v4bx)
16545 && !(opcode->avariant &&
16546 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
16548 as_bad (_("selected processor does not support ARM mode `%s'"), str);
16553 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
16557 inst.instruction = opcode->avalue;
16558 if (opcode->tag == OT_unconditionalF)
16559 inst.instruction |= 0xF << 28;
16561 inst.instruction |= inst.cond << 28;
16562 inst.size = INSN_SIZE;
16563 if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
16565 it_fsm_pre_encode ();
16566 opcode->aencode ();
16567 it_fsm_post_encode ();
16569 /* Arm mode bx is marked as both v4T and v5 because it's still required
16570 on a hypothetical non-thumb v5 core. */
16572 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
16574 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
16575 *opcode->avariant);
16577 check_neon_suffixes;
16581 mapping_state (MAP_ARM);
16586 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
/* Warn about any Thumb IT block that was opened (state MANUAL_IT_BLOCK)
   but never closed by the time assembly finishes.  Scans the per-section
   IT state for every output section, then the current global IT state.
   NOTE(review): the function's return type line, local declarations and
   braces are elided from this extract.  */
16594 check_it_blocks_finished (void)
16599 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
16600 if (seg_info (sect)->tc_segment_info_data.current_it.state
16601 == MANUAL_IT_BLOCK)
16603 as_warn (_("section '%s' finished with an open IT block."),
      /* Also diagnose an IT block still open at end of file.  */
16607 if (now_it.state == MANUAL_IT_BLOCK)
16608 as_warn (_("file finished with an open IT block."));
16612 /* Various frobbings of labels and their addresses. */
/* Per-line hook: forget the label seen on the previous source line so
   that label-related frobbing only applies to the line it appears on.  */
16615 arm_start_line_hook (void)
16617 last_label_seen = NULL;
/* Called for every label defined: record it, tag it with the current
   Thumb/ARM state (and interworking state on COFF/ELF), close any
   automatic IT block, and optionally mark it as a Thumb function.  */
16621 arm_frob_label (symbolS * sym)
16623 last_label_seen = sym;
      /* Remember whether this label was defined in Thumb or ARM code.  */
16625 ARM_SET_THUMB (sym, thumb_mode);
16627 #if defined OBJ_COFF || defined OBJ_ELF
16628 ARM_SET_INTERWORK (sym, support_interwork);
      /* A label terminates any IT block that was opened implicitly.  */
16631 force_automatic_it_block_close ();
16633 /* Note - do not allow local symbols (.Lxxx) to be labelled
16634 as Thumb functions. This is because these labels, whilst
16635 they exist inside Thumb code, are not the entry points for
16636 possible ARM->Thumb calls. Also, these labels can be used
16637 as part of a computed goto or switch statement. eg gcc
16638 can generate code that looks like this:
16640 ldr r2, [pc, .Laaa]
16650 The first instruction loads the address of the jump table.
16651 The second instruction converts a table index into a byte offset.
16652 The third instruction gets the jump address out of the table.
16653 The fourth instruction performs the jump.
16655 If the address stored at .Laaa is that of a symbol which has the
16656 Thumb_Func bit set, then the linker will arrange for this address
16657 to have the bottom bit set, which in turn would mean that the
16658 address computation performed by the third instruction would end
16659 up with the bottom bit set. Since the ARM is capable of unaligned
16660 word loads, the instruction would then load the incorrect address
16661 out of the jump table, and chaos would ensue. */
16662 if (label_is_thumb_function_name
16663 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
16664 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
16666 /* When the address of a Thumb function is taken the bottom
16667 bit of that address should be set. This will allow
16668 interworking between Arm and Thumb functions to work
16671 THUMB_SET_FUNC (sym, 1);
      /* The .thumb_func marker applies to one label only.  */
16673 label_is_thumb_function_name = FALSE;
16676 dwarf2_emit_label (sym);
/* Detect the "/data:" marker that follows a symbol name to flag data
   embedded in Thumb code.  If present, rewrite the separator to '/',
   consume the marker and NUL-terminate the name in the input buffer.
   NOTE(review): the return statements are elided from this extract;
   presumably returns nonzero when the marker was consumed -- confirm.  */
16680 arm_data_in_code (void)
16682 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
16684 *input_line_pointer = '/';
16685 input_line_pointer += 5;
16686 *input_line_pointer = 0;
/* Canonicalize a symbol name by stripping a trailing "/data" suffix
   (the marker recognized by arm_data_in_code) when assembling Thumb.
   The string is truncated in place.  NOTE(review): the return of NAME
   is elided from this extract.  */
16694 arm_canonicalize_symbol_name (char * name)
16698 if (thumb_mode && (len = strlen (name)) > 5
16699 && streq (name + len - 5, "/data"))
16700 *(name + len - 5) = 0;
16705 /* Table of all register names defined by default. The user can
16706 define additional names with .req. Note that all register names
16707 should appear in both upper and lowercase variants. Some registers
16708 also have mixed-case names. */
      /* REGDEF builds one struct reg_entry initializer from a bare name
         token S, an encoded register number N and a REG_TYPE_ suffix T;
         the TRUE marks the entry as builtin (not user-.req-defined).  */
16710 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
16711 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
      /* REGNUM2 stores 2*N: used for Neon Q registers, which occupy
         pairs of D registers.  */
16712 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
      /* REGSET/REGSETH expand to sixteen consecutive REGNUM entries
         (0-15 and 16-31 respectively); REGSET2 likewise via REGNUM2.  */
16713 #define REGSET(p,t) \
16714 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
16715 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
16716 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
16717 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
16718 #define REGSETH(p,t) \
16719 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
16720 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
16721 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
16722 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
16723 #define REGSET2(p,t) \
16724 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
16725 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
16726 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
16727 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
      /* Define the lr/sp/spsr triple (upper and lower case) for one
         banked processor mode; 768 (0x300) selects the banked-register
         encoding group and SPSR_BIT distinguishes the SPSR entry.
         NOTE(review): exact consumption of these bits happens in code
         not visible in this extract.  */
16728 #define SPLRBANK(base,bank,t) \
16729 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
16730 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
16731 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
16732 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
16733 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
16734 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
/* Default register-name table (users add more via .req).  Every name is
   listed in both upper and lower case, per the convention noted above.  */
16736 static const struct reg_entry reg_names[] =
16738 /* ARM integer registers. */
16739 REGSET(r, RN), REGSET(R, RN),
16741 /* ATPCS synonyms. */
16742 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
16743 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
16744 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
16746 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
16747 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
16748 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
16750 /* Well-known aliases. */
16751 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
16752 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
16754 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
16755 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
16757 /* Coprocessor numbers. */
16758 REGSET(p, CP), REGSET(P, CP),
16760 /* Coprocessor register numbers. The "cr" variants are for backward
16762 REGSET(c, CN), REGSET(C, CN),
16763 REGSET(cr, CN), REGSET(CR, CN),
16765 /* ARM banked registers. */
16766 REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
16767 REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
16768 REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
16769 REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
16770 REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
16771 REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
16772 REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
16774 REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
16775 REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
16776 REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
16777 REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
16778 REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
      /* Fix: the second entry previously duplicated SP_fiq; define the
         lowercase sp_fiq alias like every other banked register.  */
16779 REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
16780 REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
16781 REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
16783 SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
16784 SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
16785 SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
16786 SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
16787 SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
16788 REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
16789 REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
16790 REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
16791 REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
16793 /* FPA registers. */
16794 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
16795 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
16797 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
16798 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
16800 /* VFP SP registers. */
16801 REGSET(s,VFS), REGSET(S,VFS),
16802 REGSETH(s,VFS), REGSETH(S,VFS),
16804 /* VFP DP Registers. */
16805 REGSET(d,VFD), REGSET(D,VFD),
16806 /* Extra Neon DP registers. */
16807 REGSETH(d,VFD), REGSETH(D,VFD),
16809 /* Neon QP registers. */
16810 REGSET2(q,NQ), REGSET2(Q,NQ),
16812 /* VFP control registers. */
16813 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
16814 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
16815 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
16816 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
16817 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
16818 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
16820 /* Maverick DSP coprocessor registers. */
16821 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
16822 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
16824 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
16825 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
16826 REGDEF(dspsc,0,DSPSC),
16828 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
16829 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
16830 REGDEF(DSPSC,0,DSPSC),
16832 /* iWMMXt data registers - p0, c0-15. */
16833 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
16835 /* iWMMXt control registers - p1, c0-3. */
16836 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
16837 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
16838 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
16839 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
16841 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
16842 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
16843 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
16844 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
16845 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
16847 /* XScale accumulator registers. */
16848 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
16854 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
16855 within psr_required_here. */
/* PSR field-mask suffixes accepted after CPSR/SPSR in MSR operands.
   Every permutation of the f/s/x/c field letters is listed so that any
   ordering is accepted.  */
16856 static const struct asm_psr psrs[] =
16858 /* Backward compatibility notation. Note that "all" is no longer
16859 truly all possible PSR bits. */
16860 {"all", PSR_c | PSR_f},
16864 /* Individual flags. */
16870 /* Combinations of flags. */
16871 {"fs", PSR_f | PSR_s},
16872 {"fx", PSR_f | PSR_x},
16873 {"fc", PSR_f | PSR_c},
16874 {"sf", PSR_s | PSR_f},
16875 {"sx", PSR_s | PSR_x},
16876 {"sc", PSR_s | PSR_c},
16877 {"xf", PSR_x | PSR_f},
16878 {"xs", PSR_x | PSR_s},
16879 {"xc", PSR_x | PSR_c},
16880 {"cf", PSR_c | PSR_f},
16881 {"cs", PSR_c | PSR_s},
16882 {"cx", PSR_c | PSR_x},
16883 {"fsx", PSR_f | PSR_s | PSR_x},
16884 {"fsc", PSR_f | PSR_s | PSR_c},
16885 {"fxs", PSR_f | PSR_x | PSR_s},
16886 {"fxc", PSR_f | PSR_x | PSR_c},
16887 {"fcs", PSR_f | PSR_c | PSR_s},
16888 {"fcx", PSR_f | PSR_c | PSR_x},
16889 {"sfx", PSR_s | PSR_f | PSR_x},
16890 {"sfc", PSR_s | PSR_f | PSR_c},
16891 {"sxf", PSR_s | PSR_x | PSR_f},
16892 {"sxc", PSR_s | PSR_x | PSR_c},
16893 {"scf", PSR_s | PSR_c | PSR_f},
16894 {"scx", PSR_s | PSR_c | PSR_x},
16895 {"xfs", PSR_x | PSR_f | PSR_s},
16896 {"xfc", PSR_x | PSR_f | PSR_c},
16897 {"xsf", PSR_x | PSR_s | PSR_f},
16898 {"xsc", PSR_x | PSR_s | PSR_c},
16899 {"xcf", PSR_x | PSR_c | PSR_f},
16900 {"xcs", PSR_x | PSR_c | PSR_s},
16901 {"cfs", PSR_c | PSR_f | PSR_s},
16902 {"cfx", PSR_c | PSR_f | PSR_x},
16903 {"csf", PSR_c | PSR_s | PSR_f},
16904 {"csx", PSR_c | PSR_s | PSR_x},
16905 {"cxf", PSR_c | PSR_x | PSR_f},
16906 {"cxs", PSR_c | PSR_x | PSR_s},
16907 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
16908 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
16909 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
16910 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
16911 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
16912 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
16913 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
16914 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
16915 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
16916 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
16917 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
16918 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
16919 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
16920 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
16921 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
16922 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
16923 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
16924 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
16925 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
16926 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
16927 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
16928 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
16929 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
16930 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
16933 /* Table of V7M psr names. */
/* v7-M special-register names, each with its numeric encoding.
   NOTE(review): the values match the SYSm encodings used by the v7-M
   MRS/MSR instructions -- confirm against the consuming encode code,
   which is not visible in this extract.  */
16934 static const struct asm_psr v7m_psrs[] =
16936 {"apsr", 0 }, {"APSR", 0 },
16937 {"iapsr", 1 }, {"IAPSR", 1 },
16938 {"eapsr", 2 }, {"EAPSR", 2 },
16939 {"psr", 3 }, {"PSR", 3 },
16940 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
16941 {"ipsr", 5 }, {"IPSR", 5 },
16942 {"epsr", 6 }, {"EPSR", 6 },
16943 {"iepsr", 7 }, {"IEPSR", 7 },
16944 {"msp", 8 }, {"MSP", 8 },
16945 {"psp", 9 }, {"PSP", 9 },
16946 {"primask", 16}, {"PRIMASK", 16},
16947 {"basepri", 17}, {"BASEPRI", 17},
16948 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
      /* Deliberate duplicate: "BASEPRI_MASK" was a typo in an earlier
         release and is kept so old sources still assemble.  */
16949 {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility. */
16950 {"faultmask", 19}, {"FAULTMASK", 19},
16951 {"control", 20}, {"CONTROL", 20}
16954 /* Table of all shift-in-operand names. */
/* Shift-operator names mapped to their internal SHIFT_* codes.  Note
   that "asl" is accepted as a synonym for "lsl".  */
16955 static const struct asm_shift_name shift_names [] =
16957 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
16958 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
16959 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
16960 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
16961 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
16962 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
16965 /* Table of all explicit relocation names. */
/* Explicit relocation specifiers (e.g. ":got:" operand prefixes), each
   in lower- and upper-case form, mapped to the BFD relocation code.  */
16967 static struct reloc_entry reloc_names[] =
16969 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
16970 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
16971 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
16972 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
16973 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
16974 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
16975 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
16976 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
16977 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
16978 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
16979 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
16980 { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
16981 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
16982 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
16983 { "tlscall", BFD_RELOC_ARM_TLS_CALL},
16984 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
16985 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
16986 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
16990 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
/* Condition-code suffixes mapped to their 4-bit encodings; aliases
   (hs/cs, lo/cc/ul) share an encoding.  NOTE(review): most entries of
   this table are elided from this extract.  */
16991 static const struct asm_cond conds[] =
16995 {"cs", 0x2}, {"hs", 0x2},
16996 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
/* Memory-barrier option names (DMB/DSB operands) and their 4-bit
   option-field values.  "un"/"unst" are old spellings with the same
   values as "nsh"/"nshst"; "sh"/"shst" alias "ish"/"ishst".  */
17010 static struct asm_barrier_opt barrier_opt_names[] =
17012 { "sy", 0xf }, { "SY", 0xf },
17013 { "un", 0x7 }, { "UN", 0x7 },
17014 { "st", 0xe }, { "ST", 0xe },
17015 { "unst", 0x6 }, { "UNST", 0x6 },
17016 { "ish", 0xb }, { "ISH", 0xb },
17017 { "sh", 0xb }, { "SH", 0xb },
17018 { "ishst", 0xa }, { "ISHST", 0xa },
17019 { "shst", 0xa }, { "SHST", 0xa },
17020 { "nsh", 0x7 }, { "NSH", 0x7 },
17021 { "nshst", 0x6 }, { "NSHST", 0x6 },
17022 { "osh", 0x3 }, { "OSH", 0x3 },
17023 { "oshst", 0x2 }, { "OSHST", 0x2 }
17026 /* Table of ARM-format instructions. */
17028 /* Macros for gluing together operand strings. N.B. In all cases
17029 other than OPS0, the trailing OP_stop comes from default
17030 zero-initialization of the unspecified elements of the array. */
/* OPSn builds an operand-type list of n entries, prefixing each
   argument with OP_; OPS0 is the empty list (just the terminator).  */
17031 #define OPS0() { OP_stop, }
17032 #define OPS1(a) { OP_##a, }
17033 #define OPS2(a,b) { OP_##a,OP_##b, }
17034 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
17035 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
17036 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
17037 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
17039 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
17040 This is useful when mixing operands for ARM and THUMB, i.e. using the
17041 MIX_ARM_THUMB_OPERANDS macro.
17042 In order to use these macros, prefix the number of operands with _
/* OPS_n variants take already-prefixed OP_* tokens verbatim (used with
   MIX_ARM_THUMB_OPERANDS, per the comment above).  */
17044 #define OPS_1(a) { a, }
17045 #define OPS_2(a,b) { a,b, }
17046 #define OPS_3(a,b,c) { a,b,c, }
17047 #define OPS_4(a,b,c,d) { a,b,c,d, }
17048 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
17049 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
17051 /* These macros abstract out the exact format of the mnemonic table and
17052 save some repeated characters. */
17054 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
/* TxCE: base macro for a conditional-suffix mnemonic with both ARM and
   Thumb encodings.  The ARM opcode OP gets a 0x prefix here; TOP is
   passed through untouched so callers can supply either a literal
   opcode (TCE) or a T_MNEM_* enumerator (tCE).  */
17055 #define TxCE(mnem, op, top, nops, ops, ae, te) \
17056 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
17057 THUMB_VARIANT, do_##ae, do_##te }
17059 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
17060 a T_MNEM_xyz enumerator. */
17061 #define TCE(mnem, aop, top, nops, ops, ae, te) \
17062 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
17063 #define tCE(mnem, aop, top, nops, ops, ae, te) \
17064 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
17066 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
17067 infix after the third character. */
17068 #define TxC3(mnem, op, top, nops, ops, ae, te) \
17069 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
17070 THUMB_VARIANT, do_##ae, do_##te }
17071 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
17072 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
17073 THUMB_VARIANT, do_##ae, do_##te }
17074 #define TC3(mnem, aop, top, nops, ops, ae, te) \
17075 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
17076 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
17077 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
17078 #define tC3(mnem, aop, top, nops, ops, ae, te) \
17079 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
17080 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
17081 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
17083 /* Mnemonic with a conditional infix in an unusual place.  Each and every variant has to
17084 appear in the condition table. */
      /* TxCM_ splices the condition M2 between M1 and M3 to form the
         full mnemonic string; the OT_odd_infix_0 + sizeof (m1) - 1 tag
         records where in the name the infix sits.  */
17085 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
17086 { m1 #m2 m3, OPS##nops ops, sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
17087 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
      /* Expand one entry per condition code (plus the bare form).  */
17089 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
17090 TxCM_ (m1, , m2, op, top, nops, ops, ae, te), \
17091 TxCM_ (m1, eq, m2, op, top, nops, ops, ae, te), \
17092 TxCM_ (m1, ne, m2, op, top, nops, ops, ae, te), \
17093 TxCM_ (m1, cs, m2, op, top, nops, ops, ae, te), \
17094 TxCM_ (m1, hs, m2, op, top, nops, ops, ae, te), \
17095 TxCM_ (m1, cc, m2, op, top, nops, ops, ae, te), \
17096 TxCM_ (m1, ul, m2, op, top, nops, ops, ae, te), \
17097 TxCM_ (m1, lo, m2, op, top, nops, ops, ae, te), \
17098 TxCM_ (m1, mi, m2, op, top, nops, ops, ae, te), \
17099 TxCM_ (m1, pl, m2, op, top, nops, ops, ae, te), \
17100 TxCM_ (m1, vs, m2, op, top, nops, ops, ae, te), \
17101 TxCM_ (m1, vc, m2, op, top, nops, ops, ae, te), \
17102 TxCM_ (m1, hi, m2, op, top, nops, ops, ae, te), \
17103 TxCM_ (m1, ls, m2, op, top, nops, ops, ae, te), \
17104 TxCM_ (m1, ge, m2, op, top, nops, ops, ae, te), \
17105 TxCM_ (m1, lt, m2, op, top, nops, ops, ae, te), \
17106 TxCM_ (m1, gt, m2, op, top, nops, ops, ae, te), \
17107 TxCM_ (m1, le, m2, op, top, nops, ops, ae, te), \
17108 TxCM_ (m1, al, m2, op, top, nops, ops, ae, te)
17110 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
17111 TxCM (m1,m2, aop, 0x##top, nops, ops, ae, te)
17112 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
17113 TxCM (m1,m2, aop, T_MNEM##top, nops, ops, ae, te)
17115 /* Mnemonic that cannot be conditionalized. The ARM condition-code
17116 field is still 0xE. Many of the Thumb variants can be executed
17117 conditionally, so this is checked separately. */
17118 #define TUE(mnem, op, top, nops, ops, ae, te) \
17119 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
17120 THUMB_VARIANT, do_##ae, do_##te }
17122 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
17123 condition code field. */
17124 #define TUF(mnem, op, top, nops, ops, ae, te) \
17125 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
17126 THUMB_VARIANT, do_##ae, do_##te }
17128 /* ARM-only variants of all the above. */
17129 #define CE(mnem, op, nops, ops, ae) \
17130 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17132 #define C3(mnem, op, nops, ops, ae) \
17133 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17135 /* Legacy mnemonics that always have conditional infix after the third
17137 #define CL(mnem, op, nops, ops, ae) \
17138 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
17139 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17141 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
      /* The Thumb opcode is the ARM one with a leading 0xe nibble
         (NOTE(review): presumably the always-condition field of the
         Thumb-2 coprocessor encoding -- confirm).  */
17142 #define cCE(mnem, op, nops, ops, ae) \
17143 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17145 /* Legacy coprocessor instructions where conditional infix and conditional
17146 suffix are ambiguous. For consistency this includes all FPA instructions,
17147 not just the potentially ambiguous ones. */
17148 #define cCL(mnem, op, nops, ops, ae) \
17149 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
17150 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
17152 /* Coprocessor, takes either a suffix or a position-3 infix
17153 (for an FPA corner case). */
17154 #define C3E(mnem, op, nops, ops, ae) \
17155 { mnem, OPS##nops ops, OT_csuf_or_in3, \
17156 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
      /* ARM-only analogues of TxCM_/TxCM: condition infix, no Thumb.  */
17158 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
17159 { m1 #m2 m3, OPS##nops ops, \
17160 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
17161 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
17163 #define CM(m1, m2, op, nops, ops, ae) \
17164 xCM_ (m1, , m2, op, nops, ops, ae), \
17165 xCM_ (m1, eq, m2, op, nops, ops, ae), \
17166 xCM_ (m1, ne, m2, op, nops, ops, ae), \
17167 xCM_ (m1, cs, m2, op, nops, ops, ae), \
17168 xCM_ (m1, hs, m2, op, nops, ops, ae), \
17169 xCM_ (m1, cc, m2, op, nops, ops, ae), \
17170 xCM_ (m1, ul, m2, op, nops, ops, ae), \
17171 xCM_ (m1, lo, m2, op, nops, ops, ae), \
17172 xCM_ (m1, mi, m2, op, nops, ops, ae), \
17173 xCM_ (m1, pl, m2, op, nops, ops, ae), \
17174 xCM_ (m1, vs, m2, op, nops, ops, ae), \
17175 xCM_ (m1, vc, m2, op, nops, ops, ae), \
17176 xCM_ (m1, hi, m2, op, nops, ops, ae), \
17177 xCM_ (m1, ls, m2, op, nops, ops, ae), \
17178 xCM_ (m1, ge, m2, op, nops, ops, ae), \
17179 xCM_ (m1, lt, m2, op, nops, ops, ae), \
17180 xCM_ (m1, gt, m2, op, nops, ops, ae), \
17181 xCM_ (m1, le, m2, op, nops, ops, ae), \
17182 xCM_ (m1, al, m2, op, nops, ops, ae)
17184 #define UE(mnem, op, nops, ops, ae) \
17185 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
17187 #define UF(mnem, op, nops, ops, ae) \
17188 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
17190 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
17191 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
17192 use the same encoding function for each. */
17193 #define NUF(mnem, op, nops, ops, enc) \
17194 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
17195 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17197 /* Neon data processing, version which indirects through neon_enc_tab for
17198 the various overloaded versions of opcodes. */
17199 #define nUF(mnem, op, nops, ops, enc) \
17200 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
17201 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17203 /* Neon insn with conditional suffix for the ARM version, non-overloaded
17205 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
17206 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
17207 THUMB_VARIANT, do_##enc, do_##enc }
17209 #define NCE(mnem, op, nops, ops, enc) \
17210 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
17212 #define NCEF(mnem, op, nops, ops, enc) \
17213 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
17215 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
17216 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
17217 { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
17218 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
17220 #define nCE(mnem, op, nops, ops, enc) \
17221 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
17223 #define nCEF(mnem, op, nops, ops, enc) \
17224 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
17228 static const struct asm_opcode insns[] =
17230 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
17231 #define THUMB_VARIANT &arm_ext_v4t
17232 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
17233 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
17234 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
17235 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
17236 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
17237 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
17238 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
17239 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
17240 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
17241 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
17242 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
17243 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
17244 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
17245 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
17246 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
17247 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
17249 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
17250 for setting PSR flag bits. They are obsolete in V6 and do not
17251 have Thumb equivalents. */
17252 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
17253 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
17254 CL("tstp", 110f000, 2, (RR, SH), cmp),
17255 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
17256 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
17257 CL("cmpp", 150f000, 2, (RR, SH), cmp),
17258 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
17259 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
17260 CL("cmnp", 170f000, 2, (RR, SH), cmp),
17262 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
17263 tC3("movs", 1b00000, _movs, 2, (RR, SH), mov, t_mov_cmp),
17264 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
17265 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
17267 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
17268 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
17269 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
17271 OP_ADDRGLDR),ldst, t_ldst),
17272 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
17274 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17275 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17276 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17277 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17278 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17279 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17281 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
17282 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
17283 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
17284 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
17287 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
17288 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
17289 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
17291 /* Thumb-compatibility pseudo ops. */
17292 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
17293 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
17294 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
17295 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
17296 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
17297 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
17298 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
17299 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
17300 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
17301 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
17302 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
17303 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
17305 /* These may simplify to neg. */
17306 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
17307 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
17309 #undef THUMB_VARIANT
17310 #define THUMB_VARIANT & arm_ext_v6
17312 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
17314 /* V1 instructions with no Thumb analogue prior to V6T2. */
17315 #undef THUMB_VARIANT
17316 #define THUMB_VARIANT & arm_ext_v6t2
17318 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
17319 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
17320 CL("teqp", 130f000, 2, (RR, SH), cmp),
17322 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17323 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17324 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
17325 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
17327 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17328 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17330 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17331 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
17333 /* V1 instructions with no Thumb analogue at all. */
17334 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
17335 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
17337 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
17338 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
17339 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
17340 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
17341 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
17342 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
17343 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
17344 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
17347 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
17348 #undef THUMB_VARIANT
17349 #define THUMB_VARIANT & arm_ext_v4t
17351 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
17352 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
17354 #undef THUMB_VARIANT
17355 #define THUMB_VARIANT & arm_ext_v6t2
17357 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
17358 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
17360 /* Generic coprocessor instructions. */
17361 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
17362 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17363 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17364 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17365 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17366 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
17367 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
17370 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
17372 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
17373 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
17376 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
17377 #undef THUMB_VARIANT
17378 #define THUMB_VARIANT & arm_ext_msr
17380 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
17381 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
17384 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
17385 #undef THUMB_VARIANT
17386 #define THUMB_VARIANT & arm_ext_v6t2
17388 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17389 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17390 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17391 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17392 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17393 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17394 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
17395 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
17398 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
17399 #undef THUMB_VARIANT
17400 #define THUMB_VARIANT & arm_ext_v4t
17402 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17403 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17404 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17405 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17406 tCM("ld","sh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17407 tCM("ld","sb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
17410 #define ARM_VARIANT & arm_ext_v4t_5
17412 /* ARM Architecture 4T. */
17413 /* Note: bx (and blx) are required on V5, even if the processor does
17414 not support Thumb. */
17415 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
17418 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
17419 #undef THUMB_VARIANT
17420 #define THUMB_VARIANT & arm_ext_v5t
17422 /* Note: blx has 2 variants; the .value coded here is for
17423 BLX(2). Only this variant has conditional execution. */
17424 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
17425 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
17427 #undef THUMB_VARIANT
17428 #define THUMB_VARIANT & arm_ext_v6t2
17430 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
17431 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17432 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17433 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17434 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
17435 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
17436 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
17437 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
17440 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
17441 #undef THUMB_VARIANT
17442 #define THUMB_VARIANT &arm_ext_v5exp
17444 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
17445 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
17446 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
17447 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
17449 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
17450 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
17452 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
17453 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
17454 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
17455 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
17457 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17458 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17459 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17460 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17462 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17463 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17465 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
17466 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
17467 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
17468 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
17471 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
17472 #undef THUMB_VARIANT
17473 #define THUMB_VARIANT &arm_ext_v6t2
17475 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
17476 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
17478 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
17479 ADDRGLDRS), ldrd, t_ldstd),
17481 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17482 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17485 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
17487 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
17490 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
17491 #undef THUMB_VARIANT
17492 #define THUMB_VARIANT & arm_ext_v6
17494 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
17495 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
17496 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
17497 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
17498 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
17499 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
17500 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
17501 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
17502 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
17503 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
17505 #undef THUMB_VARIANT
17506 #define THUMB_VARIANT & arm_ext_v6t2
17508 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
17509 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
17511 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17512 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
17514 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
17515 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
17517 /* ARM V6 not included in V7M. */
17518 #undef THUMB_VARIANT
17519 #define THUMB_VARIANT & arm_ext_v6_notm
17520 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
17521 UF(rfeib, 9900a00, 1, (RRw), rfe),
17522 UF(rfeda, 8100a00, 1, (RRw), rfe),
17523 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
17524 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
17525 UF(rfefa, 9900a00, 1, (RRw), rfe),
17526 UF(rfeea, 8100a00, 1, (RRw), rfe),
17527 TUF("rfeed", 9100a00, e810c000, 1, (RRw), rfe, rfe),
17528 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
17529 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
17530 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
17531 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
17533 /* ARM V6 not included in V7M (eg. integer SIMD). */
17534 #undef THUMB_VARIANT
17535 #define THUMB_VARIANT & arm_ext_v6_dsp
17536 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
17537 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
17538 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
17539 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17540 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17541 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17542 /* Old name for QASX. */
17543 TCE("qaddsubx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17544 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17545 /* Old name for QSAX. */
17546 TCE("qsubaddx", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17547 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17548 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17549 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17550 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17551 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17552 /* Old name for SASX. */
17553 TCE("saddsubx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17554 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17555 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17556 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17557 /* Old name for SHASX. */
17558 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17559 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17560 /* Old name for SHSAX. */
17561 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17562 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17563 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17564 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17565 /* Old name for SSAX. */
17566 TCE("ssubaddx", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17567 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17568 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17569 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17570 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17571 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17572 /* Old name for UASX. */
17573 TCE("uaddsubx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17574 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17575 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17576 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17577 /* Old name for UHASX. */
17578 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17579 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17580 /* Old name for UHSAX. */
17581 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17582 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17583 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17584 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17585 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17586 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17587 /* Old name for UQASX. */
17588 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17589 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17590 /* Old name for UQSAX. */
17591 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17592 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17593 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17594 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17595 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17596 /* Old name for USAX. */
17597 TCE("usubaddx", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17598 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17599 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17600 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17601 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17602 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
17603 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17604 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17605 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
17606 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
17607 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
17608 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17609 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17610 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17611 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17612 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17613 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17614 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17615 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
17616 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17617 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17618 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17619 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17620 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17621 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17622 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17623 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17624 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17625 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17626 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
17627 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
17628 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
17629 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
17630 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
17633 #define ARM_VARIANT & arm_ext_v6k
17634 #undef THUMB_VARIANT
17635 #define THUMB_VARIANT & arm_ext_v6k
17637 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
17638 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
17639 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
17640 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
17642 #undef THUMB_VARIANT
17643 #define THUMB_VARIANT & arm_ext_v6_notm
17644 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
17646 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
17647 RRnpcb), strexd, t_strexd),
17649 #undef THUMB_VARIANT
17650 #define THUMB_VARIANT & arm_ext_v6t2
17651 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
17653 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
17655 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
17657 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
17659 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
17662 #define ARM_VARIANT & arm_ext_sec
17663 #undef THUMB_VARIANT
17664 #define THUMB_VARIANT & arm_ext_sec
17666 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
17669 #define ARM_VARIANT & arm_ext_virt
17670 #undef THUMB_VARIANT
17671 #define THUMB_VARIANT & arm_ext_virt
17673 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
17674 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
17677 #define ARM_VARIANT & arm_ext_v6t2
17678 #undef THUMB_VARIANT
17679 #define THUMB_VARIANT & arm_ext_v6t2
17681 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
17682 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
17683 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
17684 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
17686 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
17687 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
17688 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
17689 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
17691 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17692 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17693 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17694 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
17696 /* Thumb-only instructions. */
17698 #define ARM_VARIANT NULL
17699 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
17700 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
17702 /* ARM does not really have an IT instruction, so always allow it.
17703 The opcode is copied from Thumb in order to allow warnings in
17704 -mimplicit-it=[never | arm] modes. */
17706 #define ARM_VARIANT & arm_ext_v1
17708 TUE("it", bf08, bf08, 1, (COND), it, t_it),
17709 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
17710 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
17711 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
17712 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
17713 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
17714 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
17715 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
17716 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
17717 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
17718 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
17719 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
17720 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
17721 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
17722 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
17723 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
17724 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
17725 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
17727 /* Thumb2 only instructions. */
17729 #define ARM_VARIANT NULL
17731 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
17732 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
17733 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
17734 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
17735 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
17736 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
17738 /* Hardware division instructions. */
17740 #define ARM_VARIANT & arm_ext_adiv
17741 #undef THUMB_VARIANT
17742 #define THUMB_VARIANT & arm_ext_div
17744 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
17745 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
17747 /* ARM V6M/V7 instructions. */
17749 #define ARM_VARIANT & arm_ext_barrier
17750 #undef THUMB_VARIANT
17751 #define THUMB_VARIANT & arm_ext_barrier
17753 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, t_barrier),
17754 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, t_barrier),
17755 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, t_barrier),
17757 /* ARM V7 instructions. */
17759 #define ARM_VARIANT & arm_ext_v7
17760 #undef THUMB_VARIANT
17761 #define THUMB_VARIANT & arm_ext_v7
17763 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
17764 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
17767 #define ARM_VARIANT & arm_ext_mp
17768 #undef THUMB_VARIANT
17769 #define THUMB_VARIANT & arm_ext_mp
17771 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
17774 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
17776 cCE("wfs", e200110, 1, (RR), rd),
17777 cCE("rfs", e300110, 1, (RR), rd),
17778 cCE("wfc", e400110, 1, (RR), rd),
17779 cCE("rfc", e500110, 1, (RR), rd),
17781 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
17782 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
17783 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
17784 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
17786 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
17787 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
17788 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
17789 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
17791 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
17792 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
17793 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
17794 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
17795 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
17796 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
17797 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
17798 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
17799 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
17800 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
17801 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
17802 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
17804 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
17805 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
17806 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
17807 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
17808 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
17809 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
17810 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
17811 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
17812 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
17813 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
17814 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
17815 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
17817 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
17818 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
17819 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
17820 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
17821 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
17822 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
17823 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
17824 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
17825 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
17826 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
17827 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
17828 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
17830 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
17831 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
17832 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
17833 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
17834 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
17835 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
17836 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
17837 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
17838 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
17839 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
17840 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
17841 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
17843 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
17844 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
17845 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
17846 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
17847 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
17848 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
17849 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
17850 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
17851 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
17852 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
17853 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
17854 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
17856 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
17857 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
17858 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
17859 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
17860 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
17861 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
17862 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
17863 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
17864 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
17865 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
17866 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
17867 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
17869 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
17870 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
17871 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
17872 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
17873 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
17874 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
17875 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
17876 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
17877 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
17878 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
17879 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
17880 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
17882 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
17883 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
17884 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
17885 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
17886 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
17887 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
17888 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
17889 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
17890 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
17891 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
17892 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
17893 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
17895 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
17896 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
17897 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
17898 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
17899 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
17900 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
17901 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
17902 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
17903 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
17904 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
17905 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
17906 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
17908 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
17909 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
17910 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
17911 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
17912 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
17913 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
17914 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
17915 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
17916 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
17917 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
17918 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
17919 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
17921 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
17922 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
17923 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
17924 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
17925 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
17926 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
17927 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
17928 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
17929 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
17930 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
17931 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
17932 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
17934 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
17935 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
17936 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
17937 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
17938 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
17939 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
17940 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
17941 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
17942 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
17943 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
17944 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
17945 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
17947 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
17948 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
17949 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
17950 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
17951 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
17952 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
17953 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
17954 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
17955 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
17956 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
17957 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
17958 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
17960 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
17961 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
17962 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
17963 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
17964 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
17965 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
17966 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
17967 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
17968 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
17969 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
17970 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
17971 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
17973 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
17974 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
17975 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
17976 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
17977 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
17978 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
17979 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
17980 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
17981 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
17982 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
17983 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
17984 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
17986 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
17987 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
17988 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
17989 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
17990 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
17991 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
17992 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
17993 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
17994 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
17995 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
17996 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
17997 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
17999 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
18000 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
18001 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
18002 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
18003 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
18004 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18005 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18006 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18007 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
18008 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
18009 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
18010 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
18012 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
18013 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
18014 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
18015 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
18016 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
18017 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18018 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18019 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18020 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
18021 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
18022 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
18023 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
18025 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
18026 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
18027 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
18028 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
18029 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
18030 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18031 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18032 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18033 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
18034 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
18035 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
18036 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
18038 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
18039 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
18040 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
18041 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
18042 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
18043 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18044 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18045 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18046 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
18047 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
18048 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
18049 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
18051 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
18052 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
18053 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
18054 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
18055 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
18056 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18057 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18058 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18059 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
18060 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
18061 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
18062 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
18064 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
18065 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
18066 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
18067 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
18068 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
18069 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18070 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18071 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18072 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
18073 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
18074 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
18075 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
18077 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
18078 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
18079 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
18080 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
18081 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
18082 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18083 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18084 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18085 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
18086 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
18087 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
18088 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
18090 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
18091 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
18092 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
18093 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
18094 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
18095 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18096 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18097 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18098 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
18099 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
18100 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
18101 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
18103 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
18104 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
18105 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
18106 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
18107 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
18108 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18109 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18110 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18111 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
18112 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
18113 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
18114 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
18116 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
18117 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
18118 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
18119 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
18120 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
18121 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18122 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18123 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18124 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
18125 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
18126 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
18127 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
18129 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18130 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18131 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18132 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18133 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18134 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18135 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18136 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18137 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18138 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18139 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18140 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18142 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18143 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18144 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18145 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18146 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18147 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18148 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18149 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18150 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18151 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18152 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18153 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18155 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
18156 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
18157 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
18158 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
18159 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
18160 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
18161 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
18162 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
18163 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
18164 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
18165 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
18166 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
18168 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
18169 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
18170 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
18171 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
18173 cCL("flts", e000110, 2, (RF, RR), rn_rd),
18174 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
18175 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
18176 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
18177 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
18178 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
18179 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
18180 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
18181 cCL("flte", e080110, 2, (RF, RR), rn_rd),
18182 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
18183 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
18184 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
18186 /* The implementation of the FIX instruction is broken on some
18187 assemblers, in that it accepts a precision specifier as well as a
18188 rounding specifier, despite the fact that this is meaningless.
18189 To be more compatible, we accept it as well, though of course it
18190 does not set any bits. */
18191 cCE("fix", e100110, 2, (RR, RF), rd_rm),
18192 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
18193 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
18194 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
18195 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
18196 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
18197 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
18198 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
18199 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
18200 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
18201 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
18202 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
18203 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
18205 /* Instructions that were new with the real FPA, call them V2. */
18207 #define ARM_VARIANT & fpu_fpa_ext_v2
18209 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18210 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18211 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18212 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18213 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18214 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
18217 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
18219 /* Moves and type conversions. */
18220 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
18221 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
18222 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
18223 cCE("fmstat", ef1fa10, 0, (), noargs),
18224 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
18225 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
18226 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
18227 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
18228 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
18229 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
18230 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
18231 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
18232 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
18233 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
18235 /* Memory operations. */
18236 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
18237 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
18238 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
18239 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
18240 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
18241 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
18242 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
18243 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
18244 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
18245 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
18246 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
18247 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
18248 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
18249 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
18250 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
18251 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
18252 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
18253 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
18255 /* Monadic operations. */
18256 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
18257 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
18258 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
18260 /* Dyadic operations. */
18261 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18262 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18263 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18264 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18265 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18266 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18267 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18268 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18269 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18272 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
18273 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
18274 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
18275 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
18277 /* Double precision load/store are still present on single precision
18278 implementations. */
18279 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
18280 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
18281 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
18282 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
18283 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
18284 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
18285 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
18286 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
18287 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
18288 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
18291 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
18293 /* Moves and type conversions. */
18294 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
18295 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
18296 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
18297 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
18298 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
18299 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
18300 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
18301 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
18302 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
18303 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
18304 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
18305 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
18306 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
18308 /* Monadic operations. */
18309 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
18310 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
18311 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
18313 /* Dyadic operations. */
18314 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18315 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18316 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18317 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18318 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18319 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18320 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18321 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18322 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18325 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
18326 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
18327 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
18328 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
18331 #define ARM_VARIANT & fpu_vfp_ext_v2
18333 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
18334 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
18335 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
18336 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
18338 /* Instructions which may belong to either the Neon or VFP instruction sets.
18339 Individual encoder functions perform additional architecture checks. */
18341 #define ARM_VARIANT & fpu_vfp_ext_v1xd
18342 #undef THUMB_VARIANT
18343 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
18345 /* These mnemonics are unique to VFP. */
18346 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
18347 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
18348 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18349 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18350 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18351 nCE(vcmp, _vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
18352 nCE(vcmpe, _vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
18353 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
18354 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
18355 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
18357 /* Mnemonics shared by Neon and VFP. */
18358 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
18359 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
18360 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
18362 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
18363 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
18365 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
18366 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
18368 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18369 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18370 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18371 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18372 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18373 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
18374 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
18375 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
18377 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
18378 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
18379 nCEF(vcvtb, _vcvt, 2, (RVS, RVS), neon_cvtb),
18380 nCEF(vcvtt, _vcvt, 2, (RVS, RVS), neon_cvtt),
18383 /* NOTE: All VMOV encoding is special-cased! */
18384 NCE(vmov, 0, 1, (VMOV), neon_mov),
18385 NCE(vmovq, 0, 1, (VMOV), neon_mov),
18387 #undef THUMB_VARIANT
18388 #define THUMB_VARIANT & fpu_neon_ext_v1
18390 #define ARM_VARIANT & fpu_neon_ext_v1
18392 /* Data processing with three registers of the same length. */
18393 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
18394 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
18395 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
18396 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18397 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
18398 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18399 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
18400 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
18401 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
18402 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
18403 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
18404 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
18405 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
18406 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
18407 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
18408 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
18409 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
18410 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
18411 /* If not immediate, fall back to neon_dyadic_i64_su.
18412 shl_imm should accept I8 I16 I32 I64,
18413 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
18414 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
18415 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
18416 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
18417 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
18418 /* Logic ops, types optional & ignored. */
18419 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18420 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
18421 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18422 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
18423 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18424 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
18425 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
18426 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
18427 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
18428 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
18429 /* Bitfield ops, untyped. */
18430 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18431 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
18432 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18433 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
18434 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
18435 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
18436 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
18437 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18438 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
18439 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18440 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
18441 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
18442 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
18443 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
18444 back to neon_dyadic_if_su. */
18445 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
18446 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
18447 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
18448 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
18449 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
18450 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
18451 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
18452 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
18453 /* Comparison. Type I8 I16 I32 F32. */
18454 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
18455 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
18456 /* As above, D registers only. */
18457 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
18458 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
18459 /* Int and float variants, signedness unimportant. */
18460 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
18461 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
18462 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
18463 /* Add/sub take types I8 I16 I32 I64 F32. */
18464 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
18465 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
18466 /* vtst takes sizes 8, 16, 32. */
18467 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
18468 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
18469 /* VMUL takes I8 I16 I32 F32 P8. */
18470 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
18471 /* VQD{R}MULH takes S16 S32. */
18472 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
18473 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
18474 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
18475 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
18476 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
18477 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
18478 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
18479 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
18480 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
18481 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
18482 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
18483 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
18484 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
18485 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
18486 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
18487 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
18489 /* Two address, int/float. Types S8 S16 S32 F32. */
18490 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
18491 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
18493 /* Data processing with two registers and a shift amount. */
18494 /* Right shifts, and variants with rounding.
18495 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
18496 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
18497 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
18498 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
18499 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
18500 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
18501 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
18502 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
18503 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
18504 /* Shift and insert. Sizes accepted 8 16 32 64. */
18505 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
18506 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
18507 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
18508 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
18509 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
18510 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
18511 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
18512 /* Right shift immediate, saturating & narrowing, with rounding variants.
18513 Types accepted S16 S32 S64 U16 U32 U64. */
18514 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
18515 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
18516 /* As above, unsigned. Types accepted S16 S32 S64. */
18517 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
18518 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
18519 /* Right shift narrowing. Types accepted I16 I32 I64. */
18520 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
18521 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
18522 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
18523 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
18524 /* CVT with optional immediate for fixed-point variant. */
18525 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
18527 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
18528 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
18530 /* Data processing, three registers of different lengths. */
18531 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
18532 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
18533 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
18534 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
18535 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
18536 /* If not scalar, fall back to neon_dyadic_long.
18537 Vector types as above, scalar types S16 S32 U16 U32. */
18538 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
18539 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
18540 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
18541 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
18542 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
18543 /* Dyadic, narrowing insns. Types I16 I32 I64. */
18544 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
18545 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
18546 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
18547 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
18548 /* Saturating doubling multiplies. Types S16 S32. */
18549 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18550 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18551 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
18552 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
18553 S16 S32 U16 U32. */
18554 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
18556 /* Extract. Size 8. */
18557 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
18558 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
18560 /* Two registers, miscellaneous. */
18561 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
18562 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
18563 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
18564 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
18565 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
18566 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
18567 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
18568 /* Vector replicate. Sizes 8 16 32. */
18569 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
18570 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
18571 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
18572 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
18573 /* VMOVN. Types I16 I32 I64. */
18574 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
18575 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
18576 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
18577 /* VQMOVUN. Types S16 S32 S64. */
18578 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
18579 /* VZIP / VUZP. Sizes 8 16 32. */
18580 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
18581 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
18582 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
18583 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
18584 /* VQABS / VQNEG. Types S8 S16 S32. */
18585 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
18586 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
18587 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
18588 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
18589 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
18590 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
18591 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
18592 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
18593 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
18594 /* Reciprocal estimates. Types U32 F32. */
18595 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
18596 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
18597 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
18598 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
18599 /* VCLS. Types S8 S16 S32. */
18600 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
18601 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
18602 /* VCLZ. Types I8 I16 I32. */
18603 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
18604 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
18605 /* VCNT. Size 8. */
18606 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
18607 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
18608 /* Two address, untyped. */
18609 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
18610 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
18611 /* VTRN. Sizes 8 16 32. */
18612 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
18613 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
18615 /* Table lookup. Size 8. */
18616 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
18617 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
18619 #undef THUMB_VARIANT
18620 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
18622 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
18624 /* Neon element/structure load/store. */
18625 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
18626 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
18627 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
18628 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
18629 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
18630 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
18631 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
18632 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
18634 #undef THUMB_VARIANT
18635 #define THUMB_VARIANT &fpu_vfp_ext_v3xd
18637 #define ARM_VARIANT &fpu_vfp_ext_v3xd
18638 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
18639 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
18640 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
18641 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
18642 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
18643 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
18644 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
18645 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
18646 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
18648 #undef THUMB_VARIANT
18649 #define THUMB_VARIANT & fpu_vfp_ext_v3
18651 #define ARM_VARIANT & fpu_vfp_ext_v3
18653 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
18654 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
18655 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
18656 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
18657 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
18658 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
18659 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
18660 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
18661 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
18664 #define ARM_VARIANT &fpu_vfp_ext_fma
18665 #undef THUMB_VARIANT
18666 #define THUMB_VARIANT &fpu_vfp_ext_fma
18667 /* Mnemonics shared by Neon and VFP. These are included in the
18668 VFP FMA variant; NEON and VFP FMA always includes the NEON
18669 FMA instructions. */
18670 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
18671 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
18672 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
18673 the v form should always be used. */
18674 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18675 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
18676 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18677 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
18678 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18679 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
18681 #undef THUMB_VARIANT
18683 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
18685 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18686 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18687 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18688 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18689 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18690 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
18691 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
18692 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
18695 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
18697 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
18698 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
18699 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
18700 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
18701 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
18702 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
18703 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
18704 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
18705 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
18706 cCE("textrmub", e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
18707 cCE("textrmuh", e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
18708 cCE("textrmuw", e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
18709 cCE("textrmsb", e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
18710 cCE("textrmsh", e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
18711 cCE("textrmsw", e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
18712 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
18713 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
18714 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
18715 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
18716 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
18717 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18718 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18719 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18720 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18721 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18722 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
18723 cCE("tmovmskb", e100030, 2, (RR, RIWR), rd_rn),
18724 cCE("tmovmskh", e500030, 2, (RR, RIWR), rd_rn),
18725 cCE("tmovmskw", e900030, 2, (RR, RIWR), rd_rn),
18726 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
18727 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
18728 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
18729 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
18730 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
18731 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
18732 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
18733 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
18734 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18735 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18736 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18737 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18738 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18739 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18740 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18741 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18742 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18743 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
18744 cCE("walignr0", e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18745 cCE("walignr1", e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18746 cCE("walignr2", ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18747 cCE("walignr3", eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18748 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18749 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18750 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18751 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18752 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18753 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18754 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18755 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18756 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18757 cCE("wcmpgtub", e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18758 cCE("wcmpgtuh", e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18759 cCE("wcmpgtuw", e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18760 cCE("wcmpgtsb", e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18761 cCE("wcmpgtsh", e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18762 cCE("wcmpgtsw", eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18763 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
18764 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
18765 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
18766 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
18767 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18768 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18769 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18770 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18771 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18772 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18773 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18774 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18775 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18776 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18777 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18778 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18779 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18780 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18781 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18782 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18783 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18784 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18785 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
18786 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18787 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18788 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18789 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18790 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18791 cCE("wpackhss", e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18792 cCE("wpackhus", e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18793 cCE("wpackwss", eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18794 cCE("wpackwus", e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18795 cCE("wpackdss", ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18796 cCE("wpackdus", ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18797 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18798 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18799 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18800 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18801 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18802 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18803 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18804 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18805 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18806 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18807 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
18808 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18809 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18810 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18811 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18812 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18813 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18814 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18815 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18816 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18817 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18818 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18819 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18820 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18821 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18822 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18823 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18824 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
18825 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
18826 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
18827 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
18828 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
18829 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
18830 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18831 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18832 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18833 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18834 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18835 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18836 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18837 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18838 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18839 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
18840 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
18841 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
18842 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
18843 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
18844 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
18845 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18846 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18847 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18848 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
18849 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
18850 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
18851 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
18852 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
18853 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
18854 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18855 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18856 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18857 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18858 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
18861 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
18863 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
18864 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
18865 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
18866 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
18867 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
18868 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
18869 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18870 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18871 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18872 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18873 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18874 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18875 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18876 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18877 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18878 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18879 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18880 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18881 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18882 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18883 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
18884 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18885 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18886 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18887 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18888 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18889 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18890 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18891 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18892 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18893 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18894 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18895 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18896 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18897 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18898 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18899 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18900 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18901 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18902 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18903 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18904 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18905 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18906 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18907 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18908 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18909 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18910 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18911 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18912 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18913 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18914 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18915 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18916 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18917 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18918 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18919 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
18922 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
18924 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
18925 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
18926 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
18927 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
18928 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
18929 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
18930 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
18931 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
18932 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
18933 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
18934 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
18935 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
18936 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
18937 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
18938 cCE("cfmv64lr", e000510, 2, (RMDX, RR), rn_rd),
18939 cCE("cfmvr64l", e100510, 2, (RR, RMDX), rd_rn),
18940 cCE("cfmv64hr", e000530, 2, (RMDX, RR), rn_rd),
18941 cCE("cfmvr64h", e100530, 2, (RR, RMDX), rd_rn),
18942 cCE("cfmval32", e200440, 2, (RMAX, RMFX), rd_rn),
18943 cCE("cfmv32al", e100440, 2, (RMFX, RMAX), rd_rn),
18944 cCE("cfmvam32", e200460, 2, (RMAX, RMFX), rd_rn),
18945 cCE("cfmv32am", e100460, 2, (RMFX, RMAX), rd_rn),
18946 cCE("cfmvah32", e200480, 2, (RMAX, RMFX), rd_rn),
18947 cCE("cfmv32ah", e100480, 2, (RMFX, RMAX), rd_rn),
18948 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
18949 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
18950 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
18951 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
18952 cCE("cfmvsc32", e2004e0, 2, (RMDS, RMDX), mav_dspsc),
18953 cCE("cfmv32sc", e1004e0, 2, (RMDX, RMDS), rd),
18954 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
18955 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
18956 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
18957 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
18958 cCE("cfcvt32s", e000480, 2, (RMF, RMFX), rd_rn),
18959 cCE("cfcvt32d", e0004a0, 2, (RMD, RMFX), rd_rn),
18960 cCE("cfcvt64s", e0004c0, 2, (RMF, RMDX), rd_rn),
18961 cCE("cfcvt64d", e0004e0, 2, (RMD, RMDX), rd_rn),
18962 cCE("cfcvts32", e100580, 2, (RMFX, RMF), rd_rn),
18963 cCE("cfcvtd32", e1005a0, 2, (RMFX, RMD), rd_rn),
18964 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
18965 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
18966 cCE("cfrshl32", e000550, 3, (RMFX, RMFX, RR), mav_triple),
18967 cCE("cfrshl64", e000570, 3, (RMDX, RMDX, RR), mav_triple),
18968 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
18969 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
18970 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
18971 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
18972 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
18973 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
18974 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
18975 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
18976 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
18977 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
18978 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
18979 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
18980 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
18981 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
18982 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
18983 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
18984 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
18985 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
18986 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
18987 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
18988 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18989 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18990 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18991 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18992 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18993 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
18994 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18995 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
18996 cCE("cfmadd32", e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18997 cCE("cfmsub32", e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
18998 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
18999 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
19002 #undef THUMB_VARIANT
19029 /* MD interface: bits in the object file. */
19031 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
19032 for use in the a.out file, and store them in the array pointed to by buf.
19033 This knows about the endian-ness of the target machine and does
19034 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte),
19035 2 (short) and 4 (long). Floating numbers are put out as a series of
19036 LITTLENUMS (shorts, here at least). */
/* Write VAL into BUF as an N-byte integer in the target's byte order,
   dispatching on the global target_big_endian flag to the generic
   big-endian or little-endian writer.
   NOTE(review): this extract is missing the original's return type,
   braces and the `else` keyword (elided lines 19038/19040/19043/19045)
   -- confirm against the upstream tc-arm.c.  */
19039 md_number_to_chars (char * buf, valueT val, int n)
19041 if (target_big_endian)
19042 number_to_chars_bigendian (buf, val, n);
19044 number_to_chars_littleendian (buf, val, n);
/* Read an N-byte integer back out of BUF, accumulating one byte at a
   time (masking with 255) into a result; the target_big_endian test
   selects the traversal direction.
   NOTE(review): the result declaration, loop headers, and both loop
   bodies' surrounding structure are elided in this extract -- only the
   byte-accumulation statements survive.  Confirm against upstream.  */
19048 md_chars_to_number (char * buf, int n)
19051 unsigned char * where = (unsigned char *) buf;
19053 if (target_big_endian)
19058 result |= (*where++ & 255);
19066 result |= (where[n] & 255);
19073 /* MD interface: Sections. */
19075 /* Calculate the maximum variable size (i.e., excluding fr_fix)
19076 that an rs_machine_dependent frag may reach. */
/* Return the maximum number of variable bytes an rs_machine_dependent
   frag may grow to (excluding fr_fix).  Asserts the frag really is
   machine-dependent before answering.  */
19079 arm_frag_max_var (fragS *fragp)
19081 /* We only use rs_machine_dependent for variable-size Thumb instructions,
19082 which are either THUMB_SIZE (2) or INSN_SIZE (4).
19084 Note that we generate relaxable instructions even for cases that don't
19085 really need it, like an immediate that's a trivial constant. So we're
19086 overestimating the instruction size for some of those cases. Rather
19087 than putting more intelligence here, it would probably be better to
19088 avoid generating a relaxation frag in the first place when it can be
19089 determined up front that a short instruction will suffice. */
19091 gas_assert (fragp->fr_type == rs_machine_dependent);
/* NOTE(review): the function's return statement is elided in this
   extract; per the comment above the bound is presumably INSN_SIZE (4)
   -- confirm against upstream.  */
19095 /* Estimate the size of a frag before relaxing. Assume everything fits in
/* NOTE(review): the tail of the comment above and the whole function
   body (including its return) are elided in this extract; only the
   signature survives.  Confirm behavior against upstream tc-arm.c.  */
19099 md_estimate_size_before_relax (fragS * fragp,
19100 segT segtype ATTRIBUTE_UNUSED)
19106 /* Convert a machine dependent frag. */
/* Finalize a relaxed Thumb instruction frag: re-encode the narrow
   16-bit opcode at the frag's fix point as the wide 32-bit form when
   relaxation settled on fr_var == 4, pick the matching BFD reloc, and
   emit the fixup.
   NOTE(review): this extract elides many lines (local declarations,
   the switch header, several case labels, `else` branches, `break`s
   and the abort/default path) -- the visible statements are only part
   of the original control flow.  Confirm against upstream tc-arm.c.  */
19109 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
19111 unsigned long insn;
19112 unsigned long old_op;
/* Point at the instruction being converted and fetch its narrow
   (16-bit) encoding.  */
19120 buf = fragp->fr_literal + fragp->fr_fix;
19122 old_op = bfd_get_16(abfd, buf);
/* Build the fixup expression: symbolic if the frag references a
   symbol, otherwise a plain constant offset.  */
19123 if (fragp->fr_symbol)
19125 exp.X_op = O_symbol;
19126 exp.X_add_symbol = fragp->fr_symbol;
19130 exp.X_op = O_constant;
19132 exp.X_add_number = fragp->fr_offset;
19133 opcode = fragp->fr_subtype;
/* Load/store group: rebuild register fields into the 32-bit encoding
   when the frag was relaxed to 4 bytes.  */
19136 case T_MNEM_ldr_pc:
19137 case T_MNEM_ldr_pc2:
19138 case T_MNEM_ldr_sp:
19139 case T_MNEM_str_sp:
19146 if (fragp->fr_var == 4)
19148 insn = THUMB_OP32 (opcode);
19149 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
19151 insn |= (old_op & 0x700) << 4;
19155 insn |= (old_op & 7) << 12;
19156 insn |= (old_op & 0x38) << 13;
19158 insn |= 0x00000c00;
19159 put_thumb32_insn (buf, insn);
19160 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
19164 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
/* Only the ldr-from-literal-pool form is PC-relative.  */
19166 pc_rel = (opcode == T_MNEM_ldr_pc2);
/* adr-like group (case label elided): widen and carry Rd across.  */
19169 if (fragp->fr_var == 4)
19171 insn = THUMB_OP32 (opcode);
19172 insn |= (old_op & 0xf0) << 4;
19173 put_thumb32_insn (buf, insn);
19174 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
19178 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
19179 exp.X_add_number -= 4;
/* mov/cmp-immediate group (case labels elided): T32 immediate form;
   mov/movs keep Rd in place, others shift it into bits 8-11.  */
19187 if (fragp->fr_var == 4)
19189 int r0off = (opcode == T_MNEM_mov
19190 || opcode == T_MNEM_movs) ? 0 : 8;
19191 insn = THUMB_OP32 (opcode);
19192 insn = (insn & 0xe1ffffff) | 0x10000000;
19193 insn |= (old_op & 0x700) << r0off;
19194 put_thumb32_insn (buf, insn);
19195 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
19199 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
/* Unconditional branch (case label elided): wide vs. narrow reloc.  */
19204 if (fragp->fr_var == 4)
19206 insn = THUMB_OP32(opcode);
19207 put_thumb32_insn (buf, insn);
19208 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
19211 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
/* Conditional branch (case label elided): condition field moves to
   bits 22-25 in the wide encoding.  */
19215 if (fragp->fr_var == 4)
19217 insn = THUMB_OP32(opcode);
19218 insn |= (old_op & 0xf00) << 14;
19219 put_thumb32_insn (buf, insn);
19220 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
19223 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
/* SP/PC-relative add and SP adjust group.  */
19226 case T_MNEM_add_sp:
19227 case T_MNEM_add_pc:
19228 case T_MNEM_inc_sp:
19229 case T_MNEM_dec_sp:
19230 if (fragp->fr_var == 4)
19232 /* ??? Choose between add and addw. */
19233 insn = THUMB_OP32 (opcode);
19234 insn |= (old_op & 0xf0) << 4;
19235 put_thumb32_insn (buf, insn);
19236 if (opcode == T_MNEM_add_pc)
19237 reloc_type = BFD_RELOC_ARM_T32_IMM12;
19239 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
19242 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
/* Three-operand add/sub immediate group (case labels elided): carry
   Rd and Rn across; bit 20 (S flag) selects the reloc.  */
19250 if (fragp->fr_var == 4)
19252 insn = THUMB_OP32 (opcode);
19253 insn |= (old_op & 0xf0) << 4;
19254 insn |= (old_op & 0xf) << 16;
19255 put_thumb32_insn (buf, insn);
19256 if (insn & (1 << 20))
19257 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
19259 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
19262 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
/* Emit the fixup for the converted instruction, preserving the
   original source location, and fold the variable part into fr_fix.  */
19268 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
19269 (enum bfd_reloc_code_real) reloc_type);
19270 fixp->fx_file = fragp->fr_file;
19271 fixp->fx_line = fragp->fr_line;
19272 fragp->fr_fix += fragp->fr_var;
19275 /* Return the size of a relaxable immediate operand instruction.
19276 SHIFT and SIZE specify the form of the allowable immediate. */
/* The narrow form accepts an immediate of SIZE bits, left-shifted by
   SHIFT; anything outside that (or any symbolic operand) needs the
   wide form.
   NOTE(review): local declarations and the return statements of both
   branches are elided in this extract -- confirm against upstream.  */
19278 relax_immediate (fragS *fragp, int size, int shift)
19284 /* ??? Should be able to do better than this. */
/* Symbolic operands are unresolved here, so assume the wide form.  */
19285 if (fragp->fr_symbol)
19288 low = (1 << shift) - 1;
19289 mask = (1 << (shift + size)) - (1 << shift);
19290 offset = fragp->fr_offset;
19291 /* Force misaligned offsets to 32-bit variant. */
/* An offset with bits outside MASK cannot be encoded narrowly.  */
19294 if (offset & ~mask)
19299 /* Get the address of a symbol during relaxation. */
/* Compute the symbol's address as of the current relaxation pass,
   compensating with STRETCH for frags not yet revisited on this pass
   and for intervening alignment frags.
   NOTE(review): local declarations, the address-adjustment statements
   and the final return are elided in this extract -- confirm against
   upstream.  */
19301 relaxed_symbol_addr (fragS *fragp, long stretch)
19307 sym = fragp->fr_symbol;
19308 sym_frag = symbol_get_frag (sym);
/* Absolute-section symbols must live in the zero-address frag.  */
19309 know (S_GET_SEGMENT (sym) != absolute_section
19310 || sym_frag == &zero_address_frag);
19311 addr = S_GET_VALUE (sym) + fragp->fr_offset;
19313 /* If frag has yet to be reached on this pass, assume it will
19314 move by STRETCH just as we did. If this is not so, it will
19315 be because some frag between grows, and that will force
/* relax_marker differing means sym_frag hasn't been processed yet
   on this pass (condition's first clause elided in this extract).  */
19319 && sym_frag->relax_marker != fragp->relax_marker)
19323 /* Adjust stretch for any alignment frag. Note that if have
19324 been expanding the earlier code, the symbol may be
19325 defined in what appears to be an earlier frag. FIXME:
19326 This doesn't handle the fr_subtype field, which specifies
19327 a maximum number of bytes to skip when doing an
/* Walk forward to the symbol's frag, absorbing the stretch that each
   alignment frag would swallow (round toward zero to the 2^fr_offset
   alignment boundary, sign handled separately).  */
19329 for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
19331 if (f->fr_type == rs_align || f->fr_type == rs_align_code)
19334 stretch = - ((- stretch)
19335 & ~ ((1 << (int) f->fr_offset) - 1));
19337 stretch &= ~ ((1 << (int) f->fr_offset) - 1);
19349 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
/* The narrow form reaches a word-aligned target within 0..1020 bytes
   of the aligned PC; otherwise the wide form is needed.
   NOTE(review): the tail of the comment above, local declarations and
   the return statements are elided in this extract -- confirm against
   upstream.  */
19352 relax_adr (fragS *fragp, asection *sec, long stretch)
19357 /* Assume worst case for symbols not known to be in the same section. */
19358 if (fragp->fr_symbol == NULL
19359 || !S_IS_DEFINED (fragp->fr_symbol)
19360 || sec != S_GET_SEGMENT (fragp->fr_symbol)
19361 || S_IS_WEAK (fragp->fr_symbol))
19364 val = relaxed_symbol_addr (fragp, stretch)
19365 addr = fragp->fr_address + fragp->fr_fix;
/* PC reads as the instruction address + 4, rounded down to a word.  */
19366 addr = (addr + 4) & ~3;
19367 /* Force misaligned targets to 32-bit variant. */
/* Out of the narrow form's 0..1020 byte range.  */
19371 if (val < 0 || val > 1020)
19376 /* Return the size of a relaxable add/sub immediate instruction. */
/* Re-read the narrow opcode to see whether source and destination
   registers coincide: if so an 8-bit immediate form is available,
   otherwise only the 3-bit immediate form.
   NOTE(review): local declarations are elided in this extract --
   confirm against upstream.  */
19378 relax_addsub (fragS *fragp, asection *sec)
19383 buf = fragp->fr_literal + fragp->fr_fix;
19384 op = bfd_get_16(sec->owner, buf);
/* Rd (bits 0-2/3) equals Rn (bits 4-6/7)?  */
19385 if ((op & 0xf) == ((op >> 4) & 0xf))
19386 return relax_immediate (fragp, 8, 0);
19388 return relax_immediate (fragp, 3, 0);
19392 /* Return the size of a relaxable branch instruction. BITS is the
19393 size of the offset field in the narrow instruction. */
/* Decide between the narrow and wide branch encodings by checking the
   relaxed displacement against the signed BITS-bit (times two) range.
   NOTE(review): local declarations, the returns and the limit
   computation are elided in this extract -- confirm against
   upstream.  */
19396 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
19402 /* Assume worst case for symbols not known to be in the same section. */
19403 if (!S_IS_DEFINED (fragp->fr_symbol)
19404 || sec != S_GET_SEGMENT (fragp->fr_symbol)
19405 || S_IS_WEAK (fragp->fr_symbol))
/* Branches to function symbols may need interworking/veneers, so do
   not relax them (enclosing #ifdef, if any, elided in this extract).  */
19409 if (S_IS_DEFINED (fragp->fr_symbol)
19410 && ARM_IS_FUNC (fragp->fr_symbol))
19413 /* PR 12532. Global symbols with default visibility might
19414 be preempted, so do not relax relocations to them. */
19415 if ((ELF_ST_VISIBILITY (S_GET_OTHER (fragp->fr_symbol)) == STV_DEFAULT)
19416 && (! S_IS_LOCAL (fragp->fr_symbol)))
19420 val = relaxed_symbol_addr (fragp, stretch);
/* Branch offsets are relative to PC = instruction address + 4.  */
19421 addr = fragp->fr_address + fragp->fr_fix + 4;
19424 /* Offset is a signed value *2 */
19426 if (val >= limit || val < -limit)
19432 /* Relax a machine dependent frag. This returns the amount by which
19433 the current size of the frag should change. */
/* NOTE(review): several case labels, 'break's and the switch braces are
   elided from this extract (fused leading numbers are artefacts), so some
   relax_immediate calls below belong to case labels that are not visible. */
19436 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
19441 oldsize = fragp->fr_var;
/* fr_subtype holds the Thumb mnemonic being relaxed; each arm picks the
   appropriate size calculator.  */
19442 switch (fragp->fr_subtype)
19444 case T_MNEM_ldr_pc2:
19445 newsize = relax_adr (fragp, sec, stretch);
19447 case T_MNEM_ldr_pc:
19448 case T_MNEM_ldr_sp:
19449 case T_MNEM_str_sp:
/* 8-bit immediate, scaled by 4 (shift of 2).  */
19450 newsize = relax_immediate (fragp, 8, 2);
19454 newsize = relax_immediate (fragp, 5, 2);
19458 newsize = relax_immediate (fragp, 5, 1);
19462 newsize = relax_immediate (fragp, 5, 0);
19465 newsize = relax_adr (fragp, sec, stretch);
19471 newsize = relax_immediate (fragp, 8, 0);
19474 newsize = relax_branch (fragp, sec, 11, stretch);
19477 newsize = relax_branch (fragp, sec, 8, stretch);
19479 case T_MNEM_add_sp:
19480 case T_MNEM_add_pc:
19481 newsize = relax_immediate (fragp, 8, 2);
19483 case T_MNEM_inc_sp:
19484 case T_MNEM_dec_sp:
19485 newsize = relax_immediate (fragp, 7, 2);
19491 newsize = relax_addsub (fragp, sec);
19497 fragp->fr_var = newsize;
19498 /* Freeze wide instructions that are at or before the same location as
19499 in the previous pass. This avoids infinite loops.
19500 Don't freeze them unconditionally because targets may be artificially
19501 misaligned by the expansion of preceding frags. */
19502 if (stretch <= 0 && newsize > 2)
19504 md_convert_frag (sec->owner, sec, fragp);
/* Caller (the generic relaxer) accumulates this delta.  */
19508 return newsize - oldsize;
19511 /* Round up a section size to the appropriate boundary. */
/* NOTE(review): the second parameter (the size, of type valueT) and the
   function's return statements are elided from this extract.  */
19514 md_section_align (segT segment ATTRIBUTE_UNUSED,
/* Only a.out needs explicit rounding; other object formats let BFD
   handle section padding.  */
19517 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
19518 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
19520 /* For a.out, force the section size to be aligned. If we don't do
19521 this, BFD will align it for us, but it will not write out the
19522 final bytes of the section. This may be a bug in BFD, but it is
19523 easier to fix it here since that is how the other a.out targets
19527 align = bfd_get_section_alignment (stdoutput, segment);
/* Round SIZE up to a multiple of 2**align.  */
19528 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
19535 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
19536 of an rs_align_code fragment. */
/* NOTE(review): braces, some declarations and several statements (e.g. the
   noop_size assignments) are elided from this extract; fused leading
   numbers are artefacts.  */
19539 arm_handle_align (fragS * fragP)
/* NOP encodings, indexed [has-architected-NOP][big-endian][byte].
   Index 0 is the mov r0, r0 / mov r8, r8 fallback; index 1 the
   architected NOP.  */
19541 static char const arm_noop[2][2][4] =
19544 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
19545 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
19548 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
19549 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
19552 static char const thumb_noop[2][2][2] =
19555 {0xc0, 0x46}, /* LE */
19556 {0x46, 0xc0}, /* BE */
19559 {0x00, 0xbf}, /* LE */
19560 {0xbf, 0x00} /* BE */
19563 static char const wide_thumb_noop[2][4] =
19564 { /* Wide Thumb-2 */
19565 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
19566 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
19569 unsigned bytes, fix, noop_size;
19572 const char *narrow_noop = NULL;
/* Nothing to do unless this really is a code-alignment frag.  */
19577 if (fragP->fr_type != rs_align_code)
/* Total padding to emit between this frag and the next.  */
19580 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
19581 p = fragP->fr_literal + fragP->fr_fix;
19584 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
19585 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
/* arm_init_frag must have recorded the ARM/Thumb mode of this frag.  */
19587 gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
19589 if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
/* Thumb frag: v6t2+ can mix narrow and wide NOPs; earlier cores only
   have the 16-bit mov r8, r8.  */
19591 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
19593 narrow_noop = thumb_noop[1][target_big_endian];
19594 noop = wide_thumb_noop[target_big_endian];
19597 noop = thumb_noop[0][target_big_endian];
19605 noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
19606 [target_big_endian];
19613 fragP->fr_var = noop_size;
/* Leading bytes that don't fit a whole NOP are zero-filled and marked
   as data via a mapping symbol.  */
19615 if (bytes & (noop_size - 1))
19617 fix = bytes & (noop_size - 1);
19619 insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
19621 memset (p, 0, fix);
19628 if (bytes & noop_size)
19630 /* Insert a narrow noop. */
19631 memcpy (p, narrow_noop, noop_size);
19633 bytes -= noop_size;
19637 /* Use wide noops for the remainder */
19641 while (bytes >= noop_size)
19643 memcpy (p, noop, noop_size);
19645 bytes -= noop_size;
19649 fragP->fr_fix += fix;
19652 /* Called from md_do_align. Used to create an alignment
19653 frag in a code section. */
/* NOTE(review): the return-type line, the err_msg declaration/sprintf and
   the trailing frag_var arguments are elided from this extract.  */
19656 arm_frag_align_code (int n, int max)
19660 /* We assume that there will never be a requirement
19661 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
19662 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
19667 _("alignments greater than %d bytes not supported in .text sections."),
19668 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
/* Fatal: an over-large code alignment cannot be represented.  */
19669 as_fatal ("%s", err_msg);
/* Reserve worst-case room; arm_handle_align fills it with NOPs later.  */
19672 p = frag_var (rs_align_code,
19673 MAX_MEM_FOR_RS_ALIGN_CODE,
19675 (relax_substateT) max,
19682 /* Perform target specific initialisation of a frag.
19683 Note - despite the name this initialisation is not done when the frag
19684 is created, but only when its type is assigned. A frag can be created
19685 and used a long time before its type is set, so beware of assuming that
19686 this initialisation is performed first. */
/* Non-ELF variant: no mapping symbols are needed, only the mode record.  */
19690 arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
19692 /* Record whether this frag is in an ARM or a THUMB area. */
19693 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
19696 #else /* OBJ_ELF is defined. */
/* ELF variant: also emits $a/$t/$d mapping symbols for alignment frags.
   NOTE(review): the return-type line, braces and some case labels of the
   switch are elided from this extract.  */
19698 arm_init_frag (fragS * fragP, int max_chars)
19700 /* If the current ARM vs THUMB mode has not already
19701 been recorded into this frag then do so now. */
19702 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
19704 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
19706 /* Record a mapping symbol for alignment frags. We will delete this
19707 later if the alignment ends up empty. */
19708 switch (fragP->fr_type)
19711 case rs_align_test:
/* Alignment padding is data ($d).  */
19713 mapping_state_2 (MAP_DATA, max_chars);
19715 case rs_align_code:
/* Code alignment is filled with NOPs in the current ISA ($a or $t).  */
19716 mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
19724 /* When we change sections we need to issue a new mapping symbol. */
19727 arm_elf_change_section (void)
19729 /* Link an unlinked unwind index table section to the .text section. */
/* .ARM.exidx sections carry an SHF_LINK_ORDER-style link to the code they
   describe; default it to .text when nothing set it explicitly.  */
19730 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
19731 && elf_linked_to_section (now_seg) == NULL)
19732 elf_linked_to_section (now_seg) = text_section;
/* Map a section-type name used in .section directives to an ELF sh_type.
   NOTE(review): the return-type line and the fallback return are elided
   from this extract.  */
19736 arm_elf_section_type (const char * str, size_t len)
19738 if (len == 5 && strncmp (str, "exidx", 5) == 0)
19739 return SHT_ARM_EXIDX;
19744 /* Code to deal with unwinding tables. */
19746 static void add_unwind_adjustsp (offsetT);
19748 /* Generate any deferred unwind frame offset. */
/* Pending SP adjustments are batched in unwind.pending_offset so adjacent
   pushes can be merged into one opcode; this emits the batch.  */
19751 flush_pending_unwind (void)
19755 offset = unwind.pending_offset;
/* Clear before emitting so add_unwind_opcode doesn't recurse back here.  */
19756 unwind.pending_offset = 0;
19758 add_unwind_adjustsp (offset);
19761 /* Add an opcode to this list for this function. Two-byte opcodes should
19762 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
/* NOTE(review): the tail of the above comment, the loop storing multi-byte
   opcodes and several braces are elided from this extract.  */
19766 add_unwind_opcode (valueT op, int length)
19768 /* Add any deferred stack adjustment. */
19769 if (unwind.pending_offset)
19770 flush_pending_unwind ();
/* Any new opcode invalidates a previously recorded sp-restore state.  */
19772 unwind.sp_restored = 0;
/* Grow the opcode buffer in ARM_OPCODE_CHUNK_SIZE steps as needed.
   xrealloc/xmalloc abort on allocation failure, so no NULL checks.  */
19774 if (unwind.opcode_count + length > unwind.opcode_alloc)
19776 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
19777 if (unwind.opcodes)
19778 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
19779 unwind.opcode_alloc);
19781 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
/* Store bytes low-to-high; the list is reversed when the table entry is
   created.  */
19786 unwind.opcodes[unwind.opcode_count] = op & 0xff;
19788 unwind.opcode_count++;
19792 /* Add unwind opcodes to adjust the stack pointer. */
/* Encodings follow the ARM EHABI: 0x00..0x3f = vsp += (imm << 2) + 4,
   0x40..0x7f = vsp -= ..., 0xb2 = vsp += uleb128 form.
   NOTE(review): declarations, braces and some loop statements are elided
   from this extract.  */
19795 add_unwind_adjustsp (offsetT offset)
19799 if (offset > 0x200)
19801 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
19806 /* Long form: 0xb2, uleb128. */
19807 /* This might not fit in a word so add the individual bytes,
19808 remembering the list is built in reverse order. */
/* 0x204 = the 0x200 bias of the long form plus the implicit +4.  */
19809 o = (valueT) ((offset - 0x204) >> 2);
19811 add_unwind_opcode (0, 1);
19813 /* Calculate the uleb128 encoding of the offset. */
19817 bytes[n] = o & 0x7f;
19823 /* Add the insn. */
19825 add_unwind_opcode (bytes[n - 1], 1);
19826 add_unwind_opcode (0xb2, 1);
19828 else if (offset > 0x100)
19830 /* Two short opcodes. */
19831 add_unwind_opcode (0x3f, 1);
19832 op = (offset - 0x104) >> 2;
19833 add_unwind_opcode (op, 1);
19835 else if (offset > 0)
19837 /* Short opcode. */
19838 op = (offset - 4) >> 2;
19839 add_unwind_opcode (op, 1);
19841 else if (offset < 0)
/* Negative adjustment: emit repeated maximal decrements, then the
   remainder (0x40-based opcode).  The negation of OFFSET happens in an
   elided line -- TODO confirm against the full source.  */
19844 while (offset > 0x100)
19846 add_unwind_opcode (0x7f, 1);
19849 op = ((offset - 4) >> 2) | 0x40;
19850 add_unwind_opcode (op, 1);
19854 /* Finish the list of unwind opcodes for this function. */
19856 finish_unwind_opcodes (void)
19860 if (unwind.fp_used)
19862 /* Adjust sp as necessary. */
/* Fold the outstanding frame-size delta into the pending offset before
   the final sp-from-fp restore opcode.  */
19863 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
19864 flush_pending_unwind ();
19866 /* After restoring sp from the frame pointer. */
/* EHABI opcode 0x90+reg: vsp = r[reg].  */
19867 op = 0x90 | unwind.fp_reg;
19868 add_unwind_opcode (op, 1);
/* No frame pointer: just emit whatever sp adjustment is still pending.  */
19871 flush_pending_unwind ();
19875 /* Start an exception table entry. If idx is nonzero this is an index table
/* Switches to the .ARM.exidx / .ARM.extab section matching TEXT_SEG,
   creating it if needed.  NOTE(review): several declarations, the
   if (idx) selector and braces are elided from this extract.  */
19879 start_unwind_section (const segT text_seg, int idx)
19881 const char * text_name;
19882 const char * prefix;
19883 const char * prefix_once;
19884 const char * group_name;
19888 size_t sec_name_len;
/* Index table: .ARM.exidx with the dedicated ELF section type.  */
19895 prefix = ELF_STRING_ARM_unwind;
19896 prefix_once = ELF_STRING_ARM_unwind_once;
19897 type = SHT_ARM_EXIDX;
/* Extab (unwind data) table: plain PROGBITS.  */
19901 prefix = ELF_STRING_ARM_unwind_info;
19902 prefix_once = ELF_STRING_ARM_unwind_info_once;
19903 type = SHT_PROGBITS;
19906 text_name = segment_name (text_seg);
19907 if (streq (text_name, ".text"))
/* Old-style linkonce code sections get the linkonce unwind prefix and
   drop their own prefix from the generated name.  */
19910 if (strncmp (text_name, ".gnu.linkonce.t.",
19911 strlen (".gnu.linkonce.t.")) == 0)
19913 prefix = prefix_once;
19914 text_name += strlen (".gnu.linkonce.t.");
/* Build "<prefix><text_name>\0" for the unwind section name.  */
19917 prefix_len = strlen (prefix);
19918 text_len = strlen (text_name);
19919 sec_name_len = prefix_len + text_len;
19920 sec_name = (char *) xmalloc (sec_name_len + 1);
19921 memcpy (sec_name, prefix, prefix_len);
19922 memcpy (sec_name + prefix_len, text_name, text_len);
19923 sec_name[prefix_len + text_len] = '\0';
19929 /* Handle COMDAT group. */
19930 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
19932 group_name = elf_group_name (text_seg);
19933 if (group_name == NULL)
19935 as_bad (_("Group section `%s' has no group signature"),
19936 segment_name (text_seg));
19937 ignore_rest_of_line ();
19940 flags |= SHF_GROUP;
19944 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
19946 /* Set the section link for index tables. */
19948 elf_linked_to_section (now_seg) = text_seg;
19952 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
19953 personality routine data. Returns zero, or the index table value for
19954 an inline entry. */
/* NOTE(review): declarations (ptr, where, data, size, n), braces and
   several case labels are elided from this extract; fused leading numbers
   are artefacts.  */
19957 create_unwind_entry (int have_data)
19962 /* The current word of data. */
19964 /* The number of bytes left in this word. */
19967 finish_unwind_opcodes ();
19969 /* Remember the current text section. */
19970 unwind.saved_seg = now_seg;
19971 unwind.saved_subseg = now_subseg;
/* Switch to the matching .ARM.extab section.  */
19973 start_unwind_section (now_seg, 0);
19975 if (unwind.personality_routine == NULL)
/* personality_index -2 means .cantunwind was used.  */
19977 if (unwind.personality_index == -2)
19980 as_bad (_("handlerdata in cantunwind frame"));
19981 return 1; /* EXIDX_CANTUNWIND. */
19984 /* Use a default personality routine if none is specified. */
/* __aeabi_unwind_cpp_pr0 handles up to 3 opcodes inline; pr1 otherwise. */
19985 if (unwind.personality_index == -1)
19987 if (unwind.opcode_count > 3)
19988 unwind.personality_index = 1;
19990 unwind.personality_index = 0;
19993 /* Space for the personality routine entry. */
19994 if (unwind.personality_index == 0)
19996 if (unwind.opcode_count > 3)
19997 as_bad (_("too many unwind opcodes for personality routine 0"));
20001 /* All the data is inline in the index table. */
/* Pack the (reversed) opcode list MSB-first into one word.  */
20004 while (unwind.opcode_count > 0)
20006 unwind.opcode_count--;
20007 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
20011 /* Pad with "finish" opcodes. */
20013 data = (data << 8) | 0xb0;
20020 /* We get two opcodes "free" in the first word. */
20021 size = unwind.opcode_count - 2;
20025 gas_assert (unwind.personality_index == -1);
20027 /* An extra byte is required for the opcode count. */
20028 size = unwind.opcode_count + 1;
/* Convert byte count to word count, rounding up.  */
20031 size = (size + 3) >> 2;
20033 as_bad (_("too many unwind opcodes"));
/* Table entries are word aligned.  */
20035 frag_align (2, 0, 0);
20036 record_alignment (now_seg, 2);
20037 unwind.table_entry = expr_build_dot ();
20039 /* Allocate the table entry. */
20040 ptr = frag_more ((size << 2) + 4);
20041 /* PR 13449: Zero the table entries in case some of them are not used. */
20042 memset (ptr, 0, (size << 2) + 4);
20043 where = frag_now_fix () - ((size << 2) + 4);
20045 switch (unwind.personality_index)
20048 /* ??? Should this be a PLT generating relocation? */
20049 /* Custom personality routine. */
20050 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
20051 BFD_RELOC_ARM_PREL31);
20056 /* Set the first byte to the number of additional words. */
20057 data = size > 0 ? size - 1 : 0;
20061 /* ABI defined personality routines. */
20063 /* Three opcode bytes are packed into the first word. */
20070 /* The size and first two opcode bytes go in the first word. */
20071 data = ((0x80 + unwind.personality_index) << 8) | size;
20076 /* Should never happen. */
20080 /* Pack the opcodes into words (MSB first), reversing the list at the same
20082 while (unwind.opcode_count > 0)
20086 md_number_to_chars (ptr, data, 4);
20091 unwind.opcode_count--;
20093 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
20096 /* Finish off the last word. */
20099 /* Pad with "finish" opcodes. */
20101 data = (data << 8) | 0xb0;
20103 md_number_to_chars (ptr, data, 4);
20108 /* Add an empty descriptor if there is no user-specified data. */
20109 ptr = frag_more (4);
20110 md_number_to_chars (ptr, 0, 4);
20117 /* Initialize the DWARF-2 unwind information for this procedure. */
/* Initial CFA rule: sp with offset 0 at function entry.  */
20120 tc_arm_frame_initial_instructions (void)
20122 cfi_add_CFA_def_cfa (REG_SP, 0);
20124 #endif /* OBJ_ELF */
20126 /* Convert REGNAME to a DWARF-2 register number. */
20129 tc_arm_regname_to_dw2regnum (char *regname)
20131 int reg = arm_reg_parse (®name, REG_TYPE_RN);
/* Emit a SIZE-byte section-relative (secrel) reference to SYMBOL for
   PE DWARF-2 output.  NOTE(review): the return-type line and the
   declaration of EXP are elided from this extract.  */
20141 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
20145 exp.X_op = O_secrel;
20146 exp.X_add_symbol = symbol;
20147 exp.X_add_number = 0;
20148 emit_expr (&exp, size);
20152 /* MD interface: Symbol and relocation handling. */
20154 /* Return the address within the segment that a PC-relative fixup is
20155 relative to. For ARM, PC-relative fixups applied to instructions
20156 are generally relative to the location of the fixup plus 8 bytes.
20157 Thumb branches are offset by 4, and Thumb loads relative to PC
20158 require special handling. */
/* NOTE(review): the return-type line, braces, several 'return' statements
   and some conditions (e.g. the WinCE #ifdef around the first test) are
   elided from this extract; fused leading numbers are artefacts.  */
20161 md_pcrel_from_section (fixS * fixP, segT seg)
20163 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
20165 /* If this is pc-relative and we are going to emit a relocation
20166 then we just want to put out any pipeline compensation that the linker
20167 will need. Otherwise we want to use the calculated base.
20168 For WinCE we skip the bias for externals as well, since this
20169 is how the MS ARM-CE assembler behaves and we want to be compatible. */
20171 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
20172 || (arm_force_relocation (fixP)
20174 && !S_IS_EXTERNAL (fixP->fx_addsy)
20180 switch (fixP->fx_r_type)
20182 /* PC relative addressing on the Thumb is slightly odd as the
20183 bottom two bits of the PC are forced to zero for the
20184 calculation. This happens *after* application of the
20185 pipeline offset. However, Thumb adrl already adjusts for
20186 this, so we need not do it again. */
20187 case BFD_RELOC_ARM_THUMB_ADD:
20190 case BFD_RELOC_ARM_THUMB_OFFSET:
20191 case BFD_RELOC_ARM_T32_OFFSET_IMM:
20192 case BFD_RELOC_ARM_T32_ADD_PC12:
20193 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
/* Thumb loads: PC is the word-aligned value of (address + 4).  */
20194 return (base + 4) & ~3;
20196 /* Thumb branches are simply offset by +4. */
20197 case BFD_RELOC_THUMB_PCREL_BRANCH7:
20198 case BFD_RELOC_THUMB_PCREL_BRANCH9:
20199 case BFD_RELOC_THUMB_PCREL_BRANCH12:
20200 case BFD_RELOC_THUMB_PCREL_BRANCH20:
20201 case BFD_RELOC_THUMB_PCREL_BRANCH25:
20204 case BFD_RELOC_THUMB_PCREL_BRANCH23:
/* BL to a resolved local ARM function (v5T+): use the real base so the
   BL->BLX conversion done at fixup time computes the right offset.  */
20206 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20207 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20208 && ARM_IS_FUNC (fixP->fx_addsy)
20209 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20210 base = fixP->fx_where + fixP->fx_frag->fr_address;
20213 /* BLX is like branches above, but forces the low two bits of PC to
20215 case BFD_RELOC_THUMB_PCREL_BLX:
20217 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20218 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20219 && THUMB_IS_FUNC (fixP->fx_addsy)
20220 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20221 base = fixP->fx_where + fixP->fx_frag->fr_address;
20222 return (base + 4) & ~3;
20224 /* ARM mode branches are offset by +8. However, the Windows CE
20225 loader expects the relocation not to take this into account. */
20226 case BFD_RELOC_ARM_PCREL_BLX:
20228 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20229 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20230 && ARM_IS_FUNC (fixP->fx_addsy)
20231 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20232 base = fixP->fx_where + fixP->fx_frag->fr_address;
20235 case BFD_RELOC_ARM_PCREL_CALL:
20237 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20238 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
20239 && THUMB_IS_FUNC (fixP->fx_addsy)
20240 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
20241 base = fixP->fx_where + fixP->fx_frag->fr_address;
20244 case BFD_RELOC_ARM_PCREL_BRANCH:
20245 case BFD_RELOC_ARM_PCREL_JUMP:
20246 case BFD_RELOC_ARM_PLT32:
20248 /* When handling fixups immediately, because we have already
20249 discovered the value of a symbol, or the address of the frag involved
20250 we must account for the offset by +8, as the OS loader will never see the reloc.
20251 see fixup_segment() in write.c
20252 The S_IS_EXTERNAL test handles the case of global symbols.
20253 Those need the calculated base, not just the pipe compensation the linker will need. */
20255 && fixP->fx_addsy != NULL
20256 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
20257 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
20265 /* ARM mode loads relative to PC are also offset by +8. Unlike
20266 branches, the Windows CE loader *does* expect the relocation
20267 to take this into account. */
20268 case BFD_RELOC_ARM_OFFSET_IMM:
20269 case BFD_RELOC_ARM_OFFSET_IMM8:
20270 case BFD_RELOC_ARM_HWLITERAL:
20271 case BFD_RELOC_ARM_LITERAL:
20272 case BFD_RELOC_ARM_CP_OFF_IMM:
20276 /* Other PC-relative relocations are un-offset. */
20282 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
20283 Otherwise we have no need to default values of symbols. */
/* NOTE(review): the return-type line, #ifdef OBJ_ELF guards and the
   return statements are elided from this extract.  */
20286 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
/* Cheap two-character pre-check before the full string compare.  */
20289 if (name[0] == '_' && name[1] == 'G'
20290 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
20294 if (symbol_find (name))
20295 as_bad (_("GOT already in the symbol table"));
/* Lazily create the GOT symbol the first time it is referenced.  */
20297 GOT_symbol = symbol_new (name, undefined_section,
20298 (valueT) 0, & zero_address_frag);
20308 /* Subroutine of md_apply_fix. Check to see if an immediate can be
20309 computed as two separate immediate values, added together. We
20310 already know that this value cannot be computed by just one ARM
/* Returns the encoded low part and stores the encoded high part through
   HIGHPART.  Each part is an 8-bit value with a 4-bit rotation, in the
   standard ARM operand2 immediate format (rotation in bits 8-11, hence
   the "<< 7" on the even rotation amount).
   NOTE(review): declarations of A/I and some braces are elided from this
   extract.  */
20313 static unsigned int
20314 validate_immediate_twopart (unsigned int val,
20315 unsigned int * highpart)
/* Find the first even rotation that exposes a non-zero low byte.  */
20320 for (i = 0; i < 32; i += 2)
20321 if (((a = rotate_left (val, i)) & 0xff) != 0)
20327 * highpart = (a >> 8) | ((i + 24) << 7);
20329 else if (a & 0xff0000)
20331 if (a & 0xff000000)
20333 * highpart = (a >> 16) | ((i + 16) << 7);
/* Only the top byte remains for the high part.  */
20337 gas_assert (a & 0xff000000);
20338 * highpart = (a >> 24) | ((i + 8) << 7);
20341 return (a & 0xff) | (i << 7);
/* Validate a load/store offset immediate: max 255 for halfword/signed
   forms (HWSE nonzero), max 4095 otherwise.  NOTE(review): the
   return-type line and return statements are elided from this extract.  */
20348 validate_offset_imm (unsigned int val, int hwse)
20350 if ((hwse && val > 255) || val > 4095)
20355 /* Subroutine of md_apply_fix. Do those data_ops which can take a
20356 negative immediate constant by altering the instruction. A bit of
20361 by inverting the second operand, and
20364 by negating the second operand. */
/* NOTE(review): parts of the comment above, declarations of OP/NEW_INST,
   the 'value = negated/inverted' assignments inside each case, 'break's,
   the FAIL return and the final return are elided from this extract.  */
20367 negate_data_op (unsigned long * instruction,
20368 unsigned long value)
20371 unsigned long negated, inverted;
/* Pre-compute both candidate re-encodings of the immediate.  */
20373 negated = encode_arm_immediate (-value);
20374 inverted = encode_arm_immediate (~value);
20376 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
20379 /* First negates. */
20380 case OPCODE_SUB: /* ADD <-> SUB */
20381 new_inst = OPCODE_ADD;
20386 new_inst = OPCODE_SUB;
20390 case OPCODE_CMP: /* CMP <-> CMN */
20391 new_inst = OPCODE_CMN;
20396 new_inst = OPCODE_CMP;
20400 /* Now Inverted ops. */
20401 case OPCODE_MOV: /* MOV <-> MVN */
20402 new_inst = OPCODE_MVN;
20407 new_inst = OPCODE_MOV;
20411 case OPCODE_AND: /* AND <-> BIC */
20412 new_inst = OPCODE_BIC;
20417 new_inst = OPCODE_AND;
20421 case OPCODE_ADC: /* ADC <-> SBC */
20422 new_inst = OPCODE_SBC;
20427 new_inst = OPCODE_ADC;
20431 /* We cannot do anything. */
20436 if (value == (unsigned) FAIL)
/* Swap in the complementary opcode, keeping all other fields.  */
20439 *instruction &= OPCODE_MASK;
20440 *instruction |= new_inst << DATA_OP_SHIFT;
20444 /* Like negate_data_op, but for Thumb-2. */
/* NOTE(review): declarations of OP/NEW_INST/RD, per-case 'value ='
   assignments, 'break's, Rd-specific guards (e.g. for TST/CMP forms) and
   the returns are elided from this extract.  */
20446 static unsigned int
20447 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
20451 unsigned int negated, inverted;
20453 negated = encode_thumb32_immediate (-value);
20454 inverted = encode_thumb32_immediate (~value);
/* Rd participates in distinguishing CMP/CMN and MOV/MVN forms (handled
   in elided lines).  */
20456 rd = (*instruction >> 8) & 0xf;
20457 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
20460 /* ADD <-> SUB. Includes CMP <-> CMN. */
20461 case T2_OPCODE_SUB:
20462 new_inst = T2_OPCODE_ADD;
20466 case T2_OPCODE_ADD:
20467 new_inst = T2_OPCODE_SUB;
20471 /* ORR <-> ORN. Includes MOV <-> MVN. */
20472 case T2_OPCODE_ORR:
20473 new_inst = T2_OPCODE_ORN;
20477 case T2_OPCODE_ORN:
20478 new_inst = T2_OPCODE_ORR;
20482 /* AND <-> BIC. TST has no inverted equivalent. */
20483 case T2_OPCODE_AND:
20484 new_inst = T2_OPCODE_BIC;
20491 case T2_OPCODE_BIC:
20492 new_inst = T2_OPCODE_AND;
20497 case T2_OPCODE_ADC:
20498 new_inst = T2_OPCODE_SBC;
20502 case T2_OPCODE_SBC:
20503 new_inst = T2_OPCODE_ADC;
20507 /* We cannot do anything. */
20512 if (value == (unsigned int)FAIL)
/* Swap in the complementary opcode, keeping all other fields.  */
20515 *instruction &= T2_OPCODE_MASK;
20516 *instruction |= new_inst << T2_DATA_OP_SHIFT;
20520 /* Read a 32-bit thumb instruction from buf. */
/* Assembles the two 16-bit halfwords with the first halfword in the high
   16 bits.  NOTE(review): the 'return insn;' line and braces are elided
   from this extract.  */
20521 static unsigned long
20522 get_thumb32_insn (char * buf)
20524 unsigned long insn;
20525 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
20526 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20532 /* We usually want to set the low bit on the address of thumb function
20533 symbols. In particular .word foo - . should have the low bit set.
20534 Generic code tries to fold the difference of two symbols to
20535 a constant. Prevent this and force a relocation when the first symbol
20536 is a thumb function. */
/* NOTE(review): the return-type line (bfd_boolean) and the two return
   statements are elided from this extract.  */
20539 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
20541 if (op == O_subtract
20542 && l->X_op == O_symbol
20543 && r->X_op == O_symbol
20544 && THUMB_IS_FUNC (l->X_add_symbol))
/* Rewrite L in place as (sym1 - sym2 + const) so the expression stays
   symbolic and a relocation is emitted.  */
20546 l->X_op = O_subtract;
20547 l->X_op_symbol = r->X_add_symbol;
20548 l->X_add_number -= r->X_add_number;
20552 /* Process as normal. */
20556 /* Encode Thumb2 unconditional branches and calls. The encoding
20557 for the 2 are identical for the immediate values. */
/* NOTE(review): the declarations of NEWVAL/NEWVAL2 and the #undef of
   T2I1I2MASK are elided from this extract.  */
20560 encode_thumb2_b_bl_offset (char * buf, offsetT value)
20562 #define T2I1I2MASK ((1 << 13) | (1 << 11))
20565 addressT S, I1, I2, lo, hi;
/* Split the 25-bit signed offset into the T2 B/BL immediate fields:
   S (sign), I1/I2 (stored XOR-ed with S), imm10 (hi) and imm11 (lo).  */
20567 S = (value >> 24) & 0x01;
20568 I1 = (value >> 23) & 0x01;
20569 I2 = (value >> 22) & 0x01;
20570 hi = (value >> 12) & 0x3ff;
20571 lo = (value >> 1) & 0x7ff;
20572 newval = md_chars_to_number (buf, THUMB_SIZE);
20573 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
20574 newval |= (S << 10) | hi;
20575 newval2 &= ~T2I1I2MASK;
/* J1 = NOT(I1 XOR S), J2 = NOT(I2 XOR S); the trailing XOR with the mask
   performs the NOT on just those two bits.  */
20576 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
20577 md_number_to_chars (buf, newval, THUMB_SIZE);
20578 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
20582 md_apply_fix (fixS * fixP,
20586 offsetT value = * valP;
20588 unsigned int newimm;
20589 unsigned long temp;
20591 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
20593 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
20595 /* Note whether this will delete the relocation. */
20597 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
20600 /* On a 64-bit host, silently truncate 'value' to 32 bits for
20601 consistency with the behaviour on 32-bit hosts. Remember value
20603 value &= 0xffffffff;
20604 value ^= 0x80000000;
20605 value -= 0x80000000;
20608 fixP->fx_addnumber = value;
20610 /* Same treatment for fixP->fx_offset. */
20611 fixP->fx_offset &= 0xffffffff;
20612 fixP->fx_offset ^= 0x80000000;
20613 fixP->fx_offset -= 0x80000000;
20615 switch (fixP->fx_r_type)
20617 case BFD_RELOC_NONE:
20618 /* This will need to go in the object file. */
20622 case BFD_RELOC_ARM_IMMEDIATE:
20623 /* We claim that this fixup has been processed here,
20624 even if in fact we generate an error because we do
20625 not have a reloc for it, so tc_gen_reloc will reject it. */
20628 if (fixP->fx_addsy)
20630 const char *msg = 0;
20632 if (! S_IS_DEFINED (fixP->fx_addsy))
20633 msg = _("undefined symbol %s used as an immediate value");
20634 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
20635 msg = _("symbol %s is in a different section");
20636 else if (S_IS_WEAK (fixP->fx_addsy))
20637 msg = _("symbol %s is weak and may be overridden later");
20641 as_bad_where (fixP->fx_file, fixP->fx_line,
20642 msg, S_GET_NAME (fixP->fx_addsy));
20647 temp = md_chars_to_number (buf, INSN_SIZE);
20649 /* If the offset is negative, we should use encoding A2 for ADR. */
20650 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
20651 newimm = negate_data_op (&temp, value);
20654 newimm = encode_arm_immediate (value);
20656 /* If the instruction will fail, see if we can fix things up by
20657 changing the opcode. */
20658 if (newimm == (unsigned int) FAIL)
20659 newimm = negate_data_op (&temp, value);
20662 if (newimm == (unsigned int) FAIL)
20664 as_bad_where (fixP->fx_file, fixP->fx_line,
20665 _("invalid constant (%lx) after fixup"),
20666 (unsigned long) value);
20670 newimm |= (temp & 0xfffff000);
20671 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
20674 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
20676 unsigned int highpart = 0;
20677 unsigned int newinsn = 0xe1a00000; /* nop. */
20679 if (fixP->fx_addsy)
20681 const char *msg = 0;
20683 if (! S_IS_DEFINED (fixP->fx_addsy))
20684 msg = _("undefined symbol %s used as an immediate value");
20685 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
20686 msg = _("symbol %s is in a different section");
20687 else if (S_IS_WEAK (fixP->fx_addsy))
20688 msg = _("symbol %s is weak and may be overridden later");
20692 as_bad_where (fixP->fx_file, fixP->fx_line,
20693 msg, S_GET_NAME (fixP->fx_addsy));
20698 newimm = encode_arm_immediate (value);
20699 temp = md_chars_to_number (buf, INSN_SIZE);
20701 /* If the instruction will fail, see if we can fix things up by
20702 changing the opcode. */
20703 if (newimm == (unsigned int) FAIL
20704 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
20706 /* No ? OK - try using two ADD instructions to generate
20708 newimm = validate_immediate_twopart (value, & highpart);
20710 /* Yes - then make sure that the second instruction is
20712 if (newimm != (unsigned int) FAIL)
20714 /* Still No ? Try using a negated value. */
20715 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
20716 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
20717 /* Otherwise - give up. */
20720 as_bad_where (fixP->fx_file, fixP->fx_line,
20721 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
20726 /* Replace the first operand in the 2nd instruction (which
20727 is the PC) with the destination register. We have
20728 already added in the PC in the first instruction and we
20729 do not want to do it again. */
20730 newinsn &= ~ 0xf0000;
20731 newinsn |= ((newinsn & 0x0f000) << 4);
20734 newimm |= (temp & 0xfffff000);
20735 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
20737 highpart |= (newinsn & 0xfffff000);
20738 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
20742 case BFD_RELOC_ARM_OFFSET_IMM:
20743 if (!fixP->fx_done && seg->use_rela_p)
20746 case BFD_RELOC_ARM_LITERAL:
20752 if (validate_offset_imm (value, 0) == FAIL)
20754 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
20755 as_bad_where (fixP->fx_file, fixP->fx_line,
20756 _("invalid literal constant: pool needs to be closer"));
20758 as_bad_where (fixP->fx_file, fixP->fx_line,
20759 _("bad immediate value for offset (%ld)"),
20764 newval = md_chars_to_number (buf, INSN_SIZE);
20766 newval &= 0xfffff000;
20769 newval &= 0xff7ff000;
20770 newval |= value | (sign ? INDEX_UP : 0);
20772 md_number_to_chars (buf, newval, INSN_SIZE);
20775 case BFD_RELOC_ARM_OFFSET_IMM8:
20776 case BFD_RELOC_ARM_HWLITERAL:
20782 if (validate_offset_imm (value, 1) == FAIL)
20784 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
20785 as_bad_where (fixP->fx_file, fixP->fx_line,
20786 _("invalid literal constant: pool needs to be closer"));
20788 as_bad (_("bad immediate value for 8-bit offset (%ld)"),
20793 newval = md_chars_to_number (buf, INSN_SIZE);
20795 newval &= 0xfffff0f0;
20798 newval &= 0xff7ff0f0;
20799 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
20801 md_number_to_chars (buf, newval, INSN_SIZE);
20804 case BFD_RELOC_ARM_T32_OFFSET_U8:
20805 if (value < 0 || value > 1020 || value % 4 != 0)
20806 as_bad_where (fixP->fx_file, fixP->fx_line,
20807 _("bad immediate value for offset (%ld)"), (long) value);
20810 newval = md_chars_to_number (buf+2, THUMB_SIZE);
20812 md_number_to_chars (buf+2, newval, THUMB_SIZE);
20815 case BFD_RELOC_ARM_T32_OFFSET_IMM:
20816 /* This is a complicated relocation used for all varieties of Thumb32
20817 load/store instruction with immediate offset:
20819 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
20820 *4, optional writeback(W)
20821 (doubleword load/store)
20823 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
20824 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
20825 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
20826 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
20827 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
20829 Uppercase letters indicate bits that are already encoded at
20830 this point. Lowercase letters are our problem. For the
20831 second block of instructions, the secondary opcode nybble
20832 (bits 8..11) is present, and bit 23 is zero, even if this is
20833 a PC-relative operation. */
20834 newval = md_chars_to_number (buf, THUMB_SIZE);
20836 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
20838 if ((newval & 0xf0000000) == 0xe0000000)
20840 /* Doubleword load/store: 8-bit offset, scaled by 4. */
20842 newval |= (1 << 23);
20845 if (value % 4 != 0)
20847 as_bad_where (fixP->fx_file, fixP->fx_line,
20848 _("offset not a multiple of 4"));
20854 as_bad_where (fixP->fx_file, fixP->fx_line,
20855 _("offset out of range"));
20860 else if ((newval & 0x000f0000) == 0x000f0000)
20862 /* PC-relative, 12-bit offset. */
20864 newval |= (1 << 23);
20869 as_bad_where (fixP->fx_file, fixP->fx_line,
20870 _("offset out of range"));
20875 else if ((newval & 0x00000100) == 0x00000100)
20877 /* Writeback: 8-bit, +/- offset. */
20879 newval |= (1 << 9);
20884 as_bad_where (fixP->fx_file, fixP->fx_line,
20885 _("offset out of range"));
20890 else if ((newval & 0x00000f00) == 0x00000e00)
20892 /* T-instruction: positive 8-bit offset. */
20893 if (value < 0 || value > 0xff)
20895 as_bad_where (fixP->fx_file, fixP->fx_line,
20896 _("offset out of range"));
20904 /* Positive 12-bit or negative 8-bit offset. */
20908 newval |= (1 << 23);
20918 as_bad_where (fixP->fx_file, fixP->fx_line,
20919 _("offset out of range"));
20926 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
20927 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
20930 case BFD_RELOC_ARM_SHIFT_IMM:
20931 newval = md_chars_to_number (buf, INSN_SIZE);
20932 if (((unsigned long) value) > 32
20934 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
20936 as_bad_where (fixP->fx_file, fixP->fx_line,
20937 _("shift expression is too large"));
20942 /* Shifts of zero must be done as lsl. */
20944 else if (value == 32)
20946 newval &= 0xfffff07f;
20947 newval |= (value & 0x1f) << 7;
20948 md_number_to_chars (buf, newval, INSN_SIZE);
20951 case BFD_RELOC_ARM_T32_IMMEDIATE:
20952 case BFD_RELOC_ARM_T32_ADD_IMM:
20953 case BFD_RELOC_ARM_T32_IMM12:
20954 case BFD_RELOC_ARM_T32_ADD_PC12:
20955 /* We claim that this fixup has been processed here,
20956 even if in fact we generate an error because we do
20957 not have a reloc for it, so tc_gen_reloc will reject it. */
20961 && ! S_IS_DEFINED (fixP->fx_addsy))
20963 as_bad_where (fixP->fx_file, fixP->fx_line,
20964 _("undefined symbol %s used as an immediate value"),
20965 S_GET_NAME (fixP->fx_addsy));
20969 newval = md_chars_to_number (buf, THUMB_SIZE);
20971 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
20974 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
20975 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
20977 newimm = encode_thumb32_immediate (value);
20978 if (newimm == (unsigned int) FAIL)
20979 newimm = thumb32_negate_data_op (&newval, value);
20981 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
20982 && newimm == (unsigned int) FAIL)
20984 /* Turn add/sum into addw/subw. */
20985 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
20986 newval = (newval & 0xfeffffff) | 0x02000000;
20987 /* No flat 12-bit imm encoding for addsw/subsw. */
20988 if ((newval & 0x00100000) == 0)
20990 /* 12 bit immediate for addw/subw. */
20994 newval ^= 0x00a00000;
20997 newimm = (unsigned int) FAIL;
21003 if (newimm == (unsigned int)FAIL)
21005 as_bad_where (fixP->fx_file, fixP->fx_line,
21006 _("invalid constant (%lx) after fixup"),
21007 (unsigned long) value);
21011 newval |= (newimm & 0x800) << 15;
21012 newval |= (newimm & 0x700) << 4;
21013 newval |= (newimm & 0x0ff);
21015 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
21016 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
21019 case BFD_RELOC_ARM_SMC:
21020 if (((unsigned long) value) > 0xffff)
21021 as_bad_where (fixP->fx_file, fixP->fx_line,
21022 _("invalid smc expression"));
21023 newval = md_chars_to_number (buf, INSN_SIZE);
21024 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
21025 md_number_to_chars (buf, newval, INSN_SIZE);
21028 case BFD_RELOC_ARM_HVC:
21029 if (((unsigned long) value) > 0xffff)
21030 as_bad_where (fixP->fx_file, fixP->fx_line,
21031 _("invalid hvc expression"));
21032 newval = md_chars_to_number (buf, INSN_SIZE);
21033 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
21034 md_number_to_chars (buf, newval, INSN_SIZE);
21037 case BFD_RELOC_ARM_SWI:
21038 if (fixP->tc_fix_data != 0)
21040 if (((unsigned long) value) > 0xff)
21041 as_bad_where (fixP->fx_file, fixP->fx_line,
21042 _("invalid swi expression"));
21043 newval = md_chars_to_number (buf, THUMB_SIZE);
21045 md_number_to_chars (buf, newval, THUMB_SIZE);
21049 if (((unsigned long) value) > 0x00ffffff)
21050 as_bad_where (fixP->fx_file, fixP->fx_line,
21051 _("invalid swi expression"));
21052 newval = md_chars_to_number (buf, INSN_SIZE);
21054 md_number_to_chars (buf, newval, INSN_SIZE);
21058 case BFD_RELOC_ARM_MULTI:
21059 if (((unsigned long) value) > 0xffff)
21060 as_bad_where (fixP->fx_file, fixP->fx_line,
21061 _("invalid expression in load/store multiple"));
21062 newval = value | md_chars_to_number (buf, INSN_SIZE);
21063 md_number_to_chars (buf, newval, INSN_SIZE);
21067 case BFD_RELOC_ARM_PCREL_CALL:
21069 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21071 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21072 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21073 && THUMB_IS_FUNC (fixP->fx_addsy))
21074 /* Flip the bl to blx. This is a simple flip
21075 bit here because we generate PCREL_CALL for
21076 unconditional bls. */
21078 newval = md_chars_to_number (buf, INSN_SIZE);
21079 newval = newval | 0x10000000;
21080 md_number_to_chars (buf, newval, INSN_SIZE);
21086 goto arm_branch_common;
21088 case BFD_RELOC_ARM_PCREL_JUMP:
21089 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21091 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21092 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21093 && THUMB_IS_FUNC (fixP->fx_addsy))
21095 /* This would map to a bl<cond>, b<cond>,
21096 b<always> to a Thumb function. We
21097 need to force a relocation for this particular
21099 newval = md_chars_to_number (buf, INSN_SIZE);
21103 case BFD_RELOC_ARM_PLT32:
21105 case BFD_RELOC_ARM_PCREL_BRANCH:
21107 goto arm_branch_common;
21109 case BFD_RELOC_ARM_PCREL_BLX:
21112 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
21114 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21115 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21116 && ARM_IS_FUNC (fixP->fx_addsy))
21118 /* Flip the blx to a bl and warn. */
21119 const char *name = S_GET_NAME (fixP->fx_addsy);
21120 newval = 0xeb000000;
21121 as_warn_where (fixP->fx_file, fixP->fx_line,
21122 _("blx to '%s' an ARM ISA state function changed to bl"),
21124 md_number_to_chars (buf, newval, INSN_SIZE);
21130 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
21131 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
21135 /* We are going to store value (shifted right by two) in the
21136 instruction, in a 24 bit, signed field. Bits 26 through 32 either
21137 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
21138 also be be clear. */
21140 as_bad_where (fixP->fx_file, fixP->fx_line,
21141 _("misaligned branch destination"));
21142 if ((value & (offsetT)0xfe000000) != (offsetT)0
21143 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
21144 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21146 if (fixP->fx_done || !seg->use_rela_p)
21148 newval = md_chars_to_number (buf, INSN_SIZE);
21149 newval |= (value >> 2) & 0x00ffffff;
21150 /* Set the H bit on BLX instructions. */
21154 newval |= 0x01000000;
21156 newval &= ~0x01000000;
21158 md_number_to_chars (buf, newval, INSN_SIZE);
21162 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
21163 /* CBZ can only branch forward. */
21165 /* Attempts to use CBZ to branch to the next instruction
21166 (which, strictly speaking, are prohibited) will be turned into
21169 FIXME: It may be better to remove the instruction completely and
21170 perform relaxation. */
21173 newval = md_chars_to_number (buf, THUMB_SIZE);
21174 newval = 0xbf00; /* NOP encoding T1 */
21175 md_number_to_chars (buf, newval, THUMB_SIZE);
21180 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21182 if (fixP->fx_done || !seg->use_rela_p)
21184 newval = md_chars_to_number (buf, THUMB_SIZE);
21185 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
21186 md_number_to_chars (buf, newval, THUMB_SIZE);
21191 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
21192 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
21193 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21195 if (fixP->fx_done || !seg->use_rela_p)
21197 newval = md_chars_to_number (buf, THUMB_SIZE);
21198 newval |= (value & 0x1ff) >> 1;
21199 md_number_to_chars (buf, newval, THUMB_SIZE);
21203 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
21204 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
21205 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21207 if (fixP->fx_done || !seg->use_rela_p)
21209 newval = md_chars_to_number (buf, THUMB_SIZE);
21210 newval |= (value & 0xfff) >> 1;
21211 md_number_to_chars (buf, newval, THUMB_SIZE);
21215 case BFD_RELOC_THUMB_PCREL_BRANCH20:
21217 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21218 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21219 && ARM_IS_FUNC (fixP->fx_addsy)
21220 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21222 /* Force a relocation for a branch 20 bits wide. */
21225 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
21226 as_bad_where (fixP->fx_file, fixP->fx_line,
21227 _("conditional branch out of range"));
21229 if (fixP->fx_done || !seg->use_rela_p)
21232 addressT S, J1, J2, lo, hi;
21234 S = (value & 0x00100000) >> 20;
21235 J2 = (value & 0x00080000) >> 19;
21236 J1 = (value & 0x00040000) >> 18;
21237 hi = (value & 0x0003f000) >> 12;
21238 lo = (value & 0x00000ffe) >> 1;
21240 newval = md_chars_to_number (buf, THUMB_SIZE);
21241 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21242 newval |= (S << 10) | hi;
21243 newval2 |= (J1 << 13) | (J2 << 11) | lo;
21244 md_number_to_chars (buf, newval, THUMB_SIZE);
21245 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
21249 case BFD_RELOC_THUMB_PCREL_BLX:
21250 /* If there is a blx from a thumb state function to
21251 another thumb function flip this to a bl and warn
21255 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21256 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21257 && THUMB_IS_FUNC (fixP->fx_addsy))
21259 const char *name = S_GET_NAME (fixP->fx_addsy);
21260 as_warn_where (fixP->fx_file, fixP->fx_line,
21261 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
21263 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21264 newval = newval | 0x1000;
21265 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
21266 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
21271 goto thumb_bl_common;
21273 case BFD_RELOC_THUMB_PCREL_BRANCH23:
21274 /* A bl from Thumb state ISA to an internal ARM state function
21275 is converted to a blx. */
21277 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21278 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21279 && ARM_IS_FUNC (fixP->fx_addsy)
21280 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21282 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21283 newval = newval & ~0x1000;
21284 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
21285 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
21292 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
21293 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
21294 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
21297 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
21298 /* For a BLX instruction, make sure that the relocation is rounded up
21299 to a word boundary. This follows the semantics of the instruction
21300 which specifies that bit 1 of the target address will come from bit
21301 1 of the base address. */
21302 value = (value + 1) & ~ 1;
21304 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
21306 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
21307 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21308 else if ((value & ~0x1ffffff)
21309 && ((value & ~0x1ffffff) != ~0x1ffffff))
21310 as_bad_where (fixP->fx_file, fixP->fx_line,
21311 _("Thumb2 branch out of range"));
21314 if (fixP->fx_done || !seg->use_rela_p)
21315 encode_thumb2_b_bl_offset (buf, value);
21319 case BFD_RELOC_THUMB_PCREL_BRANCH25:
21320 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
21321 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
21323 if (fixP->fx_done || !seg->use_rela_p)
21324 encode_thumb2_b_bl_offset (buf, value);
21329 if (fixP->fx_done || !seg->use_rela_p)
21330 md_number_to_chars (buf, value, 1);
21334 if (fixP->fx_done || !seg->use_rela_p)
21335 md_number_to_chars (buf, value, 2);
21339 case BFD_RELOC_ARM_TLS_CALL:
21340 case BFD_RELOC_ARM_THM_TLS_CALL:
21341 case BFD_RELOC_ARM_TLS_DESCSEQ:
21342 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
21343 S_SET_THREAD_LOCAL (fixP->fx_addsy);
21346 case BFD_RELOC_ARM_TLS_GOTDESC:
21347 case BFD_RELOC_ARM_TLS_GD32:
21348 case BFD_RELOC_ARM_TLS_LE32:
21349 case BFD_RELOC_ARM_TLS_IE32:
21350 case BFD_RELOC_ARM_TLS_LDM32:
21351 case BFD_RELOC_ARM_TLS_LDO32:
21352 S_SET_THREAD_LOCAL (fixP->fx_addsy);
21355 case BFD_RELOC_ARM_GOT32:
21356 case BFD_RELOC_ARM_GOTOFF:
21357 if (fixP->fx_done || !seg->use_rela_p)
21358 md_number_to_chars (buf, 0, 4);
21361 case BFD_RELOC_ARM_GOT_PREL:
21362 if (fixP->fx_done || !seg->use_rela_p)
21363 md_number_to_chars (buf, value, 4);
21366 case BFD_RELOC_ARM_TARGET2:
21367 /* TARGET2 is not partial-inplace, so we need to write the
21368 addend here for REL targets, because it won't be written out
21369 during reloc processing later. */
21370 if (fixP->fx_done || !seg->use_rela_p)
21371 md_number_to_chars (buf, fixP->fx_offset, 4);
21375 case BFD_RELOC_RVA:
21377 case BFD_RELOC_ARM_TARGET1:
21378 case BFD_RELOC_ARM_ROSEGREL32:
21379 case BFD_RELOC_ARM_SBREL32:
21380 case BFD_RELOC_32_PCREL:
21382 case BFD_RELOC_32_SECREL:
21384 if (fixP->fx_done || !seg->use_rela_p)
21386 /* For WinCE we only do this for pcrel fixups. */
21387 if (fixP->fx_done || fixP->fx_pcrel)
21389 md_number_to_chars (buf, value, 4);
21393 case BFD_RELOC_ARM_PREL31:
21394 if (fixP->fx_done || !seg->use_rela_p)
21396 newval = md_chars_to_number (buf, 4) & 0x80000000;
21397 if ((value ^ (value >> 1)) & 0x40000000)
21399 as_bad_where (fixP->fx_file, fixP->fx_line,
21400 _("rel31 relocation overflow"));
21402 newval |= value & 0x7fffffff;
21403 md_number_to_chars (buf, newval, 4);
21408 case BFD_RELOC_ARM_CP_OFF_IMM:
21409 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
21410 if (value < -1023 || value > 1023 || (value & 3))
21411 as_bad_where (fixP->fx_file, fixP->fx_line,
21412 _("co-processor offset out of range"));
21417 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
21418 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
21419 newval = md_chars_to_number (buf, INSN_SIZE);
21421 newval = get_thumb32_insn (buf);
21423 newval &= 0xffffff00;
21426 newval &= 0xff7fff00;
21427 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
21429 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
21430 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
21431 md_number_to_chars (buf, newval, INSN_SIZE);
21433 put_thumb32_insn (buf, newval);
21436 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
21437 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
21438 if (value < -255 || value > 255)
21439 as_bad_where (fixP->fx_file, fixP->fx_line,
21440 _("co-processor offset out of range"));
21442 goto cp_off_common;
21444 case BFD_RELOC_ARM_THUMB_OFFSET:
21445 newval = md_chars_to_number (buf, THUMB_SIZE);
21446 /* Exactly what ranges, and where the offset is inserted depends
21447 on the type of instruction, we can establish this from the
21449 switch (newval >> 12)
21451 case 4: /* PC load. */
21452 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
21453 forced to zero for these loads; md_pcrel_from has already
21454 compensated for this. */
21456 as_bad_where (fixP->fx_file, fixP->fx_line,
21457 _("invalid offset, target not word aligned (0x%08lX)"),
21458 (((unsigned long) fixP->fx_frag->fr_address
21459 + (unsigned long) fixP->fx_where) & ~3)
21460 + (unsigned long) value);
21462 if (value & ~0x3fc)
21463 as_bad_where (fixP->fx_file, fixP->fx_line,
21464 _("invalid offset, value too big (0x%08lX)"),
21467 newval |= value >> 2;
21470 case 9: /* SP load/store. */
21471 if (value & ~0x3fc)
21472 as_bad_where (fixP->fx_file, fixP->fx_line,
21473 _("invalid offset, value too big (0x%08lX)"),
21475 newval |= value >> 2;
21478 case 6: /* Word load/store. */
21480 as_bad_where (fixP->fx_file, fixP->fx_line,
21481 _("invalid offset, value too big (0x%08lX)"),
21483 newval |= value << 4; /* 6 - 2. */
21486 case 7: /* Byte load/store. */
21488 as_bad_where (fixP->fx_file, fixP->fx_line,
21489 _("invalid offset, value too big (0x%08lX)"),
21491 newval |= value << 6;
21494 case 8: /* Halfword load/store. */
21496 as_bad_where (fixP->fx_file, fixP->fx_line,
21497 _("invalid offset, value too big (0x%08lX)"),
21499 newval |= value << 5; /* 6 - 1. */
21503 as_bad_where (fixP->fx_file, fixP->fx_line,
21504 "Unable to process relocation for thumb opcode: %lx",
21505 (unsigned long) newval);
21508 md_number_to_chars (buf, newval, THUMB_SIZE);
21511 case BFD_RELOC_ARM_THUMB_ADD:
21512 /* This is a complicated relocation, since we use it for all of
21513 the following immediate relocations:
21517 9bit ADD/SUB SP word-aligned
21518 10bit ADD PC/SP word-aligned
21520 The type of instruction being processed is encoded in the
21527 newval = md_chars_to_number (buf, THUMB_SIZE);
21529 int rd = (newval >> 4) & 0xf;
21530 int rs = newval & 0xf;
21531 int subtract = !!(newval & 0x8000);
21533 /* Check for HI regs, only very restricted cases allowed:
21534 Adjusting SP, and using PC or SP to get an address. */
21535 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
21536 || (rs > 7 && rs != REG_SP && rs != REG_PC))
21537 as_bad_where (fixP->fx_file, fixP->fx_line,
21538 _("invalid Hi register with immediate"));
21540 /* If value is negative, choose the opposite instruction. */
21544 subtract = !subtract;
21546 as_bad_where (fixP->fx_file, fixP->fx_line,
21547 _("immediate value out of range"));
21552 if (value & ~0x1fc)
21553 as_bad_where (fixP->fx_file, fixP->fx_line,
21554 _("invalid immediate for stack address calculation"));
21555 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
21556 newval |= value >> 2;
21558 else if (rs == REG_PC || rs == REG_SP)
21560 if (subtract || value & ~0x3fc)
21561 as_bad_where (fixP->fx_file, fixP->fx_line,
21562 _("invalid immediate for address calculation (value = 0x%08lX)"),
21563 (unsigned long) value);
21564 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
21566 newval |= value >> 2;
21571 as_bad_where (fixP->fx_file, fixP->fx_line,
21572 _("immediate value out of range"));
21573 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
21574 newval |= (rd << 8) | value;
21579 as_bad_where (fixP->fx_file, fixP->fx_line,
21580 _("immediate value out of range"));
21581 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
21582 newval |= rd | (rs << 3) | (value << 6);
21585 md_number_to_chars (buf, newval, THUMB_SIZE);
21588 case BFD_RELOC_ARM_THUMB_IMM:
21589 newval = md_chars_to_number (buf, THUMB_SIZE);
21590 if (value < 0 || value > 255)
21591 as_bad_where (fixP->fx_file, fixP->fx_line,
21592 _("invalid immediate: %ld is out of range"),
21595 md_number_to_chars (buf, newval, THUMB_SIZE);
21598 case BFD_RELOC_ARM_THUMB_SHIFT:
21599 /* 5bit shift value (0..32). LSL cannot take 32. */
21600 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
21601 temp = newval & 0xf800;
21602 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
21603 as_bad_where (fixP->fx_file, fixP->fx_line,
21604 _("invalid shift value: %ld"), (long) value);
21605 /* Shifts of zero must be encoded as LSL. */
21607 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
21608 /* Shifts of 32 are encoded as zero. */
21609 else if (value == 32)
21611 newval |= value << 6;
21612 md_number_to_chars (buf, newval, THUMB_SIZE);
21615 case BFD_RELOC_VTABLE_INHERIT:
21616 case BFD_RELOC_VTABLE_ENTRY:
21620 case BFD_RELOC_ARM_MOVW:
21621 case BFD_RELOC_ARM_MOVT:
21622 case BFD_RELOC_ARM_THUMB_MOVW:
21623 case BFD_RELOC_ARM_THUMB_MOVT:
21624 if (fixP->fx_done || !seg->use_rela_p)
21626 /* REL format relocations are limited to a 16-bit addend. */
21627 if (!fixP->fx_done)
21629 if (value < -0x8000 || value > 0x7fff)
21630 as_bad_where (fixP->fx_file, fixP->fx_line,
21631 _("offset out of range"));
21633 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
21634 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
21639 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
21640 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
21642 newval = get_thumb32_insn (buf);
21643 newval &= 0xfbf08f00;
21644 newval |= (value & 0xf000) << 4;
21645 newval |= (value & 0x0800) << 15;
21646 newval |= (value & 0x0700) << 4;
21647 newval |= (value & 0x00ff);
21648 put_thumb32_insn (buf, newval);
21652 newval = md_chars_to_number (buf, 4);
21653 newval &= 0xfff0f000;
21654 newval |= value & 0x0fff;
21655 newval |= (value & 0xf000) << 4;
21656 md_number_to_chars (buf, newval, 4);
21661 case BFD_RELOC_ARM_ALU_PC_G0_NC:
21662 case BFD_RELOC_ARM_ALU_PC_G0:
21663 case BFD_RELOC_ARM_ALU_PC_G1_NC:
21664 case BFD_RELOC_ARM_ALU_PC_G1:
21665 case BFD_RELOC_ARM_ALU_PC_G2:
21666 case BFD_RELOC_ARM_ALU_SB_G0_NC:
21667 case BFD_RELOC_ARM_ALU_SB_G0:
21668 case BFD_RELOC_ARM_ALU_SB_G1_NC:
21669 case BFD_RELOC_ARM_ALU_SB_G1:
21670 case BFD_RELOC_ARM_ALU_SB_G2:
21671 gas_assert (!fixP->fx_done);
21672 if (!seg->use_rela_p)
21675 bfd_vma encoded_addend;
21676 bfd_vma addend_abs = abs (value);
21678 /* Check that the absolute value of the addend can be
21679 expressed as an 8-bit constant plus a rotation. */
21680 encoded_addend = encode_arm_immediate (addend_abs);
21681 if (encoded_addend == (unsigned int) FAIL)
21682 as_bad_where (fixP->fx_file, fixP->fx_line,
21683 _("the offset 0x%08lX is not representable"),
21684 (unsigned long) addend_abs);
21686 /* Extract the instruction. */
21687 insn = md_chars_to_number (buf, INSN_SIZE);
21689 /* If the addend is positive, use an ADD instruction.
21690 Otherwise use a SUB. Take care not to destroy the S bit. */
21691 insn &= 0xff1fffff;
21697 /* Place the encoded addend into the first 12 bits of the
21699 insn &= 0xfffff000;
21700 insn |= encoded_addend;
21702 /* Update the instruction. */
21703 md_number_to_chars (buf, insn, INSN_SIZE);
21707 case BFD_RELOC_ARM_LDR_PC_G0:
21708 case BFD_RELOC_ARM_LDR_PC_G1:
21709 case BFD_RELOC_ARM_LDR_PC_G2:
21710 case BFD_RELOC_ARM_LDR_SB_G0:
21711 case BFD_RELOC_ARM_LDR_SB_G1:
21712 case BFD_RELOC_ARM_LDR_SB_G2:
21713 gas_assert (!fixP->fx_done);
21714 if (!seg->use_rela_p)
21717 bfd_vma addend_abs = abs (value);
21719 /* Check that the absolute value of the addend can be
21720 encoded in 12 bits. */
21721 if (addend_abs >= 0x1000)
21722 as_bad_where (fixP->fx_file, fixP->fx_line,
21723 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
21724 (unsigned long) addend_abs);
21726 /* Extract the instruction. */
21727 insn = md_chars_to_number (buf, INSN_SIZE);
21729 /* If the addend is negative, clear bit 23 of the instruction.
21730 Otherwise set it. */
21732 insn &= ~(1 << 23);
21736 /* Place the absolute value of the addend into the first 12 bits
21737 of the instruction. */
21738 insn &= 0xfffff000;
21739 insn |= addend_abs;
21741 /* Update the instruction. */
21742 md_number_to_chars (buf, insn, INSN_SIZE);
21746 case BFD_RELOC_ARM_LDRS_PC_G0:
21747 case BFD_RELOC_ARM_LDRS_PC_G1:
21748 case BFD_RELOC_ARM_LDRS_PC_G2:
21749 case BFD_RELOC_ARM_LDRS_SB_G0:
21750 case BFD_RELOC_ARM_LDRS_SB_G1:
21751 case BFD_RELOC_ARM_LDRS_SB_G2:
21752 gas_assert (!fixP->fx_done);
21753 if (!seg->use_rela_p)
21756 bfd_vma addend_abs = abs (value);
21758 /* Check that the absolute value of the addend can be
21759 encoded in 8 bits. */
21760 if (addend_abs >= 0x100)
21761 as_bad_where (fixP->fx_file, fixP->fx_line,
21762 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
21763 (unsigned long) addend_abs);
21765 /* Extract the instruction. */
21766 insn = md_chars_to_number (buf, INSN_SIZE);
21768 /* If the addend is negative, clear bit 23 of the instruction.
21769 Otherwise set it. */
21771 insn &= ~(1 << 23);
21775 /* Place the first four bits of the absolute value of the addend
21776 into the first 4 bits of the instruction, and the remaining
21777 four into bits 8 .. 11. */
21778 insn &= 0xfffff0f0;
21779 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
21781 /* Update the instruction. */
21782 md_number_to_chars (buf, insn, INSN_SIZE);
21786 case BFD_RELOC_ARM_LDC_PC_G0:
21787 case BFD_RELOC_ARM_LDC_PC_G1:
21788 case BFD_RELOC_ARM_LDC_PC_G2:
21789 case BFD_RELOC_ARM_LDC_SB_G0:
21790 case BFD_RELOC_ARM_LDC_SB_G1:
21791 case BFD_RELOC_ARM_LDC_SB_G2:
21792 gas_assert (!fixP->fx_done);
21793 if (!seg->use_rela_p)
21796 bfd_vma addend_abs = abs (value);
21798 /* Check that the absolute value of the addend is a multiple of
21799 four and, when divided by four, fits in 8 bits. */
21800 if (addend_abs & 0x3)
21801 as_bad_where (fixP->fx_file, fixP->fx_line,
21802 _("bad offset 0x%08lX (must be word-aligned)"),
21803 (unsigned long) addend_abs);
21805 if ((addend_abs >> 2) > 0xff)
21806 as_bad_where (fixP->fx_file, fixP->fx_line,
21807 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
21808 (unsigned long) addend_abs);
21810 /* Extract the instruction. */
21811 insn = md_chars_to_number (buf, INSN_SIZE);
21813 /* If the addend is negative, clear bit 23 of the instruction.
21814 Otherwise set it. */
21816 insn &= ~(1 << 23);
21820 /* Place the addend (divided by four) into the first eight
21821 bits of the instruction. */
21822 insn &= 0xfffffff0;
21823 insn |= addend_abs >> 2;
21825 /* Update the instruction. */
21826 md_number_to_chars (buf, insn, INSN_SIZE);
21830 case BFD_RELOC_ARM_V4BX:
21831 /* This will need to go in the object file. */
21835 case BFD_RELOC_UNUSED:
21837 as_bad_where (fixP->fx_file, fixP->fx_line,
21838 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
21842 /* Translate internal representation of relocation info to BFD target
/* NOTE(review): this region was damaged in extraction — the embedded
   original line numbers are non-contiguous, so braces, `break;`s and the
   final `return reloc;`/`return NULL;` paths are missing.  The code below
   is reproduced verbatim and only annotated; restore it from a pristine
   tc-arm.c before building.  */
/* Build a BFD `arelent' describing one fixup so it can be emitted into
   the object file.  Allocates the arelent and its symbol-pointer slot,
   maps the gas-internal fx_r_type onto a BFD reloc code, and resolves
   the howto.  Errors are reported via as_bad_where.  */
21846 tc_gen_reloc (asection *section, fixS *fixp)
21849 bfd_reloc_code_real_type code;
21851 reloc = (arelent *) xmalloc (sizeof (arelent));
21853 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
21854 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
21855 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
/* For PC-relative fixups: RELA targets carry the addend in the reloc, so
   subtract the PC base; REL targets instead record the fixup address.  */
21857 if (fixp->fx_pcrel)
21859 if (section->use_rela_p)
21860 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
21862 fixp->fx_offset = reloc->address;
21864 reloc->addend = fixp->fx_offset;
/* Map the internal fixup type to a BFD reloc code.  Plain data relocs
   (8/16/32-bit and the MOVW/MOVT family) switch to their *_PCREL
   variants when the fixup is PC-relative.  */
21866 switch (fixp->fx_r_type)
21869 if (fixp->fx_pcrel)
21871 code = BFD_RELOC_8_PCREL;
21876 if (fixp->fx_pcrel)
21878 code = BFD_RELOC_16_PCREL;
21883 if (fixp->fx_pcrel)
21885 code = BFD_RELOC_32_PCREL;
21889 case BFD_RELOC_ARM_MOVW:
21890 if (fixp->fx_pcrel)
21892 code = BFD_RELOC_ARM_MOVW_PCREL;
21896 case BFD_RELOC_ARM_MOVT:
21897 if (fixp->fx_pcrel)
21899 code = BFD_RELOC_ARM_MOVT_PCREL;
21903 case BFD_RELOC_ARM_THUMB_MOVW:
21904 if (fixp->fx_pcrel)
21906 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
21910 case BFD_RELOC_ARM_THUMB_MOVT:
21911 if (fixp->fx_pcrel)
21913 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
/* These reloc types pass straight through unchanged.  */
21917 case BFD_RELOC_NONE:
21918 case BFD_RELOC_ARM_PCREL_BRANCH:
21919 case BFD_RELOC_ARM_PCREL_BLX:
21920 case BFD_RELOC_RVA:
21921 case BFD_RELOC_THUMB_PCREL_BRANCH7:
21922 case BFD_RELOC_THUMB_PCREL_BRANCH9:
21923 case BFD_RELOC_THUMB_PCREL_BRANCH12:
21924 case BFD_RELOC_THUMB_PCREL_BRANCH20:
21925 case BFD_RELOC_THUMB_PCREL_BRANCH23:
21926 case BFD_RELOC_THUMB_PCREL_BRANCH25:
21927 case BFD_RELOC_VTABLE_ENTRY:
21928 case BFD_RELOC_VTABLE_INHERIT:
21930 case BFD_RELOC_32_SECREL:
21932 code = fixp->fx_r_type;
/* Thumb BLX: from EABI v4 on, a BRANCH23 plus linker fix-up replaces
   the dedicated BLX reloc.  */
21935 case BFD_RELOC_THUMB_PCREL_BLX:
21937 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
21938 code = BFD_RELOC_THUMB_PCREL_BRANCH23;
21941 code = BFD_RELOC_THUMB_PCREL_BLX;
21944 case BFD_RELOC_ARM_LITERAL:
21945 case BFD_RELOC_ARM_HWLITERAL:
21946 /* If this is called then the a literal has
21947 been referenced across a section boundary. */
21948 as_bad_where (fixp->fx_file, fixp->fx_line,
21949 _("literal referenced across section boundary"));
/* GOT/PLT/TLS/group relocations all pass through unchanged.  */
21953 case BFD_RELOC_ARM_TLS_CALL:
21954 case BFD_RELOC_ARM_THM_TLS_CALL:
21955 case BFD_RELOC_ARM_TLS_DESCSEQ:
21956 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
21957 case BFD_RELOC_ARM_GOT32:
21958 case BFD_RELOC_ARM_GOTOFF:
21959 case BFD_RELOC_ARM_GOT_PREL:
21960 case BFD_RELOC_ARM_PLT32:
21961 case BFD_RELOC_ARM_TARGET1:
21962 case BFD_RELOC_ARM_ROSEGREL32:
21963 case BFD_RELOC_ARM_SBREL32:
21964 case BFD_RELOC_ARM_PREL31:
21965 case BFD_RELOC_ARM_TARGET2:
21966 case BFD_RELOC_ARM_TLS_LE32:
21967 case BFD_RELOC_ARM_TLS_LDO32:
21968 case BFD_RELOC_ARM_PCREL_CALL:
21969 case BFD_RELOC_ARM_PCREL_JUMP:
21970 case BFD_RELOC_ARM_ALU_PC_G0_NC:
21971 case BFD_RELOC_ARM_ALU_PC_G0:
21972 case BFD_RELOC_ARM_ALU_PC_G1_NC:
21973 case BFD_RELOC_ARM_ALU_PC_G1:
21974 case BFD_RELOC_ARM_ALU_PC_G2:
21975 case BFD_RELOC_ARM_LDR_PC_G0:
21976 case BFD_RELOC_ARM_LDR_PC_G1:
21977 case BFD_RELOC_ARM_LDR_PC_G2:
21978 case BFD_RELOC_ARM_LDRS_PC_G0:
21979 case BFD_RELOC_ARM_LDRS_PC_G1:
21980 case BFD_RELOC_ARM_LDRS_PC_G2:
21981 case BFD_RELOC_ARM_LDC_PC_G0:
21982 case BFD_RELOC_ARM_LDC_PC_G1:
21983 case BFD_RELOC_ARM_LDC_PC_G2:
21984 case BFD_RELOC_ARM_ALU_SB_G0_NC:
21985 case BFD_RELOC_ARM_ALU_SB_G0:
21986 case BFD_RELOC_ARM_ALU_SB_G1_NC:
21987 case BFD_RELOC_ARM_ALU_SB_G1:
21988 case BFD_RELOC_ARM_ALU_SB_G2:
21989 case BFD_RELOC_ARM_LDR_SB_G0:
21990 case BFD_RELOC_ARM_LDR_SB_G1:
21991 case BFD_RELOC_ARM_LDR_SB_G2:
21992 case BFD_RELOC_ARM_LDRS_SB_G0:
21993 case BFD_RELOC_ARM_LDRS_SB_G1:
21994 case BFD_RELOC_ARM_LDRS_SB_G2:
21995 case BFD_RELOC_ARM_LDC_SB_G0:
21996 case BFD_RELOC_ARM_LDC_SB_G1:
21997 case BFD_RELOC_ARM_LDC_SB_G2:
21998 case BFD_RELOC_ARM_V4BX:
21999 code = fixp->fx_r_type;
22002 case BFD_RELOC_ARM_TLS_GOTDESC:
22003 case BFD_RELOC_ARM_TLS_GD32:
22004 case BFD_RELOC_ARM_TLS_IE32:
22005 case BFD_RELOC_ARM_TLS_LDM32:
22006 /* BFD will include the symbol's address in the addend.
22007 But we don't want that, so subtract it out again here. */
22008 if (!S_IS_COMMON (fixp->fx_addsy))
22009 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
22010 code = fixp->fx_r_type;
/* The remaining internal fixup types should have been resolved in
   md_apply_fix; reaching here with one of them is an error.  */
22014 case BFD_RELOC_ARM_IMMEDIATE:
22015 as_bad_where (fixp->fx_file, fixp->fx_line,
22016 _("internal relocation (type: IMMEDIATE) not fixed up"));
22019 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
22020 as_bad_where (fixp->fx_file, fixp->fx_line,
22021 _("ADRL used for a symbol not defined in the same file"));
22024 case BFD_RELOC_ARM_OFFSET_IMM:
22025 if (section->use_rela_p)
22027 code = fixp->fx_r_type;
22031 if (fixp->fx_addsy != NULL
22032 && !S_IS_DEFINED (fixp->fx_addsy)
22033 && S_IS_LOCAL (fixp->fx_addsy))
22035 as_bad_where (fixp->fx_file, fixp->fx_line,
22036 _("undefined local label `%s'"),
22037 S_GET_NAME (fixp->fx_addsy));
22041 as_bad_where (fixp->fx_file, fixp->fx_line,
22042 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
/* Default case: name the unrepresentable fixup type in the error.  */
22049 switch (fixp->fx_r_type)
22051 case BFD_RELOC_NONE: type = "NONE"; break;
22052 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
22053 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
22054 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
22055 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
22056 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
22057 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
22058 case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
22059 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
22060 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
22061 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
22062 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
22063 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
22064 default: type = _("<unknown>"); break;
22066 as_bad_where (fixp->fx_file, fixp->fx_line,
22067 _("cannot represent %s relocation in this object file format"),
/* A 32-bit reference to the _GLOBAL_OFFSET_TABLE_ symbol becomes a
   GOTPC reloc with the fixup address as its addend.  */
22074 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
22076 && fixp->fx_addsy == GOT_symbol)
22078 code = BFD_RELOC_ARM_GOTPC;
22079 reloc->addend = fixp->fx_offset = reloc->address;
22083 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
22085 if (reloc->howto == NULL)
22087 as_bad_where (fixp->fx_file, fixp->fx_line,
22088 _("cannot represent %s relocation in this object file format"),
22089 bfd_get_reloc_code_name (code));
22093 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
22094 vtable entry to be used in the relocation's section offset. */
22095 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
22096 reloc->address = fixp->fx_offset;
22101 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
/* NOTE(review): lines were dropped in extraction here (the remaining
   parameters -- where/size/exp/pcrel judging by the uses below -- and
   the size-dispatch scaffolding are missing); code kept verbatim.  */
/* Create a fixup for a data directive (.byte/.short/.word/.quad),
   picking the BFD data reloc that matches the emitted size.  */
22104 cons_fix_new_arm (fragS * frag,
22109 bfd_reloc_code_real_type type;
22113 FIXME: @@ Should look at CPU word size. */
22117 type = BFD_RELOC_8;
22120 type = BFD_RELOC_16;
22124 type = BFD_RELOC_32;
22127 type = BFD_RELOC_64;
/* PE/COFF `.secrel32' support: a section-relative expression is turned
   into a plain symbol reference carrying a SECREL reloc.  */
22132 if (exp->X_op == O_secrel)
22134 exp->X_op = O_symbol;
22135 type = BFD_RELOC_32_SECREL;
22139 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
22142 #if defined (OBJ_COFF)
/* COFF only: redirect a Thumb BL fixup aimed at a non-Thumb function to
   that function's special Thumb entry point (interworking support).
   NOTE(review): the return type line and braces are missing from this
   excerpt -- see the gaps in the embedded line numbers.  */
22144 arm_validate_fix (fixS * fixP)
22146 /* If the destination of the branch is a defined symbol which does not have
22147 the THUMB_FUNC attribute, then we must be calling a function which has
22148 the (interfacearm) attribute. We look for the Thumb entry point to that
22149 function and change the branch to refer to that function instead. */
22150 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
22151 && fixP->fx_addsy != NULL
22152 && S_IS_DEFINED (fixP->fx_addsy)
22153 && ! THUMB_IS_FUNC (fixP->fx_addsy))
22155 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
/* Decide whether a fixup must be kept as a relocation rather than being
   resolved at assembly time.  NOTE(review): this excerpt has dropped the
   return type, braces and the `return' statements inside the branches
   (see the embedded line-number gaps); comments only are added here.  */
22162 arm_force_relocation (struct fix * fixp)
22164 #if defined (OBJ_COFF) && defined (TE_PE)
22165 if (fixp->fx_r_type == BFD_RELOC_RVA)
22169 /* In case we have a call or a branch to a function in ARM ISA mode from
22170 a thumb function or vice-versa force the relocation. These relocations
22171 are cleared off for some cores that might have blx and simple transformations
22175 switch (fixp->fx_r_type)
/* ARM-mode call/jump targeting a Thumb function: keep the reloc so the
   linker can handle the mode switch.  */
22177 case BFD_RELOC_ARM_PCREL_JUMP:
22178 case BFD_RELOC_ARM_PCREL_CALL:
22179 case BFD_RELOC_THUMB_PCREL_BLX:
22180 if (THUMB_IS_FUNC (fixp->fx_addsy))
/* Conversely, Thumb-side branches targeting an ARM function.  */
22184 case BFD_RELOC_ARM_PCREL_BLX:
22185 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22186 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22187 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22188 if (ARM_IS_FUNC (fixp->fx_addsy))
22197 /* Resolve these relocations even if the symbol is extern or weak.
22198 Technically this is probably wrong due to symbol preemption.
22199 In practice these relocations do not have enough range to be useful
22200 at dynamic link time, and some code (e.g. in the Linux kernel)
22201 expects these references to be resolved. */
22202 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
22203 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
22204 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
22205 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
22206 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22207 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
22208 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
22209 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
22210 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22211 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
22212 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
22213 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
22214 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
22215 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
22218 /* Always leave these relocations for the linker. */
22219 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
22220 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
22221 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
22224 /* Always generate relocations against function symbols. */
22225 if (fixp->fx_r_type == BFD_RELOC_32
22227 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
/* Fall back to the generic (target-independent) decision.  */
22230 return generic_force_reloc (fixp);
22233 #if defined (OBJ_ELF) || defined (OBJ_COFF)
22234 /* Relocations against function names must be left unadjusted,
22235 so that the linker can use this information to generate interworking
22236 stubs. The MIPS version of this function
22237 also prevents relocations that are mips-16 specific, but I do not
22238 know why it does this.
22241 There is one other problem that ought to be addressed here, but
22242 which currently is not: Taking the address of a label (rather
22243 than a function) and then later jumping to that address. Such
22244 addresses also ought to have their bottom bit set (assuming that
22245 they reside in Thumb code), but at the moment they will not. */
/* Return whether a fixup's symbol may be replaced by section+offset.
   NOTE(review): return type, braces and `return' statements are missing
   from this excerpt (see the embedded line-number gaps).  */
22248 arm_fix_adjustable (fixS * fixP)
22250 if (fixP->fx_addsy == NULL)
22253 /* Preserve relocations against symbols with function type. */
22254 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
/* Thumb functions keep their symbol so the low bit can be set.  */
22257 if (THUMB_IS_FUNC (fixP->fx_addsy)
22258 && fixP->fx_subsy == NULL)
22261 /* We need the symbol name for the VTABLE entries. */
22262 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
22263 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
22266 /* Don't allow symbols to be discarded on GOT related relocs. */
22267 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
22268 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
22269 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
22270 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
22271 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
22272 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
22273 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
22274 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
22275 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
22276 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
22277 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
22278 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
22279 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
22280 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
22283 /* Similarly for group relocations. */
22284 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
22285 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
22286 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
22289 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
22290 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
22291 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
22292 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
22293 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
22294 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
22295 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
22296 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
22297 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
22302 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
/* Return the BFD target name string matching the configured environment
   and the selected endianness.  NOTE(review): the #ifdef for the Symbian
   branch, the closing #endif/#else lines and the braces are missing from
   this excerpt.  */
22307 elf32_arm_target_format (void)
22310 return (target_big_endian
22311 ? "elf32-bigarm-symbian"
22312 : "elf32-littlearm-symbian")
22313 #elif defined (TE_VXWORKS)
22314 return (target_big_endian
22315 ? "elf32-bigarm-vxworks"
22316 : "elf32-littlearm-vxworks");
22317 #elif defined (TE_NACL)
22318 return (target_big_endian
22319 ? "elf32-bigarm-nacl"
22320 : "elf32-littlearm-nacl");
/* Default (plain ELF) case.  */
22322 if (target_big_endian)
22323 return "elf32-bigarm";
22325 return "elf32-littlearm";
/* Symbol-frobbing hook: defers to the generic ELF implementation.
   NOTE(review): return type, second parameter line and braces are missing
   from this excerpt.  */
22330 armelf_frob_symbol (symbolS * symp,
22333 elf_frob_symbol (symp, puntp);
22337 /* MD interface: Finalization. */
/* End-of-assembly hook (presumably arm_md_end -- its signature line is
   missing from this excerpt; TODO confirm): verify all IT blocks are
   closed, then flush every pending literal pool to the end of its
   section.  */
22342 literal_pool * pool;
22344 /* Ensure that all the IT blocks are properly closed. */
22345 check_it_blocks_finished ();
22347 for (pool = list_of_pools; pool; pool = pool->next)
22349 /* Put it at the end of the relevant section. */
22350 subseg_set (pool->section, pool->sub_section);
/* Keep the mapping-symbol machinery in sync with the section change.  */
22352 arm_elf_change_section ();
22359 /* Remove any excess mapping symbols generated for alignment frags in
22360 SEC. We may have created a mapping symbol before a zero byte
22361 alignment; remove it if there's a mapping symbol after the
/* NOTE(review): function signature's return type line, braces and several
   loop-control lines are missing from this excerpt -- comments only are
   added here.  */
22364 check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
22365 void *dummy ATTRIBUTE_UNUSED)
22367 segment_info_type *seginfo = seg_info (sec);
/* Nothing to do for sections with no frag chain.  */
22370 if (seginfo == NULL || seginfo->frchainP == NULL)
22373 for (fragp = seginfo->frchainP->frch_root;
22375 fragp = fragp->fr_next)
22377 symbolS *sym = fragp->tc_frag_data.last_map;
22378 fragS *next = fragp->fr_next;
22380 /* Variable-sized frags have been converted to fixed size by
22381 this point. But if this was variable-sized to start with,
22382 there will be a fixed-size frag after it. So don't handle
22384 if (sym == NULL || next == NULL)
22387 if (S_GET_VALUE (sym) < next->fr_address)
22388 /* Not at the end of this frag. */
22390 know (S_GET_VALUE (sym) == next->fr_address);
22394 if (next->tc_frag_data.first_map != NULL)
22396 /* Next frag starts with a mapping symbol. Discard this
22398 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
22402 if (next->fr_next == NULL)
22404 /* This mapping symbol is at the end of the section. Discard
22406 know (next->fr_fix == 0 && next->fr_var == 0);
22407 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
22411 /* As long as we have empty frags without any mapping symbols,
22413 /* If the next frag is non-empty and does not start with a
22414 mapping symbol, then this mapping symbol is required. */
22415 if (next->fr_address != next->fr_next->fr_address)
22418 next = next->fr_next;
22420 while (next != NULL);
22425 /* Adjust the symbol table. This marks Thumb symbols as distinct from
/* Marks Thumb symbols so linkers/debuggers can distinguish them from ARM
   ones.  NOTE(review): this excerpt interleaves the OBJ_COFF and OBJ_ELF
   variants of the function; the #ifdef/#endif lines, braces and several
   case labels were dropped by extraction.  */
22429 arm_adjust_symtab (void)
/* --- COFF path: adjust storage classes of Thumb symbols. --- */
22434 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
22436 if (ARM_IS_THUMB (sym))
22438 if (THUMB_IS_FUNC (sym))
22440 /* Mark the symbol as a Thumb function. */
22441 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
22442 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
22443 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
22445 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
22446 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
22448 as_bad (_("%s: unexpected function type: %d"),
22449 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
/* Non-function Thumb symbols: map each storage class to its Thumb
   counterpart (case labels are missing from this excerpt).  */
22451 else switch (S_GET_STORAGE_CLASS (sym))
22454 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
22457 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
22460 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
22468 if (ARM_IS_INTERWORK (sym))
22469 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
/* --- ELF path: tag Thumb symbols in the ELF symbol table. --- */
22476 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
22478 if (ARM_IS_THUMB (sym))
22480 elf_symbol_type * elf_sym;
22482 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
22483 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
/* Skip the $a/$t/$d mapping symbols themselves.  */
22485 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
22486 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
22488 /* If it's a .thumb_func, declare it as so,
22489 otherwise tag label as .code 16. */
22490 if (THUMB_IS_FUNC (sym))
22491 elf_sym->internal_elf_sym.st_target_internal
22492 = ST_BRANCH_TO_THUMB;
22493 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
22494 elf_sym->internal_elf_sym.st_info =
22495 ELF_ST_INFO (bind, STT_ARM_16BIT);
22500 /* Remove any overlapping mapping symbols generated by alignment frags. */
22501 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
22502 /* Now do generic ELF adjustments. */
22503 elf_adjust_symtab ();
22507 /* MD interface: Initialization. */
/* Convert the textual FP constants in fp_const[] to internal form in
   fp_values[]; presumably aborts on conversion failure -- the body of the
   `if' is missing from this excerpt, as are the return type and braces.  */
22510 set_constant_flonums (void)
22514 for (i = 0; i < NUM_FLOAT_VALS; i++)
22515 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
22519 /* Auto-select Thumb mode if it's the only available instruction set for the
22520 given architecture. */
22523 autoselect_thumb_from_cpu_variant (void)
/* A CPU without the ARM (v1) instruction set is Thumb-only, so start the
   assembler in 16-bit mode.  */
22525 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
22526 opcode_select (16);
/* Body of md_begin (its signature line is missing from this excerpt):
   build the opcode/condition/shift/PSR/register/reloc/barrier hash
   tables, resolve the CPU/FPU selection from the command line, set the
   object-format private flags, and record the BFD machine number.
   NOTE(review): many interior lines (braces, #ifdef/#else/#endif, abort
   paths) were dropped by extraction -- see the embedded line-number
   gaps.  Only comments are added below.  */
/* Allocate every lookup table up front; any allocation failure is
   fatal.  */
22535 if ( (arm_ops_hsh = hash_new ()) == NULL
22536 || (arm_cond_hsh = hash_new ()) == NULL
22537 || (arm_shift_hsh = hash_new ()) == NULL
22538 || (arm_psr_hsh = hash_new ()) == NULL
22539 || (arm_v7m_psr_hsh = hash_new ()) == NULL
22540 || (arm_reg_hsh = hash_new ()) == NULL
22541 || (arm_reloc_hsh = hash_new ()) == NULL
22542 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
22543 as_fatal (_("virtual memory exhausted"));
/* Populate the tables from the static description arrays.  */
22545 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
22546 hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
22547 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
22548 hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
22549 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
22550 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
22551 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
22552 hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
22553 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
22554 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
22555 (void *) (v7m_psrs + i));
22556 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
22557 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
22559 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
22561 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
22562 (void *) (barrier_opt_names + i));
22564 for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
22566 struct reloc_entry * entry = reloc_names + i;
22568 if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
22569 /* This makes encode_branch() use the EABI versions of this relocation. */
22570 entry->reloc = BFD_RELOC_UNUSED;
22572 hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
22576 set_constant_flonums ();
22578 /* Set the cpu variant based on the command-line options. We prefer
22579 -mcpu= over -march= if both are set (as for GCC); and we prefer
22580 -mfpu= over any other way of setting the floating point unit.
22581 Use of legacy options with new options are faulted. */
22584 if (mcpu_cpu_opt || march_cpu_opt)
22585 as_bad (_("use of old and new-style options to set CPU type"));
22587 mcpu_cpu_opt = legacy_cpu;
22589 else if (!mcpu_cpu_opt)
22590 mcpu_cpu_opt = march_cpu_opt;
/* Same diagnostic pattern for FPU options.  */
22595 as_bad (_("use of old and new-style options to set FPU type"));
22597 mfpu_opt = legacy_fpu;
22599 else if (!mfpu_opt)
22601 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
22602 || defined (TE_NetBSD) || defined (TE_VXWORKS))
22603 /* Some environments specify a default FPU. If they don't, infer it
22604 from the processor. */
22606 mfpu_opt = mcpu_fpu_opt;
22608 mfpu_opt = march_fpu_opt;
22610 mfpu_opt = &fpu_default;
22616 if (mcpu_cpu_opt != NULL)
22617 mfpu_opt = &fpu_default;
22618 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
22619 mfpu_opt = &fpu_arch_vfp_v2;
22621 mfpu_opt = &fpu_arch_fpa;
/* No CPU named anywhere: fall back to the build-time default.  */
22627 mcpu_cpu_opt = &cpu_default;
22628 selected_cpu = cpu_default;
22632 selected_cpu = *mcpu_cpu_opt;
22634 mcpu_cpu_opt = &arm_arch_any;
22637 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
22639 autoselect_thumb_from_cpu_variant ();
22641 arm_arch_used = thumb_arch_used = arm_arch_none;
22643 #if defined OBJ_COFF || defined OBJ_ELF
22645 unsigned int flags = 0;
22647 #if defined OBJ_ELF
22648 flags = meabi_flags;
22650 switch (meabi_flags)
22652 case EF_ARM_EABI_UNKNOWN:
22654 /* Set the flags in the private structure. */
22655 if (uses_apcs_26) flags |= F_APCS26;
22656 if (support_interwork) flags |= F_INTERWORK;
22657 if (uses_apcs_float) flags |= F_APCS_FLOAT;
22658 if (pic_code) flags |= F_PIC;
22659 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
22660 flags |= F_SOFT_FLOAT;
22662 switch (mfloat_abi_opt)
22664 case ARM_FLOAT_ABI_SOFT:
22665 case ARM_FLOAT_ABI_SOFTFP:
22666 flags |= F_SOFT_FLOAT;
22669 case ARM_FLOAT_ABI_HARD:
22670 if (flags & F_SOFT_FLOAT)
22671 as_bad (_("hard-float conflicts with specified fpu"));
22675 /* Using pure-endian doubles (even if soft-float). */
22676 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
22677 flags |= F_VFP_FLOAT;
22679 #if defined OBJ_ELF
22680 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
22681 flags |= EF_ARM_MAVERICK_FLOAT;
22684 case EF_ARM_EABI_VER4:
22685 case EF_ARM_EABI_VER5:
22686 /* No additional flags to set. */
22693 bfd_set_private_flags (stdoutput, flags);
22695 /* We have run out of flags in the COFF header to encode the
22696 status of ATPCS support, so instead we create a dummy,
22697 empty, debug section called .arm.atpcs. */
22702 sec = bfd_make_section (stdoutput, ".arm.atpcs");
22706 bfd_set_section_flags
22707 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
22708 bfd_set_section_size (stdoutput, sec, 0);
22709 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
22715 /* Record the CPU type as well. */
/* Most specific feature first: coprocessor extensions, then architecture
   versions in descending order.  */
22716 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
22717 mach = bfd_mach_arm_iWMMXt2;
22718 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
22719 mach = bfd_mach_arm_iWMMXt;
22720 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
22721 mach = bfd_mach_arm_XScale;
22722 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
22723 mach = bfd_mach_arm_ep9312;
22724 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
22725 mach = bfd_mach_arm_5TE;
22726 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
22728 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
22729 mach = bfd_mach_arm_5T;
22731 mach = bfd_mach_arm_5;
22733 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
22735 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
22736 mach = bfd_mach_arm_4T;
22738 mach = bfd_mach_arm_4;
22740 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
22741 mach = bfd_mach_arm_3M;
22742 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
22743 mach = bfd_mach_arm_3;
22744 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
22745 mach = bfd_mach_arm_2a;
22746 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
22747 mach = bfd_mach_arm_2;
22749 mach = bfd_mach_arm_unknown;
22751 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
22754 /* Command line processing. */
22757 Invocation line includes a switch not recognized by the base assembler.
22758 See if it's a processor-specific option.
22760 This routine is somewhat complicated by the need for backwards
22761 compatibility (since older releases of gcc can't be changed).
22762 The new options try to make the interface as compatible as
22765 New options (supported) are:
22767 -mcpu=<cpu name> Assemble for selected processor
22768 -march=<architecture name> Assemble for selected architecture
22769 -mfpu=<fpu architecture> Assemble for selected FPU.
22770 -EB/-mbig-endian Big-endian
22771 -EL/-mlittle-endian Little-endian
22772 -k Generate PIC code
22773 -mthumb Start in Thumb mode
22774 -mthumb-interwork Code supports ARM/Thumb interworking
22776 -m[no-]warn-deprecated Warn about deprecated features
22778 For now we will also provide support for:
22780 -mapcs-32 32-bit Program counter
22781 -mapcs-26 26-bit Program counter
22782 -mapcs-float Floats passed in FP registers
22783 -mapcs-reentrant Reentrant code
22785 (sometime these will probably be replaced with -mapcs=<list of options>
22786 and -matpcs=<list of options>)
22788 The remaining options are only supported for backwards compatibility.
22789 Cpu variants, the arm part is optional:
22790 -m[arm]1 Currently not supported.
22791 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
22792 -m[arm]3 Arm 3 processor
22793 -m[arm]6[xx], Arm 6 processors
22794 -m[arm]7[xx][t][[d]m] Arm 7 processors
22795 -m[arm]8[10] Arm 8 processors
22796 -m[arm]9[20][tdmi] Arm 9 processors
22797 -mstrongarm[110[0]] StrongARM processors
22798 -mxscale XScale processors
22799 -m[arm]v[2345[t[e]]] Arm architectures
22800 -mall All (except the ARM1)
22802 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
22803 -mfpe-old (No float load/store multiples)
22804 -mvfpxd VFP Single precision
22806 -mno-fpu Disable all floating point instructions
22808 The following CPU names are recognized:
22809 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
22810 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
22811 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
22812 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
22813 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
22814 arm10t arm10e, arm1020t, arm1020e, arm10200e,
22815 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
/* Short options recognized by the ARM backend: -m<...> and -k (PIC).  */
22819 const char * md_shortopts = "m:k";
/* Endianness options: with ARM_BI_ENDIAN both -EB and -EL exist;
   otherwise only the one matching the default byte order is defined.
   NOTE(review): the #else/#endif lines of these conditionals are missing
   from this excerpt.  */
22821 #ifdef ARM_BI_ENDIAN
22822 #define OPTION_EB (OPTION_MD_BASE + 0)
22823 #define OPTION_EL (OPTION_MD_BASE + 1)
22825 #if TARGET_BYTES_BIG_ENDIAN
22826 #define OPTION_EB (OPTION_MD_BASE + 0)
22828 #define OPTION_EL (OPTION_MD_BASE + 1)
22831 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
/* Long options table handed to getopt by the generic gas driver.  */
22833 struct option md_longopts[] =
22836 {"EB", no_argument, NULL, OPTION_EB},
22839 {"EL", no_argument, NULL, OPTION_EL},
22841 {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
22842 {NULL, no_argument, NULL, 0}
22845 size_t md_longopts_size = sizeof (md_longopts);
/* Describes one simple on/off command-line option: matching it stores
   VALUE into *VAR (see the arm_opts[] table below).  */
22847 struct arm_option_table
22849 char *option; /* Option name to match. */
22850 char *help; /* Help information. */
22851 int *var; /* Variable to change. */
22852 int value; /* What to change it to. */
22853 char *deprecated; /* If non-null, print this message. */
/* Simple flag options: each entry sets an int variable when matched.  */
22856 struct arm_option_table arm_opts[] =
22858 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
22859 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
22860 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
22861 &support_interwork, 1, NULL},
22862 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
22863 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
22864 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
22866 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
22867 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
22868 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
22869 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
22872 /* These are recognized by the assembler, but have no effect on code. */
22873 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
22874 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
22876 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
22877 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
22878 &warn_on_deprecated, 0, NULL},
22879 {NULL, NULL, NULL, 0, NULL}
/* Describes one deprecated option: matching it points *VAR at VALUE and
   (optionally) prints a use-the-new-option warning.  */
22882 struct arm_legacy_option_table
22884 char *option; /* Option name to match. */
22885 const arm_feature_set **var; /* Variable to change. */
22886 const arm_feature_set value; /* What to change it to. */
22887 char *deprecated; /* If non-null, print this message. */
/* Deprecated -m<cpu>/-m<arch>/-m<fpu> spellings, each mapped to a feature
   set plus the message naming its modern replacement.  */
22890 const struct arm_legacy_option_table arm_legacy_opts[] =
22892 /* DON'T add any new processors to this list -- we want the whole list
22893 to go away... Add them to the processors table instead. */
22894 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
22895 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
22896 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
22897 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
22898 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
22899 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
22900 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
22901 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
22902 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
22903 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
22904 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
22905 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
22906 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
22907 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
22908 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
22909 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
22910 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
22911 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
22912 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
22913 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
22914 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
22915 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
22916 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
22917 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
22918 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
22919 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
22920 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
22921 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
22922 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
22923 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
22924 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
22925 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
22926 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
22927 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
22928 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
22929 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
22930 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
22931 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
22932 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
22933 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
22934 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
22935 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
22936 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
22937 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
22938 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
22939 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
22940 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22941 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22942 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22943 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
22944 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
22945 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
22946 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
22947 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
22948 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
22949 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
22950 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
22951 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
22952 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
22953 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
22954 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
22955 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
22956 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
22957 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
22958 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
22959 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
22960 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
22961 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
22962 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
22963 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
22964 N_("use -mcpu=strongarm110")},
22965 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
22966 N_("use -mcpu=strongarm1100")},
22967 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
22968 N_("use -mcpu=strongarm1110")},
22969 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
22970 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
22971 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
22973 /* Architecture variants -- don't add any more to this list either. */
22974 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
22975 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
22976 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
22977 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
22978 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
22979 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
22980 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
22981 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
22982 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
22983 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
22984 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
22985 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
22986 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
22987 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
22988 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
22989 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
22990 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
22991 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
22993 /* Floating point variants -- don't add any more to this list either. */
22994 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
22995 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
22996 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
22997 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
22998 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
23000 {NULL, NULL, ARM_ARCH_NONE, NULL}
/* Describes one -mcpu= choice: feature set, default FPU and canonical
   name.  NOTE(review): the name/name_len member lines are missing from
   this excerpt (embedded numbers 23004-23006) -- see ARM_CPU_OPT below.  */
23003 struct arm_cpu_option_table
23007 const arm_feature_set value;
23008 /* For some CPUs we assume an FPU unless the user explicitly sets
23010 const arm_feature_set default_fpu;
23011 /* The canonical name of the CPU, or NULL to use NAME converted to upper
23013 const char *canonical_name;
23016 /* This list should, at a minimum, contain all the cpu names
23017 recognized by GCC. */
/* Convenience initializer: stores the name and its precomputed length so
   lookups can avoid strlen at match time.  */
23018 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
23019 static const struct arm_cpu_option_table arm_cpus[] =
23021 ARM_CPU_OPT ("all", ARM_ANY, FPU_ARCH_FPA, NULL),
23022 ARM_CPU_OPT ("arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL),
23023 ARM_CPU_OPT ("arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL),
23024 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
23025 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
23026 ARM_CPU_OPT ("arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23027 ARM_CPU_OPT ("arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23028 ARM_CPU_OPT ("arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23029 ARM_CPU_OPT ("arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23030 ARM_CPU_OPT ("arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23031 ARM_CPU_OPT ("arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23032 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
23033 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23034 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
23035 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23036 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
23037 ARM_CPU_OPT ("arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23038 ARM_CPU_OPT ("arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23039 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23040 ARM_CPU_OPT ("arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23041 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23042 ARM_CPU_OPT ("arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23043 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23044 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23045 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23046 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23047 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23048 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
23049 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23050 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23051 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23052 ARM_CPU_OPT ("arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23053 ARM_CPU_OPT ("arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23054 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23055 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23056 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23057 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23058 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23059 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23060 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"),
23061 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23062 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23063 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23064 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
23065 ARM_CPU_OPT ("fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23066 ARM_CPU_OPT ("fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
23067 /* For V5 or later processors we default to using VFP; but the user
23068 should really set the FPU type explicitly. */
23069 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
23070 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23071 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
23072 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
23073 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
23074 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
23075 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"),
23076 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23077 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
23078 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"),
23079 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23080 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23081 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
23082 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
23083 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23084 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"),
23085 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
23086 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23087 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23088 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2,
23090 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
23091 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23092 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23093 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23094 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23095 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
23096 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"),
23097 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL),
23098 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2,
23100 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL),
23101 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, "MPCore"),
23102 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, "MPCore"),
23103 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL),
23104 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL),
23105 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL),
23106 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL),
23107 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC,
23108 FPU_NONE, "Cortex-A5"),
23109 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
23110 FPU_ARCH_NEON_VFP_V4,
23112 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC,
23113 ARM_FEATURE (0, FPU_VFP_V3
23114 | FPU_NEON_EXT_V1),
23116 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC,
23117 ARM_FEATURE (0, FPU_VFP_V3
23118 | FPU_NEON_EXT_V1),
23120 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7A_IDIV_MP_SEC_VIRT,
23121 FPU_ARCH_NEON_VFP_V4,
23123 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R, FPU_NONE, "Cortex-R4"),
23124 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R, FPU_ARCH_VFP_V3D16,
23126 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV,
23127 FPU_NONE, "Cortex-R5"),
23128 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M4"),
23129 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M, FPU_NONE, "Cortex-M3"),
23130 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M1"),
23131 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0"),
23132 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0+"),
23133 /* ??? XSCALE is really an architecture. */
23134 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
23135 /* ??? iwmmxt is not a processor. */
23136 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
23137 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
23138 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
23140 ARM_CPU_OPT ("ep9312", ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
23143 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
/* NOTE(review): this chunk is a line-numbered dump of tc-arm.c; the leading
   "23xxx" token on each line is dump artifact text, and some original lines
   (braces, the name/name_len members) are elided.  Comments only added.  */
/* Table entry for -march= option parsing: maps an architecture name to its
   feature set ("value") and the FPU assumed by default for it.  */
23147 struct arm_arch_option_table
23151 const arm_feature_set value;
23152 const arm_feature_set default_fpu;
23155 /* This list should, at a minimum, contain all the architecture names
23156 recognized by GCC. */
/* The macro fills in name and strlen(name) so lookups can compare a
   counted prefix without calling strlen per entry.  */
23157 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
23158 static const struct arm_arch_option_table arm_archs[] =
/* First entry "all" is deliberately skipped by the .arch directive
   handler (it starts its scan at arm_archs + 1).  */
23160 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
23161 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
23162 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
23163 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
23164 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
23165 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
23166 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
23167 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
23168 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
23169 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
23170 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
23171 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
23172 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
23173 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
23174 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP),
23175 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP),
23176 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP),
23177 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP),
23178 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP),
23179 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP),
23180 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP),
23181 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP),
23182 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP),
23183 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP),
23184 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP),
23185 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP),
23186 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
23187 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
23188 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP),
23189 /* The official spelling of the ARMv7 profile variants is the dashed form.
23190 Accept the non-dashed form for compatibility with old toolchains. */
23191 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP),
23192 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP),
23193 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
23194 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP),
23195 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP),
23196 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
23197 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP),
23198 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP),
23199 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
23200 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
23201 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
/* Sentinel: lookup loops stop on a NULL name.  */
23202 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
23204 #undef ARM_ARCH_OPT
/* NOTE(review): dump carries original line numbers in-line; some lines
   (struct members, closing braces) are elided.  Comments only added.  */
23206 /* ISA extensions in the co-processor and main instruction set space. */
/* Entry: { name, name_len, feature bits added/removed, architectures the
   extension may legally be applied to }.  */
23207 struct arm_option_extension_value_table
23211 const arm_feature_set value;
23212 const arm_feature_set allowed_archs;
/* Alphabetical order is load-bearing: arm_parse_extension relies on it to
   enforce "add before remove, in order" (see its ADDING_VALUE logic).  */
23215 /* The following table must be in alphabetical order with a NULL last entry.
23217 #define ARM_EXT_OPT(N, V, AA) { N, sizeof (N) - 1, V, AA }
23218 static const struct arm_option_extension_value_table arm_extensions[] =
23220 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
23221 ARM_FEATURE (ARM_EXT_V8, 0)),
23222 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8,
23223 ARM_FEATURE (ARM_EXT_V8, 0)),
23224 ARM_EXT_OPT ("idiv", ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
23225 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
23226 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE (0, ARM_CEXT_IWMMXT), ARM_ANY),
23227 ARM_EXT_OPT ("iwmmxt2",
23228 ARM_FEATURE (0, ARM_CEXT_IWMMXT2), ARM_ANY),
23229 ARM_EXT_OPT ("maverick",
23230 ARM_FEATURE (0, ARM_CEXT_MAVERICK), ARM_ANY),
23231 ARM_EXT_OPT ("mp", ARM_FEATURE (ARM_EXT_MP, 0),
23232 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
23233 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
23234 ARM_FEATURE (ARM_EXT_V8, 0)),
23235 ARM_EXT_OPT ("os", ARM_FEATURE (ARM_EXT_OS, 0),
23236 ARM_FEATURE (ARM_EXT_V6M, 0)),
23237 ARM_EXT_OPT ("sec", ARM_FEATURE (ARM_EXT_SEC, 0),
23238 ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)),
23239 ARM_EXT_OPT ("virt", ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV
23241 ARM_FEATURE (ARM_EXT_V7A, 0)),
23242 ARM_EXT_OPT ("xscale",ARM_FEATURE (0, ARM_CEXT_XSCALE), ARM_ANY),
23243 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
/* NOTE(review): line-numbered dump; struct name/name_len members and some
   braces are elided from this view.  Comments only added.  */
23247 /* ISA floating-point and Advanced SIMD extensions. */
/* Entry: { -mfpu= name, feature set it selects }.  */
23248 struct arm_option_fpu_value_table
23251 const arm_feature_set value;
23254 /* This list should, at a minimum, contain all the fpu names
23255 recognized by GCC. */
23256 static const struct arm_option_fpu_value_table arm_fpus[] =
23258 {"softfpa", FPU_NONE},
23259 {"fpe", FPU_ARCH_FPE},
23260 {"fpe2", FPU_ARCH_FPE},
23261 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
23262 {"fpa", FPU_ARCH_FPA},
23263 {"fpa10", FPU_ARCH_FPA},
23264 {"fpa11", FPU_ARCH_FPA},
23265 {"arm7500fe", FPU_ARCH_FPA},
23266 {"softvfp", FPU_ARCH_VFP},
23267 {"softvfp+vfp", FPU_ARCH_VFP_V2},
23268 {"vfp", FPU_ARCH_VFP_V2},
23269 {"vfp9", FPU_ARCH_VFP_V2},
23270 {"vfp3", FPU_ARCH_VFP_V3}, /* For backwards compatibility. */
23271 {"vfp10", FPU_ARCH_VFP_V2},
23272 {"vfp10-r0", FPU_ARCH_VFP_V1},
23273 {"vfpxd", FPU_ARCH_VFP_V1xD},
23274 {"vfpv2", FPU_ARCH_VFP_V2},
23275 {"vfpv3", FPU_ARCH_VFP_V3},
23276 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16},
23277 {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
23278 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16},
23279 {"vfpv3xd", FPU_ARCH_VFP_V3xD},
23280 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16},
/* Some CPU names are accepted as FPU names for legacy command lines.  */
23281 {"arm1020t", FPU_ARCH_VFP_V1},
23282 {"arm1020e", FPU_ARCH_VFP_V2},
23283 {"arm1136jfs", FPU_ARCH_VFP_V2},
23284 {"arm1136jf-s", FPU_ARCH_VFP_V2},
23285 {"maverick", FPU_ARCH_MAVERICK},
23286 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
23287 {"neon-fp16", FPU_ARCH_NEON_FP16},
23288 {"vfpv4", FPU_ARCH_VFP_V4},
23289 {"vfpv4-d16", FPU_ARCH_VFP_V4D16},
23290 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16},
23291 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4},
23292 {"fp-armv8", FPU_ARCH_VFP_ARMV8},
23293 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8},
23294 {"crypto-neon-fp-armv8",
23295 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
23296 {NULL, ARM_ARCH_NONE}
/* Generic name -> integer value table, used for -mfloat-abi= and -meabi=.
   NOTE(review): the struct's name/value members are elided in this dump.  */
23299 struct arm_option_value_table
23305 static const struct arm_option_value_table arm_float_abis[] =
23307 {"hard", ARM_FLOAT_ABI_HARD},
23308 {"softfp", ARM_FLOAT_ABI_SOFTFP},
23309 {"soft", ARM_FLOAT_ABI_SOFT},
23314 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
23315 static const struct arm_option_value_table arm_eabis[] =
23317 {"gnu", EF_ARM_EABI_UNKNOWN},
23318 {"4", EF_ARM_EABI_VER4},
23319 {"5", EF_ARM_EABI_VER5},
/* Descriptor for long-form "-mxxx=" options: matched by prefix, then the
   remainder of the argument is handed to FUNC.  */
23324 struct arm_long_option_table
23326 char * option; /* Substring to match. */
23327 char * help; /* Help information. */
23328 int (* func) (char * subopt); /* Function to decode sub-option. */
23329 char * deprecated; /* If non-null, print this message. */
/* Parse a '+ext1+ext2+noext3...' suffix of a -mcpu=/-march= value.
   STR points at the first extension name (after the '+'); *OPT_P is the
   base feature set, which is copied to heap and updated in place, with
   *OPT_P redirected to the new set.
   NOTE(review): several lines (loop internals, error returns) are elided
   in this dump; comments describe only what is visible.  */
23333 arm_parse_extension (char *str, const arm_feature_set **opt_p)
/* Heap copy so the caller's (static, const) base table entry is never
   modified.  Leaked deliberately: lives for the whole assembly.  */
23335 arm_feature_set *ext_set = (arm_feature_set *)
23336 xmalloc (sizeof (arm_feature_set));
23338 /* We insist on extensions being specified in alphabetical order, and with
23339 extensions being added before being removed. We achieve this by having
23340 the global ARM_EXTENSIONS table in alphabetical order, and using the
23341 ADDING_VALUE variable to indicate whether we are adding an extension (1)
23342 or removing it (0) and only allowing it to change in the order
23344 const struct arm_option_extension_value_table * opt = NULL;
23345 int adding_value = -1; /* -1 == not yet determined.  */
23347 /* Copy the feature set, so that we can modify it. */
23348 *ext_set = **opt_p;
23351 while (str != NULL && *str != 0)
23358 as_bad (_("invalid architectural extension"));
/* Each iteration handles one '+'-separated token.  */
23363 ext = strchr (str, '+');
23368 len = strlen (str);
/* A leading "no" flips to removal mode; once removing, adding again
   is an error (enforced below).  */
23370 if (len >= 2 && strncmp (str, "no", 2) == 0)
23372 if (adding_value != 0)
/* Restart the table scan: removals must themselves be alphabetical.  */
23375 opt = arm_extensions;
23383 if (adding_value == -1)
23386 opt = arm_extensions;
23388 else if (adding_value != 1)
23390 as_bad (_("must specify extensions to add before specifying "
23391 "those to remove"));
23398 as_bad (_("missing architectural extension"));
23402 gas_assert (adding_value != -1);
23403 gas_assert (opt != NULL);
/* Resume scanning from the previous match so out-of-order names fail.  */
23405 /* Scan over the options table trying to find an exact match. */
23406 for (; opt->name != NULL; opt++)
23407 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
23409 /* Check we can apply the extension to this architecture. */
23410 if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
23412 as_bad (_("extension does not apply to the base architecture"));
23416 /* Add or remove the extension. */
23418 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
23420 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
23425 if (opt->name == NULL)
23427 /* Did we fail to find an extension because it wasn't specified in
23428 alphabetical order, or because it does not exist? */
/* Rescan from the table start to distinguish the two diagnostics.  */
23430 for (opt = arm_extensions; opt->name != NULL; opt++)
23431 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
23434 if (opt->name == NULL)
23435 as_bad (_("unknown architectural extension `%s'"), str);
23437 as_bad (_("architectural extensions must be specified in "
23438 "alphabetical order"));
23444 /* We should skip the extension we've just matched the next time
/* Handle -mcpu=NAME[+ext...]: look NAME up in arm_cpus, set the global
   CPU/FPU selections and SELECTED_CPU_NAME, then hand any '+' suffix to
   arm_parse_extension.  Returns nonzero on success (gaps elided here).  */
23456 arm_parse_cpu (char *str)
23458 const struct arm_cpu_option_table *opt;
23459 char *ext = strchr (str, '+');
23465 len = strlen (str);
23469 as_bad (_("missing cpu name `%s'"), str);
23473 for (opt = arm_cpus; opt->name != NULL; opt++)
23474 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
23476 mcpu_cpu_opt = &opt->value;
23477 mcpu_fpu_opt = &opt->default_fpu;
/* Prefer the table's canonical spelling; otherwise upper-case the
   name the user gave for the Tag_CPU_name attribute.  */
23478 if (opt->canonical_name)
23479 strcpy (selected_cpu_name, opt->canonical_name);
23484 for (i = 0; i < len; i++)
23485 selected_cpu_name[i] = TOUPPER (opt->name[i]);
23486 selected_cpu_name[i] = 0;
23490 return arm_parse_extension (ext, &mcpu_cpu_opt);
23495 as_bad (_("unknown cpu `%s'"), str);
/* Handle -march=NAME[+ext...]: mirror of arm_parse_cpu for the
   architecture table (sets march_cpu_opt / march_fpu_opt instead).  */
23500 arm_parse_arch (char *str)
23502 const struct arm_arch_option_table *opt;
23503 char *ext = strchr (str, '+');
23509 len = strlen (str);
23513 as_bad (_("missing architecture name `%s'"), str);
23517 for (opt = arm_archs; opt->name != NULL; opt++)
23518 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
23520 march_cpu_opt = &opt->value;
23521 march_fpu_opt = &opt->default_fpu;
23522 strcpy (selected_cpu_name, opt->name);
23525 return arm_parse_extension (ext, &march_cpu_opt);
23530 as_bad (_("unknown architecture `%s'\n"), str);
/* Handle -mfpu=NAME: exact-match lookup in arm_fpus; on success point
   mfpu_opt at the matching feature set.  */
23535 arm_parse_fpu (char * str)
23537 const struct arm_option_fpu_value_table * opt;
23539 for (opt = arm_fpus; opt->name != NULL; opt++)
23540 if (streq (opt->name, str))
23542 mfpu_opt = &opt->value;
23546 as_bad (_("unknown floating point format `%s'\n"), str);
/* Handle -mfloat-abi=NAME: exact-match lookup in arm_float_abis, storing
   the selected value in mfloat_abi_opt.  */
23551 arm_parse_float_abi (char * str)
23553 const struct arm_option_value_table * opt;
23555 for (opt = arm_float_abis; opt->name != NULL; opt++)
23556 if (streq (opt->name, str))
23558 mfloat_abi_opt = opt->value;
23562 as_bad (_("unknown floating point abi `%s'\n"), str);
/* Handle -meabi=VER: exact-match lookup in arm_eabis, storing the ELF
   header flag value in meabi_flags.  */
23568 arm_parse_eabi (char * str)
23570 const struct arm_option_value_table *opt;
23572 for (opt = arm_eabis; opt->name != NULL; opt++)
23573 if (streq (opt->name, str))
23575 meabi_flags = opt->value;
23578 as_bad (_("unknown EABI `%s'\n"), str);
/* Handle -mimplicit-it=MODE: set implicit_it_mode to one of the four
   recognized policies, or report the valid choices.  */
23584 arm_parse_it_mode (char * str)
23586 bfd_boolean ret = TRUE;
23588 if (streq ("arm", str))
23589 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
23590 else if (streq ("thumb", str))
23591 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
23592 else if (streq ("always", str))
23593 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
23594 else if (streq ("never", str))
23595 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
23598 as_bad (_("unknown implicit IT mode `%s', should be "\
23599 "arm, thumb, always, or never."), str);
/* Long option dispatch table: { prefix, help text, parser, deprecation
   message }.  md_parse_option matches by prefix and calls FUNC with the
   remainder of the argument.  */
23606 struct arm_long_option_table arm_long_opts[] =
23608 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
23609 arm_parse_cpu, NULL},
23610 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
23611 arm_parse_arch, NULL},
23612 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
23613 arm_parse_fpu, NULL},
23614 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
23615 arm_parse_float_abi, NULL},
23617 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
23618 arm_parse_eabi, NULL},
23620 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
23621 arm_parse_it_mode, NULL},
23622 {NULL, NULL, 0, NULL}
/* GAS target hook: handle one command-line option (character C plus
   optional ARG).  Tries, in order: hard-coded cases, the simple flag
   table, the legacy-option table, and finally the long -mxxx= table.
   NOTE(review): several lines (case labels, returns) are elided here.  */
23626 md_parse_option (int c, char * arg)
23628 struct arm_option_table *opt;
23629 const struct arm_legacy_option_table *fopt;
23630 struct arm_long_option_table *lopt;
23636 target_big_endian = 1;
23642 target_big_endian = 0;
23646 case OPTION_FIX_V4BX:
23651 /* Listing option. Just ignore these, we don't support additional
/* Simple flag options: single char plus exact argument match.  */
23656 for (opt = arm_opts; opt->option != NULL; opt++)
23658 if (c == opt->option[0]
23659 && ((arg == NULL && opt->option[1] == 0)
23660 || streq (arg, opt->option + 1)))
23662 /* If the option is deprecated, tell the user. */
23663 if (warn_on_deprecated && opt->deprecated != NULL)
23664 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
23665 arg ? arg : "", _(opt->deprecated))
23667 if (opt->var != NULL)
23668 *opt->var = opt->value;
/* Legacy options: like the above but storing a pointer to the value.  */
23674 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
23676 if (c == fopt->option[0]
23677 && ((arg == NULL && fopt->option[1] == 0)
23678 || streq (arg, fopt->option + 1)))
23680 /* If the option is deprecated, tell the user. */
23681 if (warn_on_deprecated && fopt->deprecated != NULL)
23682 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
23683 arg ? arg : "", _(fopt->deprecated))
23685 if (fopt->var != NULL)
23686 *fopt->var = &fopt->value;
/* Long options: prefix match, remainder passed to the sub-parser.  */
23692 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
23694 /* These options are expected to have an argument. */
23695 if (c == lopt->option[0]
23697 && strncmp (arg, lopt->option + 1,
23698 strlen (lopt->option + 1)) == 0)
23700 /* If the option is deprecated, tell the user. */
23701 if (warn_on_deprecated && lopt->deprecated != NULL)
23702 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
23703 _(lopt->deprecated));
23705 /* Call the sub-option parser. */
/* "- 1" because lopt->option includes the leading char already in C.  */
23706 return lopt->func (arg + strlen (lopt->option) - 1);
/* GAS target hook: print ARM-specific option help to FP, drawing help
   strings from arm_opts and arm_long_opts plus a few fixed lines.  */
23717 md_show_usage (FILE * fp)
23719 struct arm_option_table *opt;
23720 struct arm_long_option_table *lopt;
23722 fprintf (fp, _(" ARM-specific assembler options:\n"));
23724 for (opt = arm_opts; opt->option != NULL; opt++)
23725 if (opt->help != NULL)
23726 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
23728 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
23729 if (lopt->help != NULL)
23730 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
23734 -EB assemble code for a big-endian cpu\n"));
23739 -EL assemble code for a little-endian cpu\n"));
23743 --fix-v4bx Allow BX in ARMv4 code\n"));
/* NOTE(review): the typedef'd struct header (the integer "val" member)
   is elided from this dump; only the tail of the type is visible.  */
23751 arm_feature_set flags;
23752 } cpu_arch_ver_table;
23754 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
23755 least features first. */
/* Consumed by aeabi_set_public_attributes to derive Tag_CPU_arch: the
   last entry whose features appear in the used set wins.  */
23756 static const cpu_arch_ver_table cpu_arch_ver[] =
23762 {4, ARM_ARCH_V5TE},
23763 {5, ARM_ARCH_V5TEJ},
23767 {11, ARM_ARCH_V6M},
23768 {12, ARM_ARCH_V6SM},
23769 {8, ARM_ARCH_V6T2},
23770 {10, ARM_ARCH_V7A_IDIV_MP_SEC_VIRT},
23771 {10, ARM_ARCH_V7R},
23772 {10, ARM_ARCH_V7M},
23773 {14, ARM_ARCH_V8A},
23777 /* Set an attribute if it has not already been set by the user. */
/* Integer form: tags outside the known range are always written; known
   tags are only written when not explicitly set via .eabi_attribute.  */
23779 aeabi_set_attribute_int (int tag, int value)
23782 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
23783 || !attributes_set_explicitly[tag])
23784 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
/* String form: same override rule as the integer variant above.  */
23788 aeabi_set_attribute_string (int tag, const char *value)
23791 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
23792 || !attributes_set_explicitly[tag])
23793 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
23796 /* Set the public EABI object attributes. */
/* Derives the .ARM.attributes tags (Tag_CPU_arch, Tag_*_ISA_use,
   Tag_VFP_arch, ...) from the union of features actually used and those
   implied by the selected CPU/FPU.  NOTE(review): many lines (arch
   computation, profile assignments) are elided from this dump.  */
23798 aeabi_set_public_attributes (void)
23803 int fp16_optional = 0;
23804 arm_feature_set flags;
23805 arm_feature_set tmp;
23806 const cpu_arch_ver_table *p;
23808 /* Choose the architecture based on the capabilities of the requested cpu
23809 (if any) and/or the instructions actually used. */
23810 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
23811 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
23812 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
/* Any ARM-state insn implies at least v1; any Thumb insn at least v4T.  */
23814 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
23815 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
23817 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
23818 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
23820 /* Allow the user to override the reported architecture. */
23823 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
23824 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
23827 /* We need to make sure that the attributes do not identify us as v6S-M
23828 when the only v6S-M feature in use is the Operating System Extensions. */
23829 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
23830 if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
23831 ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
/* Walk cpu_arch_ver (least features first); each entry that contributes
   a feature updates the candidate arch value.  */
23835 for (p = cpu_arch_ver; p->val; p++)
23837 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
23840 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
23844 /* The table lookup above finds the last architecture to contribute
23845 a new feature. Unfortunately, Tag13 is a subset of the union of
23846 v6T2 and v7-M, so it is never seen as contributing a new feature.
23847 We can not search for the last entry which is entirely used,
23848 because if no CPU is specified we build up only those flags
23849 actually used. Perhaps we should separate out the specified
23850 and implicit cases. Avoid taking this path for -march=all by
23851 checking for contradictory v7-A / v7-M features. */
23853 && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
23854 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
23855 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp)
23858 /* Tag_CPU_name. */
23859 if (selected_cpu_name[0])
23863 q = selected_cpu_name;
/* For plain "armv*" names, report only the upper-cased suffix.  */
23864 if (strncmp (q, "armv", 4) == 0)
23869 for (i = 0; q[i]; i++)
23870 q[i] = TOUPPER (q[i]);
23872 aeabi_set_attribute_string (Tag_CPU_name, q);
23875 /* Tag_CPU_arch. */
23876 aeabi_set_attribute_int (Tag_CPU_arch, arch);
23878 /* Tag_CPU_arch_profile. */
23879 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
23881 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
23883 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
23888 if (profile != '\0')
23889 aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
23891 /* Tag_ARM_ISA_use. */
23892 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
23894 aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
23896 /* Tag_THUMB_ISA_use. */
23897 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
23899 aeabi_set_attribute_int (Tag_THUMB_ISA_use,
23900 ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
23902 /* Tag_VFP_arch. */
/* Tag values: 7=FPv8, 5/6=VFPv4 (d16/d32), 3/4=VFPv3, 2=VFPv2, 1=VFPv1
   — checked from newest to oldest.  */
23903 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8))
23904 aeabi_set_attribute_int (Tag_VFP_arch, 7);
23905 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
23906 aeabi_set_attribute_int (Tag_VFP_arch,
23907 ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
23909 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
23912 aeabi_set_attribute_int (Tag_VFP_arch, 3);
23914 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
23916 aeabi_set_attribute_int (Tag_VFP_arch, 4);
23919 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
23920 aeabi_set_attribute_int (Tag_VFP_arch, 2);
23921 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
23922 || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
23923 aeabi_set_attribute_int (Tag_VFP_arch, 1);
23925 /* Tag_ABI_HardFP_use. */
23926 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
23927 && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
23928 aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
23930 /* Tag_WMMX_arch. */
23931 if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
23932 aeabi_set_attribute_int (Tag_WMMX_arch, 2);
23933 else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
23934 aeabi_set_attribute_int (Tag_WMMX_arch, 1);
23936 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
23937 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
23938 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
23939 else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
23941 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
23943 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
23947 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
23952 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
23953 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
23954 aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
23958 We set Tag_DIV_use to two when integer divide instructions have been used
23959 in ARM state, or when Thumb integer divide instructions have been used,
23960 but we have no architecture profile set, nor have we any ARM instructions.
23962 For ARMv8 we set the tag to 0 as integer divide is implied by the base
23965 For new architectures we will have to check these tests. */
23966 gas_assert (arch <= TAG_CPU_ARCH_V8);
23967 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8))
23968 aeabi_set_attribute_int (Tag_DIV_use, 0);
23969 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
23970 || (profile == '\0'
23971 && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
23972 && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
23973 aeabi_set_attribute_int (Tag_DIV_use, 2);
23975 /* Tag_MP_extension_use. */
23976 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
23977 aeabi_set_attribute_int (Tag_MPextension_use, 1);
23979 /* Tag Virtualization_use. */
23980 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
23982 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
23985 aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
/* NOTE(review): the enclosing function's header is elided from this
   dump — presumably arm_md_end (called at end of assembly); confirm
   against the full source.  Attributes are only emitted for EABI v4+.  */
23988 /* Add the default contents for the .ARM.attributes section. */
23992 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
23995 aeabi_set_public_attributes ();
23997 #endif /* OBJ_ELF */
24000 /* Parse a .cpu directive. */
/* Reads the CPU name from the input line (temporarily NUL-terminating it),
   looks it up in arm_cpus, and updates the selected CPU/variant globals.
   The terminator byte is always restored before returning.  */
24003 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
24005 const struct arm_cpu_option_table *opt;
24009 name = input_line_pointer;
24010 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24011 input_line_pointer++;
24012 saved_char = *input_line_pointer;
24013 *input_line_pointer = 0;
24015 /* Skip the first "all" entry. */
24016 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
24017 if (streq (opt->name, name))
24019 mcpu_cpu_opt = &opt->value;
24020 selected_cpu = opt->value;
24021 if (opt->canonical_name)
24022 strcpy (selected_cpu_name, opt->canonical_name);
/* No canonical spelling: upper-case the table name for Tag_CPU_name.  */
24026 for (i = 0; opt->name[i]; i++)
24027 selected_cpu_name[i] = TOUPPER (opt->name[i]);
24029 selected_cpu_name[i] = 0;
24031 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24032 *input_line_pointer = saved_char;
24033 demand_empty_rest_of_line ();
24036 as_bad (_("unknown cpu `%s'"), name);
24037 *input_line_pointer = saved_char;
24038 ignore_rest_of_line ();
24042 /* Parse a .arch directive. */
/* Same shape as s_arm_cpu, but matches against arm_archs and has no
   canonical-name handling (the table name is used directly).  */
24045 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
24047 const struct arm_arch_option_table *opt;
24051 name = input_line_pointer;
24052 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24053 input_line_pointer++;
24054 saved_char = *input_line_pointer;
24055 *input_line_pointer = 0;
24057 /* Skip the first "all" entry. */
24058 for (opt = arm_archs + 1; opt->name != NULL; opt++)
24059 if (streq (opt->name, name))
24061 mcpu_cpu_opt = &opt->value;
24062 selected_cpu = opt->value;
24063 strcpy (selected_cpu_name, opt->name);
24064 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24065 *input_line_pointer = saved_char;
24066 demand_empty_rest_of_line ();
24070 as_bad (_("unknown architecture `%s'\n"), name);
24071 *input_line_pointer = saved_char;
24072 ignore_rest_of_line ();
24076 /* Parse a .object_arch directive. */
/* Overrides only the architecture reported in object attributes
   (object_arch); does not change which instructions are accepted.  */
24079 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
24081 const struct arm_arch_option_table *opt;
24085 name = input_line_pointer;
24086 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24087 input_line_pointer++;
24088 saved_char = *input_line_pointer;
24089 *input_line_pointer = 0;
24091 /* Skip the first "all" entry. */
24092 for (opt = arm_archs + 1; opt->name != NULL; opt++)
24093 if (streq (opt->name, name))
24095 object_arch = &opt->value;
24096 *input_line_pointer = saved_char;
24097 demand_empty_rest_of_line ();
24101 as_bad (_("unknown architecture `%s'\n"), name);
24102 *input_line_pointer = saved_char;
24103 ignore_rest_of_line ();
24106 /* Parse a .arch_extension directive. */
/* Adds (or, with a "no" prefix, removes) one named extension from the
   currently selected CPU feature set.  Unlike the command-line parser,
   a single name is handled per directive and order is not enforced.  */
24109 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
24111 const struct arm_option_extension_value_table *opt;
24114 int adding_value = 1;
24116 name = input_line_pointer;
24117 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24118 input_line_pointer++;
24119 saved_char = *input_line_pointer;
24120 *input_line_pointer = 0;
24122 if (strlen (name) >= 2
24123 && strncmp (name, "no", 2) == 0)
24129 for (opt = arm_extensions; opt->name != NULL; opt++)
24130 if (streq (opt->name, name))
24132 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
24134 as_bad (_("architectural extension `%s' is not allowed for the "
24135 "current base architecture"), name);
24140 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
24142 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
24144 mcpu_cpu_opt = &selected_cpu;
24145 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24146 *input_line_pointer = saved_char;
24147 demand_empty_rest_of_line ();
24151 if (opt->name == NULL)
24152 as_bad (_("unknown architecture `%s'\n"), name);
24154 *input_line_pointer = saved_char;
24155 ignore_rest_of_line ();
24158 /* Parse a .fpu directive. */
/* Selects a new FPU feature set by name from arm_fpus and recomputes
   cpu_variant accordingly.  */
24161 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
24163 const struct arm_option_fpu_value_table *opt;
24167 name = input_line_pointer;
24168 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
24169 input_line_pointer++;
24170 saved_char = *input_line_pointer;
24171 *input_line_pointer = 0;
24173 for (opt = arm_fpus; opt->name != NULL; opt++)
24174 if (streq (opt->name, name))
24176 mfpu_opt = &opt->value;
24177 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
24178 *input_line_pointer = saved_char;
24179 demand_empty_rest_of_line ();
24183 as_bad (_("unknown floating point format `%s'\n"), name);
24184 *input_line_pointer = saved_char;
24185 ignore_rest_of_line ();
24188 /* Copy symbol information. */
/* Propagates the ARM-specific symbol flag (e.g. Thumb-function marking)
   from SRC to DEST; used when GAS clones or aliases symbols.  */
24191 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
24193 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
24197 /* Given a symbolic attribute NAME, return the proper integer value.
24198 Returns -1 if the attribute is not known. */
/* Used by the .eabi_attribute directive so users can write tag names
   instead of raw numbers; linear scan of a static name->tag table.  */
24201 arm_convert_symbolic_attribute (const char *name)
24203 static const struct
24208 attribute_table[] =
24210 /* When you modify this table you should
24211 also modify the list in doc/c-arm.texi. */
24212 #define T(tag) {#tag, tag}
24213 T (Tag_CPU_raw_name),
24216 T (Tag_CPU_arch_profile),
24217 T (Tag_ARM_ISA_use),
24218 T (Tag_THUMB_ISA_use),
24222 T (Tag_Advanced_SIMD_arch),
24223 T (Tag_PCS_config),
24224 T (Tag_ABI_PCS_R9_use),
24225 T (Tag_ABI_PCS_RW_data),
24226 T (Tag_ABI_PCS_RO_data),
24227 T (Tag_ABI_PCS_GOT_use),
24228 T (Tag_ABI_PCS_wchar_t),
24229 T (Tag_ABI_FP_rounding),
24230 T (Tag_ABI_FP_denormal),
24231 T (Tag_ABI_FP_exceptions),
24232 T (Tag_ABI_FP_user_exceptions),
24233 T (Tag_ABI_FP_number_model),
24234 T (Tag_ABI_align_needed),
24235 T (Tag_ABI_align8_needed),
24236 T (Tag_ABI_align_preserved),
24237 T (Tag_ABI_align8_preserved),
24238 T (Tag_ABI_enum_size),
24239 T (Tag_ABI_HardFP_use),
24240 T (Tag_ABI_VFP_args),
24241 T (Tag_ABI_WMMX_args),
24242 T (Tag_ABI_optimization_goals),
24243 T (Tag_ABI_FP_optimization_goals),
24244 T (Tag_compatibility),
24245 T (Tag_CPU_unaligned_access),
24246 T (Tag_FP_HP_extension),
24247 T (Tag_VFP_HP_extension),
24248 T (Tag_ABI_FP_16bit_format),
24249 T (Tag_MPextension_use),
24251 T (Tag_nodefaults),
24252 T (Tag_also_compatible_with),
24253 T (Tag_conformance),
24255 T (Tag_Virtualization_use),
24256 /* We deliberately do not include Tag_MPextension_use_legacy. */
24264 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
24265 if (streq (name, attribute_table[i].name))
24266 return attribute_table[i].tag;
24272 /* Apply sym value for relocations only in the case that
24273 they are for local symbols and you have the respective
24274 architectural feature for blx and simple switches. */
/* Decide whether the symbol value may be applied directly for FIXP:
   only for local symbols, when the selected CPU has v5T (BLX) support,
   and when the symbol does not force a relocation.  NOTE(review): this
   excerpt is a sparse sample -- the return type, the first clause of
   the condition, the returns and the default case are not visible
   here.  */
24276 arm_apply_sym_value (struct fix * fixP)
24279 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
24280 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
/* The answer depends on the relocation kind and on whether the target
   symbol is an ARM or a Thumb function.  */
24282 switch (fixP->fx_r_type)
/* ARM BLX / Thumb branch to an ARM-state function.  */
24284 case BFD_RELOC_ARM_PCREL_BLX:
24285 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24286 if (ARM_IS_FUNC (fixP->fx_addsy))
/* ARM call / Thumb BLX to a Thumb-state function.  */
24290 case BFD_RELOC_ARM_PCREL_CALL:
24291 case BFD_RELOC_THUMB_PCREL_BLX:
24292 if (THUMB_IS_FUNC (fixP->fx_addsy))
24303 #endif /* OBJ_ELF */