1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2014 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS * table_entry;
53 symbolS * personality_routine;
54 int personality_index;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
80 /* Results from operand parsing worker functions. */
84 PARSE_OPERAND_SUCCESS,
86 PARSE_OPERAND_FAIL_NO_BACKTRACK
87 } parse_operand_result;
96 /* Types of processor to assemble for. */
98 /* The code that was here used to select a default CPU depending on compiler
99 pre-defines which were only present when doing native builds, thus
100 changing gas' default behaviour depending upon the build host.
102 If you have a target that requires a default CPU option then you
103 should define CPU_DEFAULT here. */
108 # define FPU_DEFAULT FPU_ARCH_FPA
109 # elif defined (TE_NetBSD)
111 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
113 /* Legacy a.out format. */
114 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
116 # elif defined (TE_VXWORKS)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
119 /* For backwards compatibility, default to FPA. */
120 # define FPU_DEFAULT FPU_ARCH_FPA
122 #endif /* ifndef FPU_DEFAULT */
124 #define streq(a, b) (strcmp (a, b) == 0)
126 static arm_feature_set cpu_variant;
127 static arm_feature_set arm_arch_used;
128 static arm_feature_set thumb_arch_used;
130 /* Flags stored in private area of BFD structure. */
131 static int uses_apcs_26 = FALSE;
132 static int atpcs = FALSE;
133 static int support_interwork = FALSE;
134 static int uses_apcs_float = FALSE;
135 static int pic_code = FALSE;
136 static int fix_v4bx = FALSE;
137 /* Warn on using deprecated features. */
138 static int warn_on_deprecated = TRUE;
140 /* Understand CodeComposer Studio assembly syntax. */
141 bfd_boolean codecomposer_syntax = FALSE;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
154 static const arm_feature_set *object_arch = NULL;
156 /* Constants for known architecture features. */
157 static const arm_feature_set fpu_default = FPU_DEFAULT;
158 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
159 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
160 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
161 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
162 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
163 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
164 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
165 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
168 static const arm_feature_set cpu_default = CPU_DEFAULT;
171 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
172 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
173 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
174 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
175 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
176 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
177 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
178 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
179 static const arm_feature_set arm_ext_v4t_5 =
180 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
181 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
182 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
183 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
184 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
185 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
186 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188 static const arm_feature_set arm_ext_v6m = ARM_FEATURE (ARM_EXT_V6M, 0);
189 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
190 static const arm_feature_set arm_ext_v6_dsp = ARM_FEATURE (ARM_EXT_V6_DSP, 0);
191 static const arm_feature_set arm_ext_barrier = ARM_FEATURE (ARM_EXT_BARRIER, 0);
192 static const arm_feature_set arm_ext_msr = ARM_FEATURE (ARM_EXT_THUMB_MSR, 0);
193 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
194 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
195 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
196 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
197 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
198 static const arm_feature_set arm_ext_v8 = ARM_FEATURE (ARM_EXT_V8, 0);
199 static const arm_feature_set arm_ext_m =
200 ARM_FEATURE (ARM_EXT_V6M | ARM_EXT_OS | ARM_EXT_V7M, 0);
201 static const arm_feature_set arm_ext_mp = ARM_FEATURE (ARM_EXT_MP, 0);
202 static const arm_feature_set arm_ext_sec = ARM_FEATURE (ARM_EXT_SEC, 0);
203 static const arm_feature_set arm_ext_os = ARM_FEATURE (ARM_EXT_OS, 0);
204 static const arm_feature_set arm_ext_adiv = ARM_FEATURE (ARM_EXT_ADIV, 0);
205 static const arm_feature_set arm_ext_virt = ARM_FEATURE (ARM_EXT_VIRT, 0);
207 static const arm_feature_set arm_arch_any = ARM_ANY;
208 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
209 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
210 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
211 static const arm_feature_set arm_arch_v6m_only = ARM_ARCH_V6M_ONLY;
213 static const arm_feature_set arm_cext_iwmmxt2 =
214 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
215 static const arm_feature_set arm_cext_iwmmxt =
216 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
217 static const arm_feature_set arm_cext_xscale =
218 ARM_FEATURE (0, ARM_CEXT_XSCALE);
219 static const arm_feature_set arm_cext_maverick =
220 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
221 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
222 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
223 static const arm_feature_set fpu_vfp_ext_v1xd =
224 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
225 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
226 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
227 static const arm_feature_set fpu_vfp_ext_v3xd = ARM_FEATURE (0, FPU_VFP_EXT_V3xD);
228 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
229 static const arm_feature_set fpu_vfp_ext_d32 =
230 ARM_FEATURE (0, FPU_VFP_EXT_D32);
231 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
232 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
233 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
234 static const arm_feature_set fpu_vfp_fp16 = ARM_FEATURE (0, FPU_VFP_EXT_FP16);
235 static const arm_feature_set fpu_neon_ext_fma = ARM_FEATURE (0, FPU_NEON_EXT_FMA);
236 static const arm_feature_set fpu_vfp_ext_fma = ARM_FEATURE (0, FPU_VFP_EXT_FMA);
237 static const arm_feature_set fpu_vfp_ext_armv8 =
238 ARM_FEATURE (0, FPU_VFP_EXT_ARMV8);
239 static const arm_feature_set fpu_neon_ext_armv8 =
240 ARM_FEATURE (0, FPU_NEON_EXT_ARMV8);
241 static const arm_feature_set fpu_crypto_ext_armv8 =
242 ARM_FEATURE (0, FPU_CRYPTO_EXT_ARMV8);
243 static const arm_feature_set crc_ext_armv8 =
244 ARM_FEATURE (0, CRC_EXT_ARMV8);
246 static int mfloat_abi_opt = -1;
247 /* Record user cpu selection for object attributes. */
248 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
249 /* Must be long enough to hold any of the names in arm_cpus. */
250 static char selected_cpu_name[16];
252 extern FLONUM_TYPE generic_floating_point_number;
254 /* Return if no cpu was selected on command-line. */
256 no_cpu_selected (void)
258 return selected_cpu.core == arm_arch_none.core
259 && selected_cpu.coproc == arm_arch_none.coproc;
264 static int meabi_flags = EABI_DEFAULT;
266 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
269 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
274 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
279 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
280 symbolS * GOT_symbol;
283 /* 0: assemble for ARM,
284 1: assemble for Thumb,
285 2: assemble for Thumb even though target CPU does not support thumb
287 static int thumb_mode = 0;
288 /* A value distinct from the possible values for thumb_mode that we
289 can use to record whether thumb_mode has been copied into the
290 tc_frag_data field of a frag. */
291 #define MODE_RECORDED (1 << 4)
293 /* Specifies the intrinsic IT insn behavior mode. */
294 enum implicit_it_mode
296 IMPLICIT_IT_MODE_NEVER = 0x00,
297 IMPLICIT_IT_MODE_ARM = 0x01,
298 IMPLICIT_IT_MODE_THUMB = 0x02,
299 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
301 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
303 /* If unified_syntax is true, we are processing the new unified
304 ARM/Thumb syntax. Important differences from the old ARM mode:
306 - Immediate operands do not require a # prefix.
307 - Conditional affixes always appear at the end of the
308 instruction. (For backward compatibility, those instructions
309 that formerly had them in the middle, continue to accept them
311 - The IT instruction may appear, and if it does is validated
312 against subsequent conditional affixes. It does not generate
315 Important differences from the old Thumb mode:
317 - Immediate operands do not require a # prefix.
318 - Most of the V6T2 instructions are only available in unified mode.
319 - The .N and .W suffixes are recognized and honored (it is an error
320 if they cannot be honored).
321 - All instructions set the flags if and only if they have an 's' affix.
322 - Conditional affixes may be used. They are validated against
323 preceding IT instructions. Unlike ARM mode, you cannot use a
324 conditional affix except in the scope of an IT instruction. */
326 static bfd_boolean unified_syntax = FALSE;
328 /* An immediate operand can start with #, and ld*, st*, pld operands
329 can contain [ and ]. We need to tell APP not to elide whitespace
330 before a [, which can appear as the first operand for pld.
331 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
332 const char arm_symbol_chars[] = "#[]{}";
347 enum neon_el_type type;
351 #define NEON_MAX_TYPE_ELS 4
355 struct neon_type_el el[NEON_MAX_TYPE_ELS];
359 enum it_instruction_type
364 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
365 if inside, should be the last one. */
366 NEUTRAL_IT_INSN, /* This could be either inside or outside,
367 i.e. BKPT and NOP. */
368 IT_INSN /* The IT insn has been parsed. */
371 /* The maximum number of operands we need. */
372 #define ARM_IT_MAX_OPERANDS 6
377 unsigned long instruction;
381 /* "uncond_value" is set to the value in place of the conditional field in
382 unconditional versions of the instruction, or -1 if nothing is
385 struct neon_type vectype;
386 /* This does not indicate an actual NEON instruction, only that
387 the mnemonic accepts neon-style type suffixes. */
389 /* Set to the opcode if the instruction needs relaxation.
390 Zero if the instruction is not relaxed. */
394 bfd_reloc_code_real_type type;
399 enum it_instruction_type it_insn_type;
405 struct neon_type_el vectype;
406 unsigned present : 1; /* Operand present. */
407 unsigned isreg : 1; /* Operand was a register. */
408 unsigned immisreg : 1; /* .imm field is a second register. */
409 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
410 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
411 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
412 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
413 instructions. This allows us to disambiguate ARM <-> vector insns. */
414 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
415 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
416 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
417 unsigned issingle : 1; /* Operand is VFP single-precision register. */
418 unsigned hasreloc : 1; /* Operand has relocation suffix. */
419 unsigned writeback : 1; /* Operand has trailing ! */
420 unsigned preind : 1; /* Preindexed address. */
421 unsigned postind : 1; /* Postindexed address. */
422 unsigned negative : 1; /* Index register was negated. */
423 unsigned shifted : 1; /* Shift applied to operation. */
424 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
425 } operands[ARM_IT_MAX_OPERANDS];
428 static struct arm_it inst;
430 #define NUM_FLOAT_VALS 8
432 const char * fp_const[] =
434 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
437 /* Number of littlenums required to hold an extended precision number. */
438 #define MAX_LITTLENUMS 6
440 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
450 #define CP_T_X 0x00008000
451 #define CP_T_Y 0x00400000
453 #define CONDS_BIT 0x00100000
454 #define LOAD_BIT 0x00100000
456 #define DOUBLE_LOAD_FLAG 0x00000001
460 const char * template_name;
464 #define COND_ALWAYS 0xE
468 const char * template_name;
472 struct asm_barrier_opt
474 const char * template_name;
476 const arm_feature_set arch;
479 /* The bit that distinguishes CPSR and SPSR. */
480 #define SPSR_BIT (1 << 22)
482 /* The individual PSR flag bits. */
483 #define PSR_c (1 << 16)
484 #define PSR_x (1 << 17)
485 #define PSR_s (1 << 18)
486 #define PSR_f (1 << 19)
491 bfd_reloc_code_real_type reloc;
496 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
497 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
502 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
505 /* Bits for DEFINED field in neon_typed_alias. */
506 #define NTA_HASTYPE 1
507 #define NTA_HASINDEX 2
509 struct neon_typed_alias
511 unsigned char defined;
513 struct neon_type_el eltype;
516 /* ARM register categories. This includes coprocessor numbers and various
517 architecture extensions' registers. */
544 /* Structure for a hash table entry for a register.
545 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
546 information which states whether a vector type or index is specified (for a
547 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
553 unsigned char builtin;
554 struct neon_typed_alias * neon;
557 /* Diagnostics used when we don't get a register of the expected type. */
558 const char * const reg_expected_msgs[] =
560 N_("ARM register expected"),
561 N_("bad or missing co-processor number"),
562 N_("co-processor register expected"),
563 N_("FPA register expected"),
564 N_("VFP single precision register expected"),
565 N_("VFP/Neon double precision register expected"),
566 N_("Neon quad precision register expected"),
567 N_("VFP single or double precision register expected"),
568 N_("Neon double or quad precision register expected"),
569 N_("VFP single, double or Neon quad precision register expected"),
570 N_("VFP system register expected"),
571 N_("Maverick MVF register expected"),
572 N_("Maverick MVD register expected"),
573 N_("Maverick MVFX register expected"),
574 N_("Maverick MVDX register expected"),
575 N_("Maverick MVAX register expected"),
576 N_("Maverick DSPSC register expected"),
577 N_("iWMMXt data register expected"),
578 N_("iWMMXt control register expected"),
579 N_("iWMMXt scalar register expected"),
580 N_("XScale accumulator register expected"),
583 /* Some well known registers that we refer to directly elsewhere. */
589 /* ARM instructions take 4bytes in the object file, Thumb instructions
595 /* Basic string to match. */
596 const char * template_name;
598 /* Parameters to instruction. */
599 unsigned int operands[8];
601 /* Conditional tag - see opcode_lookup. */
602 unsigned int tag : 4;
604 /* Basic instruction code. */
605 unsigned int avalue : 28;
607 /* Thumb-format instruction code. */
610 /* Which architecture variant provides this instruction. */
611 const arm_feature_set * avariant;
612 const arm_feature_set * tvariant;
614 /* Function to call to encode instruction in ARM format. */
615 void (* aencode) (void);
617 /* Function to call to encode instruction in Thumb format. */
618 void (* tencode) (void);
621 /* Defines for various bits that we will want to toggle. */
622 #define INST_IMMEDIATE 0x02000000
623 #define OFFSET_REG 0x02000000
624 #define HWOFFSET_IMM 0x00400000
625 #define SHIFT_BY_REG 0x00000010
626 #define PRE_INDEX 0x01000000
627 #define INDEX_UP 0x00800000
628 #define WRITE_BACK 0x00200000
629 #define LDM_TYPE_2_OR_3 0x00400000
630 #define CPSI_MMOD 0x00020000
632 #define LITERAL_MASK 0xf000f000
633 #define OPCODE_MASK 0xfe1fffff
634 #define V4_STR_BIT 0x00000020
635 #define VLDR_VMOV_SAME 0x0040f000
637 #define T2_SUBS_PC_LR 0xf3de8f00
639 #define DATA_OP_SHIFT 21
641 #define T2_OPCODE_MASK 0xfe1fffff
642 #define T2_DATA_OP_SHIFT 21
644 #define A_COND_MASK 0xf0000000
645 #define A_PUSH_POP_OP_MASK 0x0fff0000
647 /* Opcodes for pushing/popping registers to/from the stack. */
648 #define A1_OPCODE_PUSH 0x092d0000
649 #define A2_OPCODE_PUSH 0x052d0004
650 #define A2_OPCODE_POP 0x049d0004
652 /* Codes to distinguish the arithmetic instructions. */
663 #define OPCODE_CMP 10
664 #define OPCODE_CMN 11
665 #define OPCODE_ORR 12
666 #define OPCODE_MOV 13
667 #define OPCODE_BIC 14
668 #define OPCODE_MVN 15
670 #define T2_OPCODE_AND 0
671 #define T2_OPCODE_BIC 1
672 #define T2_OPCODE_ORR 2
673 #define T2_OPCODE_ORN 3
674 #define T2_OPCODE_EOR 4
675 #define T2_OPCODE_ADD 8
676 #define T2_OPCODE_ADC 10
677 #define T2_OPCODE_SBC 11
678 #define T2_OPCODE_SUB 13
679 #define T2_OPCODE_RSB 14
681 #define T_OPCODE_MUL 0x4340
682 #define T_OPCODE_TST 0x4200
683 #define T_OPCODE_CMN 0x42c0
684 #define T_OPCODE_NEG 0x4240
685 #define T_OPCODE_MVN 0x43c0
687 #define T_OPCODE_ADD_R3 0x1800
688 #define T_OPCODE_SUB_R3 0x1a00
689 #define T_OPCODE_ADD_HI 0x4400
690 #define T_OPCODE_ADD_ST 0xb000
691 #define T_OPCODE_SUB_ST 0xb080
692 #define T_OPCODE_ADD_SP 0xa800
693 #define T_OPCODE_ADD_PC 0xa000
694 #define T_OPCODE_ADD_I8 0x3000
695 #define T_OPCODE_SUB_I8 0x3800
696 #define T_OPCODE_ADD_I3 0x1c00
697 #define T_OPCODE_SUB_I3 0x1e00
699 #define T_OPCODE_ASR_R 0x4100
700 #define T_OPCODE_LSL_R 0x4080
701 #define T_OPCODE_LSR_R 0x40c0
702 #define T_OPCODE_ROR_R 0x41c0
703 #define T_OPCODE_ASR_I 0x1000
704 #define T_OPCODE_LSL_I 0x0000
705 #define T_OPCODE_LSR_I 0x0800
707 #define T_OPCODE_MOV_I8 0x2000
708 #define T_OPCODE_CMP_I8 0x2800
709 #define T_OPCODE_CMP_LR 0x4280
710 #define T_OPCODE_MOV_HR 0x4600
711 #define T_OPCODE_CMP_HR 0x4500
713 #define T_OPCODE_LDR_PC 0x4800
714 #define T_OPCODE_LDR_SP 0x9800
715 #define T_OPCODE_STR_SP 0x9000
716 #define T_OPCODE_LDR_IW 0x6800
717 #define T_OPCODE_STR_IW 0x6000
718 #define T_OPCODE_LDR_IH 0x8800
719 #define T_OPCODE_STR_IH 0x8000
720 #define T_OPCODE_LDR_IB 0x7800
721 #define T_OPCODE_STR_IB 0x7000
722 #define T_OPCODE_LDR_RW 0x5800
723 #define T_OPCODE_STR_RW 0x5000
724 #define T_OPCODE_LDR_RH 0x5a00
725 #define T_OPCODE_STR_RH 0x5200
726 #define T_OPCODE_LDR_RB 0x5c00
727 #define T_OPCODE_STR_RB 0x5400
729 #define T_OPCODE_PUSH 0xb400
730 #define T_OPCODE_POP 0xbc00
732 #define T_OPCODE_BRANCH 0xe000
734 #define THUMB_SIZE 2 /* Size of thumb instruction. */
735 #define THUMB_PP_PC_LR 0x0100
736 #define THUMB_LOAD_BIT 0x0800
737 #define THUMB2_LOAD_BIT 0x00100000
/* Diagnostic messages assigned to inst.error by the ARM/Thumb encoders
   when operand or context checks fail.  Each expands to a translatable
   string via the gettext _() macro.

   Fix: BAD_ADDR_MODE previously carried a stray trailing semicolon;
   these macros are used inside expressions (e.g.
   `if (...) inst.error = BAD_ADDR_MODE; else ...'), where an embedded
   semicolon would end the statement early and break the `else'.  */
#define BAD_ARGS _("bad arguments to instruction")
#define BAD_SP _("r13 not allowed here")
#define BAD_PC _("r15 not allowed here")
#define BAD_COND _("instruction cannot be conditional")
#define BAD_OVERLAP _("registers may not be the same")
#define BAD_HIREG _("lo register required")
#define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode")
#define BAD_BRANCH _("branch must be last instruction in IT block")
#define BAD_NOT_IT _("instruction not allowed in IT block")
#define BAD_FPU _("selected FPU does not support instruction")
#define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
#define BAD_IT_COND _("incorrect condition in IT block")
#define BAD_IT_IT _("IT falling in the range of a previous IT block")
#define MISSING_FNSTART _("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE _("branch out of range")
#define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
761 static struct hash_control * arm_ops_hsh;
762 static struct hash_control * arm_cond_hsh;
763 static struct hash_control * arm_shift_hsh;
764 static struct hash_control * arm_psr_hsh;
765 static struct hash_control * arm_v7m_psr_hsh;
766 static struct hash_control * arm_reg_hsh;
767 static struct hash_control * arm_reloc_hsh;
768 static struct hash_control * arm_barrier_opt_hsh;
770 /* Stuff needed to resolve the label ambiguity
779 symbolS * last_label_seen;
780 static int label_is_thumb_function_name = FALSE;
782 /* Literal pool structure. Held on a per-section
783 and per-sub-section basis. */
785 #define MAX_LITERAL_POOL_SIZE 1024
786 typedef struct literal_pool
788 expressionS literals [MAX_LITERAL_POOL_SIZE];
789 unsigned int next_free_entry;
795 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
797 struct literal_pool * next;
798 unsigned int alignment;
801 /* Pointer to a linked list of literal pools. */
802 literal_pool * list_of_pools = NULL;
804 typedef enum asmfunc_states
807 WAITING_ASMFUNC_NAME,
811 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
814 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
816 static struct current_it now_it;
820 now_it_compatible (int cond)
822 return (cond & ~1) == (now_it.cc & ~1);
826 conditional_insn (void)
828 return inst.cond != COND_ALWAYS;
831 static int in_it_block (void);
833 static int handle_it_state (void);
835 static void force_automatic_it_block_close (void);
837 static void it_fsm_post_encode (void);
839 #define set_it_insn_type(type) \
842 inst.it_insn_type = type; \
843 if (handle_it_state () == FAIL) \
848 #define set_it_insn_type_nonvoid(type, failret) \
851 inst.it_insn_type = type; \
852 if (handle_it_state () == FAIL) \
857 #define set_it_insn_type_last() \
860 if (inst.cond == COND_ALWAYS) \
861 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
863 set_it_insn_type (INSIDE_IT_LAST_INSN); \
869 /* This array holds the chars that always start a comment. If the
870 pre-processor is disabled, these aren't very useful. */
871 char arm_comment_chars[] = "@";
873 /* This array holds the chars that only start a comment at the beginning of
874 a line. If the line seems to have the form '# 123 filename'
875 .line and .file directives will appear in the pre-processed output. */
876 /* Note that input_file.c hand checks for '#' at the beginning of the
877 first line of the input file. This is because the compiler outputs
878 #NO_APP at the beginning of its output. */
879 /* Also note that comments like this one will always work. */
880 const char line_comment_chars[] = "#";
882 char arm_line_separator_chars[] = ";";
884 /* Chars that can be used to separate mant
885 from exp in floating point numbers. */
886 const char EXP_CHARS[] = "eE";
888 /* Chars that mean this number is a floating point constant. */
892 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
894 /* Prefix characters that indicate the start of an immediate
896 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
898 /* Separator character handling. */
900 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
903 skip_past_char (char ** str, char c)
905 /* PR gas/14987: Allow for whitespace before the expected character. */
906 skip_whitespace (*str);
917 #define skip_past_comma(str) skip_past_char (str, ',')
919 /* Arithmetic expressions (possibly involving symbols). */
921 /* Return TRUE if anything in the expression is a bignum. */
924 walk_no_bignums (symbolS * sp)
926 if (symbol_get_value_expression (sp)->X_op == O_big)
929 if (symbol_get_value_expression (sp)->X_add_symbol)
931 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
932 || (symbol_get_value_expression (sp)->X_op_symbol
933 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
939 static int in_my_get_expression = 0;
941 /* Third argument to my_get_expression. */
942 #define GE_NO_PREFIX 0
943 #define GE_IMM_PREFIX 1
944 #define GE_OPT_PREFIX 2
945 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
946 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
947 #define GE_OPT_PREFIX_BIG 3
950 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
955 /* In unified syntax, all prefixes are optional. */
957 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
962 case GE_NO_PREFIX: break;
964 if (!is_immediate_prefix (**str))
966 inst.error = _("immediate expression requires a # prefix");
972 case GE_OPT_PREFIX_BIG:
973 if (is_immediate_prefix (**str))
979 memset (ep, 0, sizeof (expressionS));
981 save_in = input_line_pointer;
982 input_line_pointer = *str;
983 in_my_get_expression = 1;
984 seg = expression (ep);
985 in_my_get_expression = 0;
987 if (ep->X_op == O_illegal || ep->X_op == O_absent)
989 /* We found a bad or missing expression in md_operand(). */
990 *str = input_line_pointer;
991 input_line_pointer = save_in;
992 if (inst.error == NULL)
993 inst.error = (ep->X_op == O_absent
994 ? _("missing expression") :_("bad expression"));
999 if (seg != absolute_section
1000 && seg != text_section
1001 && seg != data_section
1002 && seg != bss_section
1003 && seg != undefined_section)
1005 inst.error = _("bad segment");
1006 *str = input_line_pointer;
1007 input_line_pointer = save_in;
1014 /* Get rid of any bignums now, so that we don't generate an error for which
1015 we can't establish a line number later on. Big numbers are never valid
1016 in instructions, which is where this routine is always called. */
1017 if (prefix_mode != GE_OPT_PREFIX_BIG
1018 && (ep->X_op == O_big
1019 || (ep->X_add_symbol
1020 && (walk_no_bignums (ep->X_add_symbol)
1022 && walk_no_bignums (ep->X_op_symbol))))))
1024 inst.error = _("invalid constant");
1025 *str = input_line_pointer;
1026 input_line_pointer = save_in;
1030 *str = input_line_pointer;
1031 input_line_pointer = save_in;
1035 /* Turn a string in input_line_pointer into a floating point constant
1036 of type TYPE, and store the appropriate bytes in *LITP. The number
1037 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1038 returned, or NULL on OK.
1040 Note that fp constants aren't represented in the normal way on the ARM.
1041 In big endian mode, things are as expected. However, in little endian
1042 mode fp constants are big-endian word-wise, and little-endian byte-wise
1043 within the words. For example, (double) 1.1 in big endian mode is
1044 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1045 the byte sequence 99 99 f1 3f 9a 99 99 99.
1047 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1050 md_atof (int type, char * litP, int * sizeP)
1053 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1085 return _("Unrecognized or unsupported floating point constant");
1088 t = atof_ieee (input_line_pointer, type, words);
1090 input_line_pointer = t;
1091 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1093 if (target_big_endian)
1095 for (i = 0; i < prec; i++)
1097 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1098 litP += sizeof (LITTLENUM_TYPE);
1103 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1104 for (i = prec - 1; i >= 0; i--)
1106 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1107 litP += sizeof (LITTLENUM_TYPE);
1110 /* For a 4 byte float the order of elements in `words' is 1 0.
1111 For an 8 byte float the order is 1 0 3 2. */
1112 for (i = 0; i < prec; i += 2)
1114 md_number_to_chars (litP, (valueT) words[i + 1],
1115 sizeof (LITTLENUM_TYPE));
1116 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1117 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1118 litP += 2 * sizeof (LITTLENUM_TYPE);
1125 /* We handle all bad expressions here, so that we can report the faulty
1126 instruction in the error message. */
1128 md_operand (expressionS * exp)
1130 if (in_my_get_expression)
1131 exp->X_op = O_illegal;
1134 /* Immediate values. */
1136 /* Generic immediate-value read function for use in directives.
1137 Accepts anything that 'expression' can fold to a constant.
1138 *val receives the number. */
1141 immediate_for_directive (int *val)
1144 exp.X_op = O_illegal;
1146 if (is_immediate_prefix (*input_line_pointer))
1148 input_line_pointer++;
1152 if (exp.X_op != O_constant)
1154 as_bad (_("expected #constant"));
1155 ignore_rest_of_line ();
1158 *val = exp.X_add_number;
1163 /* Register parsing. */
1165 /* Generic register parser. CCP points to what should be the
1166 beginning of a register name. If it is indeed a valid register
1167 name, advance CCP over it and return the reg_entry structure;
1168 otherwise return NULL. Does not issue diagnostics. */
1170 static struct reg_entry *
1171 arm_reg_parse_multi (char **ccp)
1175 struct reg_entry *reg;
1177 skip_whitespace (start);
/* Honour a mandatory or optional register prefix if the target
   configuration defines one.  */
1179 #ifdef REGISTER_PREFIX
1180 if (*start != REGISTER_PREFIX)
1184 #ifdef OPTIONAL_REGISTER_PREFIX
1185 if (*start == OPTIONAL_REGISTER_PREFIX)
/* A register name must start like an identifier ...  */
1190 if (!ISALPHA (*p) || !is_name_beginner (*p))
/* ... and continues over alphanumerics and underscores.  */
1195 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
/* Look the scanned name up in the register hash table.  */
1197 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
/* Accept alternative spellings for a few register classes, e.g. a
   generic coprocessor register name or a bare number where a
   coprocessor number is expected.  Returns the register number on
   success (FAIL otherwise -- tail not visible in this excerpt).  */
1207 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1208 enum arm_reg_type type)
1210 /* Alternative syntaxes are accepted for a few register classes. */
1217 /* Generic coprocessor register names are allowed for these. */
1218 if (reg && reg->type == REG_TYPE_CN)
1223 /* For backward compatibility, a bare number is valid here. */
1225 unsigned long processor = strtoul (start, ccp, 10);
1226 if (*ccp != start && processor <= 15)
1230 case REG_TYPE_MMXWC:
1231 /* WC includes WCG. ??? I'm not sure this is true for all
1232 instructions that take WC registers. */
1233 if (reg && reg->type == REG_TYPE_MMXWCG)
1244 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1245 return value is the register number or FAIL. */
1248 arm_reg_parse (char **ccp, enum arm_reg_type type)
1251 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1254 /* Do not allow a scalar (reg+index) to parse as a register. */
1255 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1258 if (reg && reg->type == type)
/* Fall back to the alternative syntaxes (coproc numbers etc.).  */
1261 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1268 /* Parse a Neon type specifier. *STR should point at the leading '.'
1269 character. Does no verification at this stage that the type fits the opcode
1276 Can all be legally parsed by this function.
1278 Fills in neon_type struct pointer with parsed information, and updates STR
1279 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1280 type, FAIL if not. */
1283 parse_neon_type (struct neon_type *type, char **str)
/* Accumulate up to NEON_MAX_TYPE_ELS ".<t><size>" elements.  */
1290 while (type->elems < NEON_MAX_TYPE_ELS)
1292 enum neon_el_type thistype = NT_untyped;
1293 unsigned thissize = -1u;
1300 /* Just a size without an explicit type. */
/* Map the type letter to its element kind.  */
1304 switch (TOLOWER (*ptr))
1306 case 'i': thistype = NT_integer; break;
1307 case 'f': thistype = NT_float; break;
1308 case 'p': thistype = NT_poly; break;
1309 case 's': thistype = NT_signed; break;
1310 case 'u': thistype = NT_unsigned; break;
1312 thistype = NT_float;
1317 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1323 /* .f is an abbreviation for .f32. */
1324 if (thistype == NT_float && !ISDIGIT (*ptr))
/* Parse the element size in bits; only 8/16/32/64 are legal
   (the 64 check is on an elided line -- confirm upstream).  */
1329 thissize = strtoul (ptr, &ptr, 10);
1331 if (thissize != 8 && thissize != 16 && thissize != 32
1334 as_bad (_("bad size %d in type specifier"), thissize);
1342 type->el[type->elems].type = thistype;
1343 type->el[type->elems].size = thissize;
1348 /* Empty/missing type is not a successful parse. */
1349 if (type->elems == 0)
1357 /* Errors may be set multiple times during parsing or bit encoding
1358 (particularly in the Neon bits), but usually the earliest error which is set
1359 will be the most meaningful. Avoid overwriting it with later (cascading)
1360 errors by calling this function. */
/* Record ERR only if no earlier error has been recorded.  */
1363 first_error (const char *err)
1369 /* Parse a single type, e.g. ".s32", leading period included. */
1371 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1374 struct neon_type optype;
1378 if (parse_neon_type (&optype, &str) == SUCCESS)
/* Exactly one element is allowed for an operand type.  */
1380 if (optype.elems == 1)
1381 *vectype = optype.el[0];
1384 first_error (_("only one type should be specified for operand"));
1390 first_error (_("vector type expected"));
1402 /* Special meanings for indices (which have a range of 0-7), which will fit into
1405 #define NEON_ALL_LANES 15
1406 #define NEON_INTERLEAVE_LANES 14
1408 /* Parse either a register or a scalar, with an optional type. Return the
1409 register number, and optionally fill in the actual type of the register
1410 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1411 type/index information in *TYPEINFO. */
1414 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1415 enum arm_reg_type *rtype,
1416 struct neon_typed_alias *typeinfo)
1419 struct reg_entry *reg = arm_reg_parse_multi (&str);
1420 struct neon_typed_alias atype;
1421 struct neon_type_el parsetype;
/* Start with "no type, no index" defaults.  */
1425 atype.eltype.type = NT_invtype;
1426 atype.eltype.size = -1;
1428 /* Try alternate syntax for some types of register. Note these are mutually
1429 exclusive with the Neon syntax extensions. */
1432 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1440 /* Undo polymorphism when a set of register types may be accepted. */
1441 if ((type == REG_TYPE_NDQ
1442 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1443 || (type == REG_TYPE_VFSD
1444 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1445 || (type == REG_TYPE_NSDQ
1446 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1447 || reg->type == REG_TYPE_NQ))
1448 || (type == REG_TYPE_MMXWC
1449 && (reg->type == REG_TYPE_MMXWCG)))
1450 type = (enum arm_reg_type) reg->type;
1452 if (type != reg->type)
/* Optional ".s32"-style element type suffix.  */
1458 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1460 if ((atype.defined & NTA_HASTYPE) != 0)
1462 first_error (_("can't redefine type for operand"));
1465 atype.defined |= NTA_HASTYPE;
1466 atype.eltype = parsetype;
/* Optional "[n]" or "[]" scalar index; only D registers may be
   indexed.  */
1469 if (skip_past_char (&str, '[') == SUCCESS)
1471 if (type != REG_TYPE_VFD)
1473 first_error (_("only D registers may be indexed"));
1477 if ((atype.defined & NTA_HASINDEX) != 0)
1479 first_error (_("can't change index for operand"));
1483 atype.defined |= NTA_HASINDEX;
/* "[]" selects all lanes.  */
1485 if (skip_past_char (&str, ']') == SUCCESS)
1486 atype.index = NEON_ALL_LANES;
1491 my_get_expression (&exp, &str, GE_NO_PREFIX);
1493 if (exp.X_op != O_constant)
1495 first_error (_("constant expression required"));
1499 if (skip_past_char (&str, ']') == FAIL)
1502 atype.index = exp.X_add_number;
1517 /* Like arm_reg_parse, but allow allow the following extra features:
1518 - If RTYPE is non-zero, return the (possibly restricted) type of the
1519 register (e.g. Neon double or quad reg when either has been requested).
1520 - If this is a Neon vector type with additional type information, fill
1521 in the struct pointed to by VECTYPE (if non-NULL).
1522 This function will fault on encountering a scalar. */
1525 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1526 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1528 struct neon_typed_alias atype;
1530 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1535 /* Do not allow regname(... to parse as a register. */
1539 /* Do not allow a scalar (reg+index) to parse as a register. */
1540 if ((atype.defined & NTA_HASINDEX) != 0)
1542 first_error (_("register operand expected, but got scalar"));
/* Hand back the parsed element type to the caller, if requested.  */
1547 *vectype = atype.eltype;
1554 #define NEON_SCALAR_REG(X) ((X) >> 4)
1555 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1557 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1558 have enough information to be able to do a good job bounds-checking. So, we
1559 just do easy checks here, and do further checks later. */
/* Returns reg * 16 + index (see NEON_SCALAR_REG/NEON_SCALAR_INDEX),
   or FAIL on any parse/range error.  */
1562 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1566 struct neon_typed_alias atype;
1568 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1570 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
/* A scalar needs a concrete lane; "[]" (all lanes) is rejected.  */
1573 if (atype.index == NEON_ALL_LANES)
1575 first_error (_("scalar must have an index"));
/* Bounds check against the number of ELSIZE-bit lanes in 64 bits.  */
1578 else if (atype.index >= 64 / elsize)
1580 first_error (_("scalar index out of range"));
1585 *type = atype.eltype;
1589 return reg * 16 + atype.index;
1592 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
/* Accepts {r0,r2-r4}-style lists, plus '+'/'|'-joined ranges and a
   bare constant mask expression for backward compatibility.  */
1595 parse_reg_list (char ** strp)
1597 char * str = * strp;
1601 /* We come back here if we get ranges concatenated by '+' or '|'. */
1604 skip_whitespace (str);
1618 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1620 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1630 first_error (_("bad range in register list"));
/* Fill in every register between the range endpoints.  */
1634 for (i = cur_reg + 1; i < reg; i++)
1636 if (range & (1 << i))
1638 (_("Warning: duplicated register (r%d) in register list"),
1646 if (range & (1 << reg))
1647 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1649 else if (reg <= cur_reg)
1650 as_tsktsk (_("Warning: register range not in ascending order"));
1655 while (skip_past_comma (&str) != FAIL
1656 || (in_range = 1, *str++ == '-'));
1659 if (skip_past_char (&str, '}') == FAIL)
1661 first_error (_("missing `}'"));
/* No '{': allow a constant expression that folds to a 16-bit mask.  */
1669 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1672 if (exp.X_op == O_constant)
1674 if (exp.X_add_number
1675 != (exp.X_add_number & 0x0000ffff))
1677 inst.error = _("invalid register mask");
1681 if ((range & exp.X_add_number) != 0)
1683 int regno = range & exp.X_add_number;
1686 regno = (1 << regno) - 1;
1688 (_("Warning: duplicated register (r%d) in register list"),
1692 range |= exp.X_add_number;
/* Non-constant: defer to a BFD_RELOC_ARM_MULTI relocation.  */
1696 if (inst.reloc.type != 0)
1698 inst.error = _("expression too complex");
1702 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1703 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1704 inst.reloc.pc_rel = 0;
1708 if (*str == '|' || *str == '+')
1714 while (another_range);
1720 /* Types of registers in a list. */
1729 /* Parse a VFP register list. If the string is invalid return FAIL.
1730 Otherwise return the number of registers, and set PBASE to the first
1731 register. Parses registers of type ETYPE.
1732 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1733 - Q registers can be used to specify pairs of D registers
1734 - { } can be omitted from around a singleton register list
1735 FIXME: This is not implemented, as it would require backtracking in
1738 This could be done (the meaning isn't really ambiguous), but doesn't
1739 fit in well with the current parsing framework.
1740 - 32 D registers may be used (also true for VFPv3).
1741 FIXME: Types are ignored in these register lists, which is probably a
/* Parse a VFP/Neon register list of element type ETYPE (see comment
   above).  Returns the register count and sets *PBASE to the lowest
   register; FAIL on error.  */
1745 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1750 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1754 unsigned long mask = 0;
1757 if (skip_past_char (&str, '{') == FAIL)
1759 inst.error = _("expecting {");
/* Select the concrete register class for this list kind.  */
1766 regtype = REG_TYPE_VFS;
1771 regtype = REG_TYPE_VFD;
1774 case REGLIST_NEON_D:
1775 regtype = REG_TYPE_NDQ;
1779 if (etype != REGLIST_VFP_S)
1781 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1782 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
/* Record that the D32 feature was actually used.  */
1786 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1789 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1796 base_reg = max_regs;
1800 int setmask = 1, addregs = 1;
/* NOTE(review): "®type" below looks like mojibake of "&regtype" in
   this copy -- confirm against upstream tc-arm.c before relying on it.  */
1802 new_base = arm_typed_reg_parse (&str, regtype, ®type, NULL);
1804 if (new_base == FAIL)
1806 first_error (_(reg_expected_msgs[regtype]));
1810 if (new_base >= max_regs)
1812 first_error (_("register out of range in list"));
1816 /* Note: a value of 2 * n is returned for the register Q<n>. */
1817 if (regtype == REG_TYPE_NQ)
1823 if (new_base < base_reg)
1824 base_reg = new_base;
1826 if (mask & (setmask << new_base))
1828 first_error (_("invalid register list"));
1832 if ((mask >> new_base) != 0 && ! warned)
1834 as_tsktsk (_("register list not in ascending order"));
1838 mask |= setmask << new_base;
1841 if (*str == '-') /* We have the start of a range expression */
1847 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1850 inst.error = gettext (reg_expected_msgs[regtype]);
1854 if (high_range >= max_regs)
1856 first_error (_("register out of range in list"));
1860 if (regtype == REG_TYPE_NQ)
1861 high_range = high_range + 1;
1863 if (high_range <= new_base)
1865 inst.error = _("register range not in ascending order");
/* Mark every register covered by the range.  */
1869 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1871 if (mask & (setmask << new_base))
1873 inst.error = _("invalid register list");
1877 mask |= setmask << new_base;
1882 while (skip_past_comma (&str) != FAIL);
1886 /* Sanity check -- should have raised a parse error above. */
1887 if (count == 0 || count > max_regs)
1892 /* Final test -- the registers must be consecutive. */
1894 for (i = 0; i < count; i++)
1896 if ((mask & (1u << i)) == 0)
1898 inst.error = _("non-contiguous register range");
1908 /* True if two alias types are the same. */
/* Compares the defined-flags, element type/size, and index fields of
   two neon_typed_alias structures.  */
1911 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1919 if (a->defined != b->defined)
1922 if ((a->defined & NTA_HASTYPE) != 0
1923 && (a->eltype.type != b->eltype.type
1924 || a->eltype.size != b->eltype.size))
1927 if ((a->defined & NTA_HASINDEX) != 0
1928 && (a->index != b->index))
1934 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1935 The base register is put in *PBASE.
1936 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1938 The register stride (minus one) is put in bit 4 of the return value.
1939 Bits [6:5] encode the list length (minus one).
1940 The type of the list elements is put in *ELTYPE, if non-NULL. */
1942 #define NEON_LANE(X) ((X) & 0xf)
1943 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1944 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
/* Parse an element/structure list for Neon VLD<n>/VST<n> (see comment
   above for the encoding of the return value).  */
1947 parse_neon_el_struct_list (char **str, unsigned *pbase,
1948 struct neon_type_el *eltype)
1955 int leading_brace = 0;
1956 enum arm_reg_type rtype = REG_TYPE_NDQ;
1957 const char *const incr_error = _("register stride must be 1 or 2");
1958 const char *const type_error = _("mismatched element/structure types in list");
1959 struct neon_typed_alias firsttype;
1961 if (skip_past_char (&ptr, '{') == SUCCESS)
1966 struct neon_typed_alias atype;
1967 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1971 first_error (_(reg_expected_msgs[rtype]));
1978 if (rtype == REG_TYPE_NQ)
/* Derive the register stride from the first two list entries ...  */
1984 else if (reg_incr == -1)
1986 reg_incr = getreg - base_reg;
1987 if (reg_incr < 1 || reg_incr > 2)
1989 first_error (_(incr_error));
/* ... and require all later entries to follow that stride.  */
1993 else if (getreg != base_reg + reg_incr * count)
1995 first_error (_(incr_error));
/* All entries must carry the same type/index information.  */
1999 if (! neon_alias_types_same (&atype, &firsttype))
2001 first_error (_(type_error));
2005 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2009 struct neon_typed_alias htype;
2010 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
2012 lane = NEON_INTERLEAVE_LANES;
2013 else if (lane != NEON_INTERLEAVE_LANES)
2015 first_error (_(type_error));
2020 else if (reg_incr != 1)
2022 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2026 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
2029 first_error (_(reg_expected_msgs[rtype]));
2032 if (! neon_alias_types_same (&htype, &firsttype))
2034 first_error (_(type_error));
2037 count += hireg + dregs - getreg;
2041 /* If we're using Q registers, we can't use [] or [n] syntax. */
2042 if (rtype == REG_TYPE_NQ)
2048 if ((atype.defined & NTA_HASINDEX) != 0)
2052 else if (lane != atype.index)
2054 first_error (_(type_error));
2058 else if (lane == -1)
2059 lane = NEON_INTERLEAVE_LANES;
2060 else if (lane != NEON_INTERLEAVE_LANES)
2062 first_error (_(type_error));
2067 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
2069 /* No lane set by [x]. We must be interleaving structures. */
2071 lane = NEON_INTERLEAVE_LANES;
/* Final sanity checks on lane, base register, count and stride.  */
2074 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
2075 || (count > 1 && reg_incr == -1))
2077 first_error (_("error parsing element/structure list"));
2081 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
2083 first_error (_("expected }"));
2091 *eltype = firsttype.eltype;
/* Encode lane in [3:0], stride-1 in [4], count-1 in [6:5].  */
2096 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
2099 /* Parse an explicit relocation suffix on an expression. This is
2100 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2101 arm_reloc_hsh contains no entries, so this function can only
2102 succeed if there is no () after the word. Returns -1 on error,
2103 BFD_RELOC_UNUSED if there wasn't any suffix. */
2106 parse_reloc (char **str)
2108 struct reloc_entry *r;
2112 return BFD_RELOC_UNUSED;
/* Scan the word inside the parentheses.  */
2117 while (*q && *q != ')' && *q != ',')
2122 if ((r = (struct reloc_entry *)
2123 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2130 /* Directives: register aliases. */
/* Insert STR as an alias for register NUMBER of the given TYPE.
   Warns (and ignores the request) on redefinition of a built-in or
   of an alias with different number/type.  Returns the reg_entry.  */
2132 static struct reg_entry *
2133 insert_reg_alias (char *str, unsigned number, int type)
2135 struct reg_entry *new_reg;
2138 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2140 if (new_reg->builtin)
2141 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2143 /* Only warn about a redefinition if it's not defined as the
2145 else if (new_reg->number != number || new_reg->type != type)
2146 as_warn (_("ignoring redefinition of register alias '%s'"), str);
/* No existing entry: allocate and insert a fresh one.  */
2151 name = xstrdup (str);
2152 new_reg = (struct reg_entry *) xmalloc (sizeof (struct reg_entry));
2154 new_reg->name = name;
2155 new_reg->number = number;
2156 new_reg->type = type;
2157 new_reg->builtin = FALSE;
2158 new_reg->neon = NULL;
2160 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
/* As insert_reg_alias, but also attach Neon type/index information
   (ATYPE, may be NULL) to the new alias.  */
2167 insert_neon_reg_alias (char *str, int number, int type,
2168 struct neon_typed_alias *atype)
2170 struct reg_entry *reg = insert_reg_alias (str, number, type);
2174 first_error (_("attempt to redefine typed alias"));
/* Copy the typed-alias info into freshly allocated storage.  */
2180 reg->neon = (struct neon_typed_alias *)
2181 xmalloc (sizeof (struct neon_typed_alias));
2182 *reg->neon = *atype;
2186 /* Look for the .req directive. This is of the form:
2188 new_register_name .req existing_register_name
2190 If we find one, or if it looks sufficiently like one that we want to
2191 handle any error here, return TRUE. Otherwise return FALSE. */
2194 create_register_alias (char * newname, char *p)
2196 struct reg_entry *old;
2197 char *oldname, *nbuf;
2200 /* The input scrubber ensures that whitespace after the mnemonic is
2201 collapsed to single spaces. */
2203 if (strncmp (oldname, " .req ", 6) != 0)
2207 if (*oldname == '\0')
2210 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2213 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2217 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2218 the desired alias name, and p points to its end. If not, then
2219 the desired alias name is in the global original_case_string. */
2220 #ifdef TC_CASE_SENSITIVE
2223 newname = original_case_string;
2224 nlen = strlen (newname);
/* Work on a NUL-terminated copy of the alias name.  */
2227 nbuf = (char *) alloca (nlen + 1);
2228 memcpy (nbuf, newname, nlen);
2231 /* Create aliases under the new name as stated; an all-lowercase
2232 version of the new name; and an all-uppercase version of the new
2234 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2236 for (p = nbuf; *p; p++)
/* Only insert the case-folded variant if it differs.  */
2239 if (strncmp (nbuf, newname, nlen))
2241 /* If this attempt to create an additional alias fails, do not bother
2242 trying to create the all-lower case alias. We will fail and issue
2243 a second, duplicate error message. This situation arises when the
2244 programmer does something like:
2247 The second .req creates the "Foo" alias but then fails to create
2248 the artificial FOO alias because it has already been created by the
2250 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2254 for (p = nbuf; *p; p++)
2257 if (strncmp (nbuf, newname, nlen))
2258 insert_reg_alias (nbuf, old->number, old->type);
2264 /* Create a Neon typed/indexed register alias using directives, e.g.:
2269 These typed registers can be used instead of the types specified after the
2270 Neon mnemonic, so long as all operands given have types. Types can also be
2271 specified directly, e.g.:
2272 vadd d0.s32, d1.s32, d2.s32 */
2275 create_neon_reg_alias (char *newname, char *p)
2277 enum arm_reg_type basetype;
2278 struct reg_entry *basereg;
2279 struct reg_entry mybasereg;
2280 struct neon_type ntype;
2281 struct neon_typed_alias typeinfo;
2282 char *namebuf, *nameend ATTRIBUTE_UNUSED;
/* Start from an empty typed-alias description.  */
2285 typeinfo.defined = 0;
2286 typeinfo.eltype.type = NT_invtype;
2287 typeinfo.eltype.size = -1;
2288 typeinfo.index = -1;
/* ".dn" aliases a D register, ".qn" a Q register.  */
2292 if (strncmp (p, " .dn ", 5) == 0)
2293 basetype = REG_TYPE_VFD;
2294 else if (strncmp (p, " .qn ", 5) == 0)
2295 basetype = REG_TYPE_NQ;
2304 basereg = arm_reg_parse_multi (&p);
2306 if (basereg && basereg->type != basetype)
2308 as_bad (_("bad type for register"));
2312 if (basereg == NULL)
2315 /* Try parsing as an integer. */
2316 my_get_expression (&exp, &p, GE_NO_PREFIX);
2317 if (exp.X_op != O_constant)
2319 as_bad (_("expression must be constant"));
2322 basereg = &mybasereg;
/* Q<n> is represented as D register number 2*n.  */
2323 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
/* Inherit any typed-alias info from the base register.  */
2329 typeinfo = *basereg->neon;
2331 if (parse_neon_type (&ntype, &p) == SUCCESS)
2333 /* We got a type. */
2334 if (typeinfo.defined & NTA_HASTYPE)
2336 as_bad (_("can't redefine the type of a register alias"));
2340 typeinfo.defined |= NTA_HASTYPE;
2341 if (ntype.elems != 1)
2343 as_bad (_("you must specify a single type only"));
2346 typeinfo.eltype = ntype.el[0];
2349 if (skip_past_char (&p, '[') == SUCCESS)
2352 /* We got a scalar index. */
2354 if (typeinfo.defined & NTA_HASINDEX)
2356 as_bad (_("can't redefine the index of a scalar alias"));
2360 my_get_expression (&exp, &p, GE_NO_PREFIX);
2362 if (exp.X_op != O_constant)
2364 as_bad (_("scalar index must be constant"));
2368 typeinfo.defined |= NTA_HASINDEX;
2369 typeinfo.index = exp.X_add_number;
2371 if (skip_past_char (&p, ']') == FAIL)
2373 as_bad (_("expecting ]"));
2378 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2379 the desired alias name, and p points to its end. If not, then
2380 the desired alias name is in the global original_case_string. */
2381 #ifdef TC_CASE_SENSITIVE
2382 namelen = nameend - newname;
2384 newname = original_case_string;
2385 namelen = strlen (newname);
2388 namebuf = (char *) alloca (namelen + 1);
2389 strncpy (namebuf, newname, namelen);
2390 namebuf[namelen] = '\0';
/* Insert the alias as written, then upper- and lower-case variants
   when they differ from the original spelling.  */
2392 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2393 typeinfo.defined != 0 ? &typeinfo : NULL);
2395 /* Insert name in all uppercase. */
2396 for (p = namebuf; *p; p++)
2399 if (strncmp (namebuf, newname, namelen))
2400 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2401 typeinfo.defined != 0 ? &typeinfo : NULL);
2403 /* Insert name in all lowercase. */
2404 for (p = namebuf; *p; p++)
2407 if (strncmp (namebuf, newname, namelen))
2408 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2409 typeinfo.defined != 0 ? &typeinfo : NULL);
2414 /* Should never be called, as .req goes between the alias and the
2415 register name, not at the beginning of the line. */
2418 s_req (int a ATTRIBUTE_UNUSED)
2420 as_bad (_("invalid syntax for .req directive"));
/* Like s_req: .dn must follow the alias name, so reaching here is an
   error.  */
2424 s_dn (int a ATTRIBUTE_UNUSED)
2426 as_bad (_("invalid syntax for .dn directive"));
/* Like s_req: .qn must follow the alias name, so reaching here is an
   error.  */
2430 s_qn (int a ATTRIBUTE_UNUSED)
2432 as_bad (_("invalid syntax for .qn directive"));
2435 /* The .unreq directive deletes an alias which was previously defined
2436 by .req. For example:
2442 s_unreq (int a ATTRIBUTE_UNUSED)
2447 name = input_line_pointer;
/* Scan to the end of the alias name.  */
2449 while (*input_line_pointer != 0
2450 && *input_line_pointer != ' '
2451 && *input_line_pointer != '\n')
2452 ++input_line_pointer;
/* Temporarily NUL-terminate the name in place.  */
2454 saved_char = *input_line_pointer;
2455 *input_line_pointer = 0;
2458 as_bad (_("invalid syntax for .unreq directive"));
2461 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2465 as_bad (_("unknown register alias '%s'"), name);
2466 else if (reg->builtin)
2467 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2474 hash_delete (arm_reg_hsh, name, FALSE);
2475 free ((char *) reg->name);
2480 /* Also locate the all upper case and all lower case versions.
2481 Do not complain if we cannot find one or the other as it
2482 was probably deleted above. */
2484 nbuf = strdup (name);
2485 for (p = nbuf; *p; p++)
2487 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2490 hash_delete (arm_reg_hsh, nbuf, FALSE);
2491 free ((char *) reg->name);
2497 for (p = nbuf; *p; p++)
2499 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2502 hash_delete (arm_reg_hsh, nbuf, FALSE);
2503 free ((char *) reg->name);
/* Restore the clobbered input character before finishing the line.  */
2513 *input_line_pointer = saved_char;
2514 demand_empty_rest_of_line ();
2517 /* Directives: Instruction set selection. */
2520 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2521 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2522 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2523 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2525 /* Create a new mapping symbol for the transition to STATE. */
/* Emits a $a/$t/$d mapping symbol (ARM AAELF section 4.5.5) at VALUE
   within FRAG, and records it in the frag's first_map/last_map.  */
2528 make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2531 const char * symname;
2538 type = BSF_NO_FLAGS;
2542 type = BSF_NO_FLAGS;
2546 type = BSF_NO_FLAGS;
2552 symbolP = symbol_new (symname, now_seg, value, frag);
2553 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
/* ARM state: clear the Thumb attributes.  */
2558 THUMB_SET_FUNC (symbolP, 0);
2559 ARM_SET_THUMB (symbolP, 0);
2560 ARM_SET_INTERWORK (symbolP, support_interwork);
/* Thumb state: set the Thumb attributes.  */
2564 THUMB_SET_FUNC (symbolP, 1);
2565 ARM_SET_THUMB (symbolP, 1);
2566 ARM_SET_INTERWORK (symbolP, support_interwork);
2574 /* Save the mapping symbols for future reference. Also check that
2575 we do not place two mapping symbols at the same offset within a
2576 frag. We'll handle overlap between frags in
2577 check_mapping_symbols.
2579 If .fill or other data filling directive generates zero sized data,
2580 the mapping symbol for the following code will have the same value
2581 as the one generated for the data filling directive. In this case,
2582 we replace the old symbol with the new one at the same address. */
2585 if (frag->tc_frag_data.first_map != NULL)
2587 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
2588 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
2590 frag->tc_frag_data.first_map = symbolP;
2592 if (frag->tc_frag_data.last_map != NULL)
2594 know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
2595 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
2596 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
2598 frag->tc_frag_data.last_map = symbolP;
2601 /* We must sometimes convert a region marked as code to data during
2602 code alignment, if an odd number of bytes have to be padded. The
2603 code mapping symbol is pushed to an aligned address. */
2606 insert_data_mapping_symbol (enum mstate state,
2607 valueT value, fragS *frag, offsetT bytes)
2609 /* If there was already a mapping symbol, remove it. */
2610 if (frag->tc_frag_data.last_map != NULL
2611 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2613 symbolS *symp = frag->tc_frag_data.last_map;
2617 know (frag->tc_frag_data.first_map == symp);
2618 frag->tc_frag_data.first_map = NULL;
2620 frag->tc_frag_data.last_map = NULL;
2621 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
/* Mark the padding as data, then restore the code state after it.  */
2624 make_mapping_symbol (MAP_DATA, value, frag);
2625 make_mapping_symbol (state, value + bytes, frag);
2628 static void mapping_state_2 (enum mstate state, int max_chars);
2630 /* Set the mapping state to STATE. Only call this when about to
2631 emit some STATE bytes to the file. */
2634 mapping_state (enum mstate state)
2636 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2638 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2640 if (mapstate == state)
2641 /* The mapping symbol has already been emitted.
2642 There is nothing else to do. */
2645 if (state == MAP_ARM || state == MAP_THUMB)
2647 All ARM instructions require 4-byte alignment.
2648 (Almost) all Thumb instructions require 2-byte alignment.
2650 When emitting instructions into any section, mark the section
2653 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2654 but themselves require 2-byte alignment; this applies to some
2655 PC- relative forms. However, these cases will invovle implicit
2656 literal pool generation or an explicit .align >=2, both of
2657 which will cause the section to me marked with sufficient
2658 alignment. Thus, we don't handle those cases here. */
2659 record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2661 if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2662 /* This case will be evaluated later in the next else. */
2664 else if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2665 || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2667 /* Only add the symbol if the offset is > 0:
2668 if we're at the first frag, check it's size > 0;
2669 if we're not at the first frag, then for sure
2670 the offset is > 0. */
2671 struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2672 const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2675 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
/* Delegate the actual symbol emission / state recording.  */
2678 mapping_state_2 (state, 0);
2682 /* Same as mapping_state, but MAX_CHARS bytes have already been
2683 allocated. Put the mapping symbol that far back. */
2686 mapping_state_2 (enum mstate state, int max_chars)
2688 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
/* Only normal sections carry mapping symbols.  */
2690 if (!SEG_NORMAL (now_seg))
2693 if (mapstate == state)
2694 /* The mapping symbol has already been emitted.
2695 There is nothing else to do. */
2698 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2699 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
/* Non-OBJ_ELF builds: mapping-state handling compiles away.  */
2702 #define mapping_state(x) ((void)0)
2703 #define mapping_state_2(x, y) ((void)0)
2706 /* Find the real, Thumb encoded start of a Thumb function. */
/* Looks up ".real_start_of<name>"; falls back to SYMBOLP itself (with
   a warning) when no such stub symbol exists.  */
2710 find_real_start (symbolS * symbolP)
2713 const char * name = S_GET_NAME (symbolP);
2714 symbolS * new_target;
2716 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2717 #define STUB_NAME ".real_start_of"
2722 /* The compiler may generate BL instructions to local labels because
2723 it needs to perform a branch to a far away location. These labels
2724 do not have a corresponding ".real_start_of" label. We check
2725 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2726 the ".real_start_of" convention for nonlocal branches. */
2727 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2730 real_start = ACONCAT ((STUB_NAME, name, NULL));
2731 new_target = symbol_find (real_start);
2733 if (new_target == NULL)
2735 as_warn (_("Failed to find real start of function: %s\n"), name);
2736 new_target = symbolP;
/* Switch the assembler between Thumb (WIDTH == 16) and ARM
   (WIDTH == 32) instruction encoding, checking CPU support.  */
2744 opcode_select (int width)
2751 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2752 as_bad (_("selected processor does not support THUMB opcodes"));
2755 /* No need to force the alignment, since we will have been
2756 coming from ARM mode, which is word-aligned. */
2757 record_alignment (now_seg, 1);
2764 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2765 as_bad (_("selected processor does not support ARM opcodes"));
/* Word-align before emitting ARM instructions.  */
2770 frag_align (2, 0, 0);
2772 record_alignment (now_seg, 1);
2777 as_bad (_("invalid instruction size selected (%d)"), width);
/* Handle the .arm directive: select 32-bit ARM encoding.  */
2782 s_arm (int ignore ATTRIBUTE_UNUSED)
2785 demand_empty_rest_of_line ();
/* Handle the .thumb directive: select 16-bit Thumb encoding.  */
2789 s_thumb (int ignore ATTRIBUTE_UNUSED)
2792 demand_empty_rest_of_line ();
/* Handle ".code 16" / ".code 32": select Thumb or ARM encoding.  */
2796 s_code (int unused ATTRIBUTE_UNUSED)
2800 temp = get_absolute_expression ();
2805 opcode_select (temp);
2809 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
/* Handle .force_thumb: enter Thumb mode unconditionally.  */
2814 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2816 /* If we are not already in thumb mode go into it, EVEN if
2817 the target processor does not support thumb instructions.
2818 This is used by gcc/config/arm/lib1funcs.asm for example
2819 to compile interworking support functions even if the
2820 target processor should not support interworking. */
2824 record_alignment (now_seg, 1);
2827 demand_empty_rest_of_line ();
/* Handle .thumb_func: flag the next label as a Thumb function name.  */
2831 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2835 /* The following label is the name/address of the start of a Thumb function.
2836 We need to know this for the interworking support. */
2837 label_is_thumb_function_name = TRUE;
2840 /* Perform a .set directive, but also mark the alias as
2841 being a thumb function. */
2844 s_thumb_set (int equiv)
2846 /* XXX the following is a duplicate of the code for s_set() in read.c
2847 We cannot just call that code as we need to get at the symbol that
2854 /* Especial apologies for the random logic:
2855 This just grew, and could be parsed much more simply!
/* Scan "name, expression" off the input line.  */
2857 name = input_line_pointer;
2858 delim = get_symbol_end ();
2859 end_name = input_line_pointer;
2862 if (*input_line_pointer != ',')
2865 as_bad (_("expected comma after name \"%s\""), name);
2867 ignore_rest_of_line ();
2871 input_line_pointer++;
2874 if (name[0] == '.' && name[1] == '\0')
2876 /* XXX - this should not happen to .thumb_set. */
/* Find or create the symbol being defined.  */
2880 if ((symbolP = symbol_find (name)) == NULL
2881 && (symbolP = md_undefined_symbol (name)) == NULL)
2884 /* When doing symbol listings, play games with dummy fragments living
2885 outside the normal fragment chain to record the file and line info
2887 if (listing & LISTING_SYMBOLS)
2889 extern struct list_info_struct * listing_tail;
2890 fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2892 memset (dummy_frag, 0, sizeof (fragS));
2893 dummy_frag->fr_type = rs_fill;
2894 dummy_frag->line = listing_tail;
2895 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2896 dummy_frag->fr_symbol = symbolP;
2900 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2903 /* "set" symbols are local unless otherwise specified. */
2904 SF_SET_LOCAL (symbolP);
2905 #endif /* OBJ_COFF */
2906 } /* Make a new symbol. */
2908 symbol_table_insert (symbolP);
/* Reject redefinition of an already-defined non-register symbol.  */
2913 && S_IS_DEFINED (symbolP)
2914 && S_GET_SEGMENT (symbolP) != reg_section)
2915 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2917 pseudo_set (symbolP);
2919 demand_empty_rest_of_line ();
2921 /* XXX Now we come to the Thumb specific bit of code. */
/* Mark the alias as a Thumb function for interworking support.  */
2923 THUMB_SET_FUNC (symbolP, 1);
2924 ARM_SET_THUMB (symbolP, 1);
2925 #if defined OBJ_ELF || defined OBJ_COFF
2926 ARM_SET_INTERWORK (symbolP, support_interwork);
2930 /* Directives: Mode selection. */
2932 /* .syntax [unified|divided] - choose the new unified syntax
2933 (same for Arm and Thumb encoding, modulo slight differences in what
2934 can be represented) or the old divergent syntax for each mode. */
/* Handle the ".syntax [unified|divided]" directive: select between
   the unified ARM/Thumb syntax and the old divided syntax by setting
   the unified_syntax flag.  */
2936 s_syntax (int unused ATTRIBUTE_UNUSED)
2940 name = input_line_pointer;
2941 delim = get_symbol_end ();
2943 if (!strcasecmp (name, "unified"))
2944 unified_syntax = TRUE;
2945 else if (!strcasecmp (name, "divided"))
2946 unified_syntax = FALSE;
2949 as_bad (_("unrecognized syntax mode \"%s\""), name);
/* Restore the delimiter get_symbol_end overwrote with NUL.  */
2952 *input_line_pointer = delim;
2953 demand_empty_rest_of_line ();
2956 /* Directives: sectioning and alignment. */
2958 /* Same as s_align_ptwo but align 0 => align 2. */
/* Handle the ".align" directive (power-of-two form).  Same as
   s_align_ptwo except that an alignment of 0 means align to 2
   (see the comment preceding this function).  An optional second
   operand gives the fill value.  */
2961 s_align (int unused ATTRIBUTE_UNUSED)
/* Alignment exponent is capped at 15 (i.e. 2^15 bytes).  */
2966 long max_alignment = 15;
2968 temp = get_absolute_expression ();
2969 if (temp > max_alignment)
2970 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2973 as_bad (_("alignment negative. 0 assumed."));
2977 if (*input_line_pointer == ',')
2979 input_line_pointer++;
2980 temp_fill = get_absolute_expression ();
2992 /* Only make a frag if we HAVE to. */
2993 if (temp && !need_pass_2)
/* In text sections with no explicit fill, pad with no-ops rather
   than the fill byte.  */
2995 if (!fill_p && subseg_text_p (now_seg))
2996 frag_align_code (temp, 0);
2998 frag_align (temp, (int) temp_fill, 0);
3000 demand_empty_rest_of_line ();
3002 record_alignment (now_seg, temp);
/* Handle the ".bss" directive: switch to the BSS section.  */
3006 s_bss (int ignore ATTRIBUTE_UNUSED)
3008 /* We don't support putting frags in the BSS segment, we fake it by
3009 marking in_bss, then looking at s_skip for clues. */
3010 subseg_set (bss_section, 0);
3011 demand_empty_rest_of_line ();
3013 #ifdef md_elf_section_change_hook
3014 md_elf_section_change_hook ();
/* Handle the ".even" directive: align to a 2-byte boundary.  */
3019 s_even (int ignore ATTRIBUTE_UNUSED)
3021 /* Never make frag if expect extra pass. */
3023 frag_align (1, 0, 0);
3025 record_alignment (now_seg, 1);
3027 demand_empty_rest_of_line ();
3030 /* Directives: CodeComposer Studio. */
3032 /* .ref (for CodeComposer Studio syntax only). */
/* Handle the CodeComposer Studio ".ref" directive.  Accepted (and
   ignored) only when -mccs is in effect.  */
3034 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3036 if (codecomposer_syntax)
3037 ignore_rest_of_line ();
3039 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3042 /* If name is not NULL, then it is used for marking the beginning of a
3043 function, wherease if it is NULL then it means the function end. */
3045 asmfunc_debug (const char * name)
/* Remembers the function name between the begin call (name != NULL)
   and the matching end call (name == NULL).  */
3047 static const char * last_name = NULL;
3051 gas_assert (last_name == NULL);
3054 if (debug_type == DEBUG_STABS)
3055 stabs_generate_asm_func (name, name);
3059 gas_assert (last_name != NULL);
3061 if (debug_type == DEBUG_STABS)
3062 stabs_generate_asm_endfunc (last_name, last_name);
/* Handle the CCS ".asmfunc" directive: start waiting for the function
   name label; diagnoses misuse via the asmfunc state machine.  */
3069 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3071 if (codecomposer_syntax)
3073 switch (asmfunc_state)
3075 case OUTSIDE_ASMFUNC:
3076 asmfunc_state = WAITING_ASMFUNC_NAME;
3079 case WAITING_ASMFUNC_NAME:
3080 as_bad (_(".asmfunc repeated."));
3083 case WAITING_ENDASMFUNC:
3084 as_bad (_(".asmfunc without function."));
3087 demand_empty_rest_of_line ();
3090 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
/* Handle the CCS ".endasmfunc" directive: close the current asmfunc
   and emit end-of-function debug info.  */
3094 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3096 if (codecomposer_syntax)
3098 switch (asmfunc_state)
3100 case OUTSIDE_ASMFUNC:
3101 as_bad (_(".endasmfunc without a .asmfunc."));
3104 case WAITING_ASMFUNC_NAME:
3105 as_bad (_(".endasmfunc without function."));
3108 case WAITING_ENDASMFUNC:
3109 asmfunc_state = OUTSIDE_ASMFUNC;
3110 asmfunc_debug (NULL);
3113 demand_empty_rest_of_line ();
3116 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
/* Handle the CCS ".def" directive; only valid with -mccs.  */
3120 s_ccs_def (int name)
3122 if (codecomposer_syntax)
3125 as_bad (_(".def pseudo-op only available with -mccs flag."));
3128 /* Directives: Literal pools. */
/* Return the literal pool for the current (section, subsection) pair,
   or NULL if none exists yet.  */
3130 static literal_pool *
3131 find_literal_pool (void)
3133 literal_pool * pool;
3135 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3137 if (pool->section == now_seg
3138 && pool->sub_section == now_subseg)
/* Return the literal pool for the current (section, subsection),
   creating and registering a new empty one if necessary.  Also gives
   a fresh pool (or an emptied pool) its fake label symbol and id.  */
3145 static literal_pool *
3146 find_or_make_literal_pool (void)
3148 /* Next literal pool ID number. */
3149 static unsigned int latest_pool_num = 1;
3150 literal_pool * pool;
3152 pool = find_literal_pool ();
3156 /* Create a new pool. */
3157 pool = (literal_pool *) xmalloc (sizeof (* pool));
3161 pool->next_free_entry = 0;
3162 pool->section = now_seg;
3163 pool->sub_section = now_subseg;
3164 pool->next = list_of_pools;
3165 pool->symbol = NULL;
3166 pool->alignment = 2;
3168 /* Add it to the list. */
3169 list_of_pools = pool;
3172 /* New pools, and emptied pools, will have a NULL symbol. */
3173 if (pool->symbol == NULL)
3175 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3176 (valueT) 0, &zero_address_frag);
3177 pool->id = latest_pool_num ++;
3184 /* Add the literal in the global 'inst'
3185 structure to the relevant literal pool. */
/* Add the NBYTES-sized literal held in the global 'inst' structure to
   the literal pool of the current section, reusing an existing entry
   when an identical one is already present.  8-byte literals are
   stored as two consecutive 4-byte entries aligned to 8 bytes.  On
   success, rewrites inst.reloc.exp to an O_symbol reference to the
   pool symbol plus the entry's byte offset.  Returns SUCCESS/FAIL
   (failure paths set inst.error — TODO confirm return convention,
   return statements are not visible in this view).  */
3188 add_to_lit_pool (unsigned int nbytes)
3190 #define PADDING_SLOT 0x1
3191 #define LIT_ENTRY_SIZE_MASK 0xFF
3192 literal_pool * pool;
3193 unsigned int entry, pool_size = 0;
3194 bfd_boolean padding_slot_p = FALSE;
/* For 8-byte literals, split the value into two 4-byte halves,
   ordered according to target endianness.  */
3200 imm1 = inst.operands[1].imm;
3201 imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
3202 : inst.reloc.exp.X_unsigned ? 0
3203 : ((int64_t) inst.operands[1].imm) >> 32);
3204 if (target_big_endian)
3207 imm2 = inst.operands[1].imm;
3211 pool = find_or_make_literal_pool ();
3213 /* Check if this literal value is already in the pool. */
3214 for (entry = 0; entry < pool->next_free_entry; entry ++)
/* Match an existing constant entry: same value, size and signedness.  */
3218 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3219 && (inst.reloc.exp.X_op == O_constant)
3220 && (pool->literals[entry].X_add_number
3221 == inst.reloc.exp.X_add_number)
3222 && (pool->literals[entry].X_md == nbytes)
3223 && (pool->literals[entry].X_unsigned
3224 == inst.reloc.exp.X_unsigned))
/* Match an existing symbolic entry: same symbol(s), offset and size.  */
3227 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3228 && (inst.reloc.exp.X_op == O_symbol)
3229 && (pool->literals[entry].X_add_number
3230 == inst.reloc.exp.X_add_number)
3231 && (pool->literals[entry].X_add_symbol
3232 == inst.reloc.exp.X_add_symbol)
3233 && (pool->literals[entry].X_op_symbol
3234 == inst.reloc.exp.X_op_symbol)
3235 && (pool->literals[entry].X_md == nbytes))
/* Match an 8-byte literal stored as two consecutive aligned
   4-byte entries holding imm1/imm2.  */
3238 else if ((nbytes == 8)
3239 && !(pool_size & 0x7)
3240 && ((entry + 1) != pool->next_free_entry)
3241 && (pool->literals[entry].X_op == O_constant)
3242 && (pool->literals[entry].X_add_number == (offsetT) imm1)
3243 && (pool->literals[entry].X_unsigned
3244 == inst.reloc.exp.X_unsigned)
3245 && (pool->literals[entry + 1].X_op == O_constant)
3246 && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
3247 && (pool->literals[entry + 1].X_unsigned
3248 == inst.reloc.exp.X_unsigned))
/* A padding slot (inserted to align an 8-byte entry) may be reused
   for a new 4-byte literal.  */
3251 padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
3252 if (padding_slot_p && (nbytes == 4))
3258 /* Do we need to create a new entry? */
3259 if (entry == pool->next_free_entry)
3261 if (entry >= MAX_LITERAL_POOL_SIZE)
3263 inst.error = _("literal pool overflow");
3269 /* For 8-byte entries, we align to an 8-byte boundary,
3270 and split it into two 4-byte entries, because on 32-bit
3271 host, 8-byte constants are treated as big num, thus
3272 saved in "generic_bignum" which will be overwritten
3273 by later assignments.
3275 We also need to make sure there is enough space for
3278 We also check to make sure the literal operand is a
3280 if (!(inst.reloc.exp.X_op == O_constant
3281 || inst.reloc.exp.X_op == O_big))
3283 inst.error = _("invalid type for literal pool");
/* Pool not 8-byte aligned: insert a 4-byte padding slot first.  */
3286 else if (pool_size & 0x7)
3288 if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
3290 inst.error = _("literal pool overflow");
3294 pool->literals[entry] = inst.reloc.exp;
3295 pool->literals[entry].X_add_number = 0;
3296 pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
3297 pool->next_free_entry += 1;
3300 else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
3302 inst.error = _("literal pool overflow");
/* Store the 8-byte value as two 4-byte constant entries and raise
   the pool's alignment requirement to 8 bytes (2^3).  */
3306 pool->literals[entry] = inst.reloc.exp;
3307 pool->literals[entry].X_op = O_constant;
3308 pool->literals[entry].X_add_number = imm1;
3309 pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3310 pool->literals[entry++].X_md = 4;
3311 pool->literals[entry] = inst.reloc.exp;
3312 pool->literals[entry].X_op = O_constant;
3313 pool->literals[entry].X_add_number = imm2;
3314 pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3315 pool->literals[entry].X_md = 4;
3316 pool->alignment = 3;
3317 pool->next_free_entry += 1;
/* Ordinary (non 8-byte) literal: append a single 4-byte entry.  */
3321 pool->literals[entry] = inst.reloc.exp;
3322 pool->literals[entry].X_md = 4;
3326 /* PR ld/12974: Record the location of the first source line to reference
3327 this entry in the literal pool. If it turns out during linking that the
3328 symbol does not exist we will be able to give an accurate line number for
3329 the (first use of the) missing reference. */
3330 if (debug_type == DEBUG_DWARF2)
3331 dwarf2_where (pool->locs + entry);
3333 pool->next_free_entry += 1;
/* Reuse an existing padding slot for this literal.  */
3335 else if (padding_slot_p)
3337 pool->literals[entry] = inst.reloc.exp;
3338 pool->literals[entry].X_md = nbytes;
/* Rewrite the instruction's reloc expression to address the pool
   entry: pool symbol + byte offset of the entry.  */
3341 inst.reloc.exp.X_op = O_symbol;
3342 inst.reloc.exp.X_add_number = pool_size;
3343 inst.reloc.exp.X_add_symbol = pool->symbol;
/* Hook called for a label without a trailing colon (CCS syntax).
   When a ".asmfunc" is waiting for its name, validates the label and
   uses it as the function name for debug output, then advances the
   asmfunc state machine.  Returns FALSE on an invalid label.  */
3349 tc_start_label_without_colon (char unused1 ATTRIBUTE_UNUSED, const char * rest)
3351 bfd_boolean ret = TRUE;
3353 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3355 const char *label = rest;
/* Back up to the start of the current line.  */
3357 while (!is_end_of_line[(int) label[-1]])
3362 as_bad (_("Invalid label '%s'"), label);
3366 asmfunc_debug (label);
3368 asmfunc_state = WAITING_ENDASMFUNC;
3374 /* Can't use symbol_new here, so have to create a symbol and then at
3375 a later date assign it a value. Thats what these functions do. */
/* Assign a name, segment, value and fragment to SYMBOLP, which was
   created earlier without them (see the comment above: symbol_new
   cannot be used for pool symbols).  The name is copied onto the
   notes obstack, canonicalized if the target requires it, and the
   symbol is appended to the global symbol chain.  */
3378 symbol_locate (symbolS * symbolP,
3379 const char * name, /* It is copied, the caller can modify. */
3380 segT segment, /* Segment identifier (SEG_<something>). */
3381 valueT valu, /* Symbol value. */
3382 fragS * frag) /* Associated fragment. */
3385 char * preserved_copy_of_name;
3387 name_length = strlen (name) + 1; /* +1 for \0. */
/* NOTE(review): "¬es" below looks like a mis-encoded "&notes"
   (the notes obstack) — confirm against the upstream source.  */
3388 obstack_grow (¬es, name, name_length);
3389 preserved_copy_of_name = (char *) obstack_finish (¬es);
3391 #ifdef tc_canonicalize_symbol_name
3392 preserved_copy_of_name =
3393 tc_canonicalize_symbol_name (preserved_copy_of_name);
3396 S_SET_NAME (symbolP, preserved_copy_of_name);
3398 S_SET_SEGMENT (symbolP, segment);
3399 S_SET_VALUE (symbolP, valu);
3400 symbol_clear_list_pointers (symbolP);
3402 symbol_set_frag (symbolP, frag);
3404 /* Link to end of symbol chain. */
3406 extern int symbol_table_frozen;
3408 if (symbol_table_frozen)
3412 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3414 obj_symbol_new_hook (symbolP);
3416 #ifdef tc_symbol_new_hook
3417 tc_symbol_new_hook (symbolP);
3421 verify_symbol_chain (symbol_rootP, symbol_lastP);
3422 #endif /* DEBUG_SYMS */
/* Handle the ".ltorg" directive: dump the current section's literal
   pool at the current location.  Aligns the pool, places and names
   the pool's symbol, emits every entry, then marks the pool empty so
   it can be refilled and dumped again later.  */
3426 s_ltorg (int ignored ATTRIBUTE_UNUSED)
3429 literal_pool * pool;
3432 pool = find_literal_pool ();
/* Nothing to do if there is no pool, or it is empty.  */
3434 || pool->symbol == NULL
3435 || pool->next_free_entry == 0)
3438 /* Align pool as you have word accesses.
3439 Only make a frag if we have to. */
3441 frag_align (pool->alignment, 0, 0);
3443 record_alignment (now_seg, 2);
/* Literal pool data is data, not code: switch the mapping state
   and emit a mapping symbol.  */
3446 seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
3447 make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
/* Name contains a \002 byte so it cannot clash with user symbols.  */
3449 sprintf (sym_name, "$$lit_\002%x", pool->id);
3451 symbol_locate (pool->symbol, sym_name, now_seg,
3452 (valueT) frag_now_fix (), frag_now);
3453 symbol_table_insert (pool->symbol);
3455 ARM_SET_THUMB (pool->symbol, thumb_mode);
3457 #if defined OBJ_COFF || defined OBJ_ELF
3458 ARM_SET_INTERWORK (pool->symbol, support_interwork);
3461 for (entry = 0; entry < pool->next_free_entry; entry ++)
3464 if (debug_type == DEBUG_DWARF2)
3465 dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3467 /* First output the expression in the instruction to the pool. */
3468 emit_expr (&(pool->literals[entry]),
3469 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
3472 /* Mark the pool as empty. */
3473 pool->next_free_entry = 0;
3474 pool->symbol = NULL;
3478 /* Forward declarations for functions below, in the MD interface
3480 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3481 static valueT create_unwind_entry (int);
3482 static void start_unwind_section (const segT, int);
3483 static void add_unwind_opcode (valueT, int);
3484 static void flush_pending_unwind (void);
3486 /* Directives: Data. */
/* Handle data directives (.word etc.) of NBYTES each, allowing an
   optional relocation suffix such as (plt) after symbolic operands.
   Comma-separated expressions are emitted in turn; plain expressions
   go through emit_expr, while suffixed ones get an explicit fixup.  */
3489 s_arm_elf_cons (int nbytes)
3493 #ifdef md_flush_pending_output
3494 md_flush_pending_output ();
3497 if (is_it_end_of_statement ())
3499 demand_empty_rest_of_line ();
3503 #ifdef md_cons_align
3504 md_cons_align (nbytes);
3507 mapping_state (MAP_DATA);
3511 char *base = input_line_pointer;
/* Non-symbolic expressions cannot carry a reloc suffix.  */
3515 if (exp.X_op != O_symbol)
3516 emit_expr (&exp, (unsigned int) nbytes);
3519 char *before_reloc = input_line_pointer;
3520 reloc = parse_reloc (&input_line_pointer);
3523 as_bad (_("unrecognized relocation suffix"));
3524 ignore_rest_of_line ();
3527 else if (reloc == BFD_RELOC_UNUSED)
3528 emit_expr (&exp, (unsigned int) nbytes);
3531 reloc_howto_type *howto = (reloc_howto_type *)
3532 bfd_reloc_type_lookup (stdoutput,
3533 (bfd_reloc_code_real_type) reloc);
3534 int size = bfd_get_reloc_size (howto);
3536 if (reloc == BFD_RELOC_ARM_PLT32)
3538 as_bad (_("(plt) is only valid on branch targets"));
3539 reloc = BFD_RELOC_UNUSED;
/* The relocation must fit in the requested data size.  */
3544 as_bad (_("%s relocations do not fit in %d bytes"),
3545 howto->name, nbytes);
3548 /* We've parsed an expression stopping at O_symbol.
3549 But there may be more expression left now that we
3550 have parsed the relocation marker. Parse it again.
3551 XXX Surely there is a cleaner way to do this. */
3552 char *p = input_line_pointer;
/* Splice the reloc suffix out of the input buffer so the
   expression parser sees a contiguous expression.  */
3554 char *save_buf = (char *) alloca (input_line_pointer - base);
3555 memcpy (save_buf, base, input_line_pointer - base);
3556 memmove (base + (input_line_pointer - before_reloc),
3557 base, before_reloc - base);
3559 input_line_pointer = base + (input_line_pointer-before_reloc);
3561 memcpy (base, save_buf, p - base);
/* Emit zero-filled data and attach the fixup for the reloc,
   placed at the correct offset within the NBYTES field.  */
3563 offset = nbytes - size;
3564 p = frag_more (nbytes);
3565 memset (p, 0, nbytes);
3566 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3567 size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3572 while (*input_line_pointer++ == ',');
3574 /* Put terminator back into stream. */
3575 input_line_pointer --;
3576 demand_empty_rest_of_line ();
3579 /* Emit an expression containing a 32-bit thumb instruction.
3580 Implementation based on put_thumb32_insn. */
/* Emit a 32-bit Thumb instruction as two 16-bit halves, high half
   first (based on put_thumb32_insn).  */
3583 emit_thumb32_expr (expressionS * exp)
3585 expressionS exp_high = *exp;
3587 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3588 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3589 exp->X_add_number &= 0xffff;
3590 emit_expr (exp, (unsigned int) THUMB_SIZE);
3593 /* Guess the instruction size based on the opcode. */
/* Thumb encodings below 0xe800 are 16-bit; values that look like a
   full 32-bit encoding (>= 0xe8000000) are 32-bit.  */
3596 thumb_insn_size (int opcode)
3598 if ((unsigned int) opcode < 0xe800u)
3600 else if ((unsigned int) opcode >= 0xe8000000u)
/* Emit a raw instruction of NBYTES from the constant expression EXP
   (the ".inst" family).  In Thumb mode the size may be inferred from
   the opcode; .inst.n/.inst.w force it.  Maintains the IT-block
   state machine around the emission.  */
3607 emit_insn (expressionS *exp, int nbytes)
3611 if (exp->X_op == O_constant)
/* No explicit size: infer it from the Thumb opcode.  */
3616 size = thumb_insn_size (exp->X_add_number);
3620 if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3622 as_bad (_(".inst.n operand too big. "\
3623 "Use .inst.w instead"));
3628 if (now_it.state == AUTOMATIC_IT_BLOCK)
3629 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3631 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
/* Little-endian Thumb-2: emit as two halfwords, high first.  */
3633 if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3634 emit_thumb32_expr (exp);
3636 emit_expr (exp, (unsigned int) size);
3638 it_fsm_post_encode ();
3642 as_bad (_("cannot determine Thumb instruction size. " \
3643 "Use .inst.n/.inst.w instead"));
3646 as_bad (_("constant expression required"));
3651 /* Like s_arm_elf_cons but do not use md_cons_align and
3652 set the mapping state to MAP_ARM/MAP_THUMB. */
/* Handle the ".inst[.n|.w]" directive: emit raw instruction words of
   NBYTES each.  Like s_arm_elf_cons but without md_cons_align, and it
   sets the mapping state to MAP_ARM/MAP_THUMB instead of MAP_DATA.  */
3655 s_arm_elf_inst (int nbytes)
3657 if (is_it_end_of_statement ())
3659 demand_empty_rest_of_line ();
3663 /* Calling mapping_state () here will not change ARM/THUMB,
3664 but will ensure not to be in DATA state. */
3667 mapping_state (MAP_THUMB);
/* Width suffixes (.n/.w) only make sense for Thumb.  */
3672 as_bad (_("width suffixes are invalid in ARM mode"));
3673 ignore_rest_of_line ();
3679 mapping_state (MAP_ARM);
3688 if (! emit_insn (& exp, nbytes))
3690 ignore_rest_of_line ();
3694 while (*input_line_pointer++ == ',');
3696 /* Put terminator back into stream. */
3697 input_line_pointer --;
3698 demand_empty_rest_of_line ();
3701 /* Parse a .rel31 directive. */
/* Handle the ".rel31" directive: "<bit>, <expression>" where <bit> is
   0 or 1 and becomes the top bit of a 32-bit word whose low 31 bits
   get a self-relative R_ARM_PREL31 relocation.  */
3704 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3711 if (*input_line_pointer == '1')
3712 highbit = 0x80000000;
3713 else if (*input_line_pointer != '0')
3714 as_bad (_("expected 0 or 1"));
3716 input_line_pointer++;
3717 if (*input_line_pointer != ',')
3718 as_bad (_("missing comma"));
3719 input_line_pointer++;
3721 #ifdef md_flush_pending_output
3722 md_flush_pending_output ();
3725 #ifdef md_cons_align
3729 mapping_state (MAP_DATA);
/* Emit the word with only the high bit set; the rest is filled in
   by the PREL31 fixup.  */
3734 md_number_to_chars (p, highbit, 4);
3735 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3736 BFD_RELOC_ARM_PREL31);
3738 demand_empty_rest_of_line ();
3741 /* Directives: AEABI stack-unwind tables. */
3743 /* Parse an unwind_fnstart directive. Simply records the current location. */
/* Handle ".fnstart": record the current location as the start of a
   function and reset all per-function unwind state.  */
3746 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3748 demand_empty_rest_of_line ();
3749 if (unwind.proc_start)
3751 as_bad (_("duplicate .fnstart directive"));
3755 /* Mark the start of the function. */
3756 unwind.proc_start = expr_build_dot ();
3758 /* Reset the rest of the unwind info. */
3759 unwind.opcode_count = 0;
3760 unwind.table_entry = NULL;
3761 unwind.personality_routine = NULL;
3762 unwind.personality_index = -1;
3763 unwind.frame_size = 0;
3764 unwind.fp_offset = 0;
3765 unwind.fp_reg = REG_SP;
3767 unwind.sp_restored = 0;
3771 /* Parse a handlerdata directive. Creates the exception handling table entry
3772 for the function. */
3775 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3777 demand_empty_rest_of_line ();
3778 if (!unwind.proc_start)
3779 as_bad (MISSING_FNSTART);
/* Only one .handlerdata per function is allowed.  */
3781 if (unwind.table_entry)
3782 as_bad (_("duplicate .handlerdata directive"));
3784 create_unwind_entry (1);
3787 /* Parse an unwind_fnend directive. Generates the index table entry. */
/* Handle ".fnend": finish the current function's unwind information.
   Emits the two-word EHABI index table entry (a PREL31 reference to
   the function start, and either an inline table entry or a PREL31
   reference to the out-of-line entry), records any dependency on the
   predefined __aeabi_unwind_cpp_pr* personality routines, and
   restores the original section.  */
3790 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3795 unsigned int marked_pr_dependency;
3797 demand_empty_rest_of_line ();
3799 if (!unwind.proc_start)
3801 as_bad (_(".fnend directive without .fnstart"));
3805 /* Add eh table entry. */
3806 if (unwind.table_entry == NULL)
3807 val = create_unwind_entry (0);
3811 /* Add index table entry. This is two words. */
3812 start_unwind_section (unwind.saved_seg, 1);
3813 frag_align (2, 0, 0);
3814 record_alignment (now_seg, 2);
3816 ptr = frag_more (8);
3818 where = frag_now_fix () - 8;
3820 /* Self relative offset of the function start. */
3821 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3822 BFD_RELOC_ARM_PREL31);
3824 /* Indicate dependency on EHABI-defined personality routines to the
3825 linker, if it hasn't been done already. */
3826 marked_pr_dependency
3827 = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
3828 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3829 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3831 static const char *const name[] =
3833 "__aeabi_unwind_cpp_pr0",
3834 "__aeabi_unwind_cpp_pr1",
3835 "__aeabi_unwind_cpp_pr2"
/* A zero-size R_ARM_NONE fixup creates the reference without
   emitting any data.  */
3837 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3838 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3839 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3840 |= 1 << unwind.personality_index;
3844 /* Inline exception table entry. */
3845 md_number_to_chars (ptr + 4, val, 4);
3847 /* Self relative offset of the table entry. */
3848 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3849 BFD_RELOC_ARM_PREL31);
3851 /* Restore the original section. */
3852 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3854 unwind.proc_start = NULL;
3858 /* Parse an unwind_cantunwind directive. */
/* Handle ".cantunwind": mark the function as not unwindable
   (personality index -2); incompatible with an explicit personality.  */
3861 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3863 demand_empty_rest_of_line ();
3864 if (!unwind.proc_start)
3865 as_bad (MISSING_FNSTART);
3867 if (unwind.personality_routine || unwind.personality_index != -1)
3868 as_bad (_("personality routine specified for cantunwind frame"));
3870 unwind.personality_index = -2;
3874 /* Parse a personalityindex directive. */
3877 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3881 if (!unwind.proc_start)
3882 as_bad (MISSING_FNSTART);
3884 if (unwind.personality_routine || unwind.personality_index != -1)
3885 as_bad (_("duplicate .personalityindex directive"));
/* The index must be a constant in [0, 15].  */
3889 if (exp.X_op != O_constant
3890 || exp.X_add_number < 0 || exp.X_add_number > 15)
3892 as_bad (_("bad personality routine number"));
3893 ignore_rest_of_line ();
3897 unwind.personality_index = exp.X_add_number;
3899 demand_empty_rest_of_line ();
3903 /* Parse a personality directive. */
/* Handle ".personality <name>": record the named routine as the
   function's personality routine.  */
3906 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3910 if (!unwind.proc_start)
3911 as_bad (MISSING_FNSTART);
3913 if (unwind.personality_routine || unwind.personality_index != -1)
3914 as_bad (_("duplicate .personality directive"));
3916 name = input_line_pointer;
3917 c = get_symbol_end ();
3918 p = input_line_pointer;
3919 unwind.personality_routine = symbol_find_or_make (name);
3921 demand_empty_rest_of_line ();
3925 /* Parse a directive saving core registers. */
/* Handle ".save" of a core (r0-r15) register list: emit the EHABI pop
   opcodes for the saved registers and account for the stack space.  */
3928 s_arm_unwind_save_core (void)
3934 range = parse_reg_list (&input_line_pointer);
3937 as_bad (_("expected register list"));
3938 ignore_rest_of_line ();
3942 demand_empty_rest_of_line ();
3944 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3945 into .unwind_save {..., sp...}. We aren't bothered about the value of
3946 ip because it is clobbered by calls. */
3947 if (unwind.sp_restored && unwind.fp_reg == 12
3948 && (range & 0x3000) == 0x1000)
3950 unwind.opcode_count--;
3951 unwind.sp_restored = 0;
3952 range = (range | 0x2000) & ~0x1000;
3953 unwind.pending_offset = 0;
3959 /* See if we can use the short opcodes. These pop a block of up to 8
3960 registers starting with r4, plus maybe r14. */
3961 for (n = 0; n < 8; n++)
3963 /* Break at the first non-saved register. */
3964 if ((range & (1 << (n + 4))) == 0)
3967 /* See if there are any other bits set. */
3968 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3970 /* Use the long form. */
3971 op = 0x8000 | ((range >> 4) & 0xfff);
3972 add_unwind_opcode (op, 2);
3976 /* Use the short form. */
3978 op = 0xa8; /* Pop r14. */
3980 op = 0xa0; /* Do not pop r14. */
3982 add_unwind_opcode (op, 1);
/* Pop of r0-r3 (argument registers) uses a separate opcode.  */
3989 op = 0xb100 | (range & 0xf);
3990 add_unwind_opcode (op, 2);
3993 /* Record the number of bytes pushed. */
3994 for (n = 0; n < 16; n++)
3996 if (range & (1 << n))
3997 unwind.frame_size += 4;
4002 /* Parse a directive saving FPA registers. */
/* Handle ".save" of FPA registers: "freg, <count>" where count is the
   number of registers (1-4) starting at REG.  Emits the matching
   EHABI opcode and accounts 12 bytes of frame per register.  */
4005 s_arm_unwind_save_fpa (int reg)
4011 /* Get Number of registers to transfer. */
4012 if (skip_past_comma (&input_line_pointer) != FAIL)
4015 exp.X_op = O_illegal;
4017 if (exp.X_op != O_constant)
4019 as_bad (_("expected , <constant>"));
4020 ignore_rest_of_line ();
4024 num_regs = exp.X_add_number;
4026 if (num_regs < 1 || num_regs > 4)
4028 as_bad (_("number of registers must be in the range [1:4]"));
4029 ignore_rest_of_line ();
4033 demand_empty_rest_of_line ();
/* Short form for the standard block; long form otherwise.  */
4038 op = 0xb4 | (num_regs - 1);
4039 add_unwind_opcode (op, 1);
4044 op = 0xc800 | (reg << 4) | (num_regs - 1);
4045 add_unwind_opcode (op, 2);
4047 unwind.frame_size += num_regs * 12;
4051 /* Parse a directive saving VFP registers for ARMv6 and above. */
/* Handle ".vsave" of VFP D-registers on ARMv6+: parse the register
   list and emit FSTMD/FLDMD-style opcodes, splitting the list at d16
   between the VFPv3 (d16-d31) and base (d0-d15) opcode ranges.  Each
   D register occupies 8 bytes of frame.  */
4054 s_arm_unwind_save_vfp_armv6 (void)
4059 int num_vfpv3_regs = 0;
4060 int num_regs_below_16;
4062 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
4065 as_bad (_("expected register list"));
4066 ignore_rest_of_line ();
4070 demand_empty_rest_of_line ();
4072 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4073 than FSTMX/FLDMX-style ones). */
4075 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4077 num_vfpv3_regs = count;
4078 else if (start + count > 16)
4079 num_vfpv3_regs = start + count - 16;
4081 if (num_vfpv3_regs > 0)
4083 int start_offset = start > 16 ? start - 16 : 0;
4084 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
4085 add_unwind_opcode (op, 2);
4088 /* Generate opcode for registers numbered in the range 0 .. 15. */
4089 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
4090 gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
4091 if (num_regs_below_16 > 0)
4093 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
4094 add_unwind_opcode (op, 2);
4097 unwind.frame_size += count * 8;
4101 /* Parse a directive saving VFP registers for pre-ARMv6. */
/* Pre-ARMv6 ".save" of VFP D-registers: FSTMX-style layout, which
   adds one extra word (hence count * 8 + 4 frame bytes).
   NOTE(review): "®" below looks like a mis-encoded "&reg" —
   confirm against the upstream source.  */
4104 s_arm_unwind_save_vfp (void)
4110 count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D);
4113 as_bad (_("expected register list"));
4114 ignore_rest_of_line ();
4118 demand_empty_rest_of_line ();
4123 op = 0xb8 | (count - 1);
4124 add_unwind_opcode (op, 1);
4129 op = 0xb300 | (reg << 4) | (count - 1);
4130 add_unwind_opcode (op, 2);
4132 unwind.frame_size += count * 8 + 4;
4136 /* Parse a directive saving iWMMXt data registers. */
/* Handle ".save" of iWMMXt data (wR) registers.  Parses a brace-
   enclosed list (single registers and ranges), accounts 8 bytes of
   frame per register, merges with a compatible previous opcode when
   the register blocks are contiguous, then emits save opcodes for
   each contiguous block in descending register order.  */
4139 s_arm_unwind_save_mmxwr (void)
4147 if (*input_line_pointer == '{')
4148 input_line_pointer++;
4152 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4156 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4161 as_tsktsk (_("register list not in ascending order"));
/* Handle a "wrN-wrM" range.  */
4164 if (*input_line_pointer == '-')
4166 input_line_pointer++;
4167 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4170 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4173 else if (reg >= hi_reg)
4175 as_bad (_("bad register range"));
4178 for (; reg < hi_reg; reg++)
4182 while (skip_past_comma (&input_line_pointer) != FAIL);
4184 skip_past_char (&input_line_pointer, '}');
4186 demand_empty_rest_of_line ();
4188 /* Generate any deferred opcodes because we're going to be looking at
4190 flush_pending_unwind ();
4192 for (i = 0; i < 16; i++)
4194 if (mask & (1 << i))
4195 unwind.frame_size += 8;
4198 /* Attempt to combine with a previous opcode. We do this because gcc
4199 likes to output separate unwind directives for a single block of
4201 if (unwind.opcode_count > 0)
4203 i = unwind.opcodes[unwind.opcode_count - 1];
4204 if ((i & 0xf8) == 0xc0)
4207 /* Only merge if the blocks are contiguous. */
4210 if ((mask & 0xfe00) == (1 << 9))
4212 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4213 unwind.opcode_count--;
4216 else if (i == 6 && unwind.opcode_count >= 2)
4218 i = unwind.opcodes[unwind.opcode_count - 2];
4222 op = 0xffff << (reg - 1);
4224 && ((mask & op) == (1u << (reg - 1))))
/* Merge the previous two-byte opcode's range into MASK.  */
4226 op = (1 << (reg + i + 1)) - 1;
4227 op &= ~((1 << reg) - 1);
4229 unwind.opcode_count -= 2;
4236 /* We want to generate opcodes in the order the registers have been
4237 saved, ie. descending order. */
4238 for (reg = 15; reg >= -1; reg--)
4240 /* Save registers in blocks. */
4242 || !(mask & (1 << reg)))
4244 /* We found an unsaved reg. Generate opcodes to save the
/* Short form covers wr10 and above; long form otherwise.  */
4251 op = 0xc0 | (hi_reg - 10);
4252 add_unwind_opcode (op, 1);
4257 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4258 add_unwind_opcode (op, 2);
4267 ignore_rest_of_line ();
/* Handle ".save" of iWMMXt control (wCGR) registers.  Parses a
   brace-enclosed list (single registers and ranges) into a mask,
   accounts 4 bytes of frame per register, and emits one opcode.  */
4271 s_arm_unwind_save_mmxwcg (void)
4278 if (*input_line_pointer == '{')
4279 input_line_pointer++;
4281 skip_whitespace (input_line_pointer);
4285 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4289 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4295 as_tsktsk (_("register list not in ascending order"));
/* Handle a "wcgrN-wcgrM" range.  */
4298 if (*input_line_pointer == '-')
4300 input_line_pointer++;
4301 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4304 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4307 else if (reg >= hi_reg)
4309 as_bad (_("bad register range"));
4312 for (; reg < hi_reg; reg++)
4316 while (skip_past_comma (&input_line_pointer) != FAIL);
4318 skip_past_char (&input_line_pointer, '}');
4320 demand_empty_rest_of_line ();
4322 /* Generate any deferred opcodes because we're going to be looking at
4324 flush_pending_unwind ();
4326 for (reg = 0; reg < 16; reg++)
4328 if (mask & (1 << reg))
4329 unwind.frame_size += 4;
4332 add_unwind_opcode (op, 2);
4335 ignore_rest_of_line ();
4339 /* Parse an unwind_save directive.
4340 If the argument is non-zero, this is a .vsave directive. */
/* Handle ".save"/".vsave" (ARCH_V6 non-zero for .vsave): peek at the
   first register to determine its class, then dispatch to the
   class-specific handler (core, FPA, VFP, iWMMXt).  */
4343 s_arm_unwind_save (int arch_v6)
4346 struct reg_entry *reg;
4347 bfd_boolean had_brace = FALSE;
4349 if (!unwind.proc_start)
4350 as_bad (MISSING_FNSTART);
4352 /* Figure out what sort of save we have. */
4353 peek = input_line_pointer;
/* Parse ahead on a copy of the pointer so the chosen handler can
   re-parse the full operand itself.  */
4361 reg = arm_reg_parse_multi (&peek);
4365 as_bad (_("register expected"));
4366 ignore_rest_of_line ();
4375 as_bad (_("FPA .unwind_save does not take a register list"));
4376 ignore_rest_of_line ();
4379 input_line_pointer = peek;
4380 s_arm_unwind_save_fpa (reg->number);
4384 s_arm_unwind_save_core ();
/* VFP: ARMv6+ (.vsave) uses FSTMD-style, older uses FSTMX-style.  */
4389 s_arm_unwind_save_vfp_armv6 ();
4391 s_arm_unwind_save_vfp ();
4394 case REG_TYPE_MMXWR:
4395 s_arm_unwind_save_mmxwr ();
4398 case REG_TYPE_MMXWCG:
4399 s_arm_unwind_save_mmxwcg ();
4403 as_bad (_(".unwind_save does not support this kind of register"));
4404 ignore_rest_of_line ();
4409 /* Parse an unwind_movsp directive. */
/* Handle ".movsp <reg> [, #offset]": note that sp is restored from
   REG (+ OFFSET), emit the restore opcode, and remember reg/offset
   for later frame-pointer-relative restores.  */
4412 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4418 if (!unwind.proc_start)
4419 as_bad (MISSING_FNSTART);
4421 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4424 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4425 ignore_rest_of_line ();
4429 /* Optional constant. */
4430 if (skip_past_comma (&input_line_pointer) != FAIL)
4432 if (immediate_for_directive (&offset) == FAIL)
4438 demand_empty_rest_of_line ();
4440 if (reg == REG_SP || reg == REG_PC)
4442 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
/* A .setfp must not already have redirected the frame pointer.  */
4446 if (unwind.fp_reg != REG_SP)
4447 as_bad (_("unexpected .unwind_movsp directive"));
4449 /* Generate opcode to restore the value. */
4451 add_unwind_opcode (op, 1);
4453 /* Record the information for later. */
4454 unwind.fp_reg = reg;
4455 unwind.fp_offset = unwind.frame_size - offset;
4456 unwind.sp_restored = 1;
4459 /* Parse an unwind_pad directive. */
/* Handle ".pad #offset": record extra stack adjustment (must be a
   multiple of 4); no opcode is emitted until the pending offset is
   flushed.  */
4462 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4466 if (!unwind.proc_start)
4467 as_bad (MISSING_FNSTART);
4469 if (immediate_for_directive (&offset) == FAIL)
4474 as_bad (_("stack increment must be multiple of 4"));
4475 ignore_rest_of_line ();
4479 /* Don't generate any opcodes, just record the details for later. */
4480 unwind.frame_size += offset;
4481 unwind.pending_offset += offset;
4483 demand_empty_rest_of_line ();
4486 /* Parse an unwind_setfp directive. */
4489 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4495 if (!unwind.proc_start)
4496 as_bad (MISSING_FNSTART);
4498 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4499 if (skip_past_comma (&input_line_pointer) == FAIL)
4502 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4504 if (fp_reg == FAIL || sp_reg == FAIL)
4506 as_bad (_("expected <reg>, <reg>"));
4507 ignore_rest_of_line ();
4511 /* Optional constant. */
4512 if (skip_past_comma (&input_line_pointer) != FAIL)
4514 if (immediate_for_directive (&offset) == FAIL)
4520 demand_empty_rest_of_line ();
4522 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4524 as_bad (_("register must be either sp or set by a previous"
4525 "unwind_movsp directive"));
4529 /* Don't generate any opcodes, just record the information for later. */
4530 unwind.fp_reg = fp_reg;
4532 if (sp_reg == REG_SP)
4533 unwind.fp_offset = unwind.frame_size - offset;
4535 unwind.fp_offset -= offset;
/* Parse an unwind_raw directive: .unwind_raw offset, byte [, byte ...].
   Lets the user emit arbitrary unwind opcode bytes directly, adjusting
   the recorded frame size by OFFSET.  */

s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
  /* This is an arbitrary limit.  */
  unsigned char op[16];

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (exp.X_op == O_constant
      && skip_past_comma (&input_line_pointer) != FAIL)
      unwind.frame_size += exp.X_add_number;
    /* Force the error path below when the offset/comma parse failed.  */
    exp.X_op = O_illegal;

  if (exp.X_op != O_constant)
      as_bad (_("expected <offset>, <opcode>"));
      ignore_rest_of_line ();

  /* Parse the opcode.  */
	  as_bad (_("unwind opcode too long"));
	  ignore_rest_of_line ();
      /* Each opcode component must fit in one byte.  */
      if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
	  as_bad (_("invalid unwind opcode"));
	  ignore_rest_of_line ();
      op[count++] = exp.X_add_number;

      /* Parse the next byte.  */
      if (skip_past_comma (&input_line_pointer) == FAIL)

  /* Add the opcode bytes in reverse order.  */
    add_unwind_opcode (op[count], 1);

  demand_empty_rest_of_line ();
/* Parse a .eabi_attribute directive.  Delegates the actual tag/value
   parsing to the generic ELF vendor-attribute code, then remembers that
   the tag was set explicitly so later defaulting logic leaves it alone.  */

s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
  int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);

  if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
    attributes_set_explicitly[tag] = 1;
/* Emit a tls fix for the symbol: attaches a TLS descriptor-sequence
   relocation (Thumb or ARM flavour depending on the current mode) at the
   current output position, without emitting any bytes itself.  */

s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#ifdef md_cons_align
  /* Since we're just labelling the code, there's no need to define a
     new frag here.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
#endif /* OBJ_ELF */
/* Forward declarations for directive handlers defined later in the file;
   needed so they can be referenced from md_pseudo_table.  */
static void s_arm_arch (int);
static void s_arm_object_arch (int);
static void s_arm_cpu (int);
static void s_arm_fpu (int);
static void s_arm_arch_extension (int);
/* Handle the PE .secrel32 directive: emit one or more comma-separated
   32-bit section-relative expressions.  */
pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
      /* Symbolic operands become section-relative relocations.  */
      if (exp.X_op == O_symbol)
	exp.X_op = O_secrel;

      emit_expr (&exp, 4);
  while (*input_line_pointer++ == ',');

  /* Step back over the terminator consumed by the loop above.  */
  input_line_pointer--;
  demand_empty_rest_of_line ();
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
  /* Never called because '.req' does not start a line.  */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align,	  0 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
  { "word",	   s_arm_elf_cons, 4 },
  { "long",	   s_arm_elf_cons, 4 },
  { "inst.n",	   s_arm_elf_inst, 2 },
  { "inst.w",	   s_arm_elf_inst, 4 },
  { "inst",	   s_arm_elf_inst, 0 },
  { "rel31",	   s_arm_rel31,	  0 },
  /* EHABI unwind-table directives.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
  { "tlsdescseq",	s_arm_tls_descseq,      0 },
  /* These are used for dwarf.  */
  /* These are used for dwarf2.  */
  { "file", (void (*) (int)) dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
  {"secrel32", pe_directive_secrel, 0},
  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},
/* Parser functions used exclusively in instruction operands.  */

/* Generic immediate-value read function for use in insn parsing.
   STR points to the beginning of the immediate (the leading #);
   VAL receives the value; if the value is outside [MIN, MAX]
   issue an error.  PREFIX_OPT is true if the immediate prefix is
   optional rather than required.  Returns SUCCESS/FAIL via inst.error
   convention.  */

parse_immediate (char **str, int *val, int min, int max,
		 bfd_boolean prefix_opt)
  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
  /* Only fully-resolved constants are acceptable here.  */
  if (exp.X_op != O_constant)
      inst.error = _("constant expression required");

  if (exp.X_add_number < min || exp.X_add_number > max)
      inst.error = _("immediate value out of range");

  *val = exp.X_add_number;
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions.  Puts the result directly in inst.operands[i]: the low 32
   bits in .imm, the high 32 bits (if any) in .reg with .regisimm set.  */

parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
  expressionS *exp_p = in_exp ? in_exp : &exp;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
	  inst.operands[i].regisimm = 1;
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0].  Make sure we put 32 bits in imm and
	 32 bits in reg, in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])

      /* Assemble the low 32 bits into .imm, littlenum by littlenum.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      /* And the high 32 bits into .reg.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= generic_bignum[idx]
				<< (LITTLENUM_NUMBER_OF_BITS * j);
      inst.operands[i].regisimm = 1;
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  The FPA can encode only
   a small fixed set of values (fp_const/fp_values), so both a fast
   string match and a full expression parse are attempted.  */

parse_fpa_immediate (char ** str)
  LITTLENUM_TYPE words[MAX_LITTLENUMS];

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
      /* Compare against each representable FPA constant.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	      if (words[j] != fp_values[i][j])

	  if (j == MAX_LITTLENUMS)

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.  */
      if (gen_to_words (words, 5, (long) 15) == 0)
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		  if (words[j] != fp_values[i][j])

	      if (j == MAX_LITTLENUMS)
		  /* Restore the scrubber state before returning.  */
		  *str = input_line_pointer;
		  input_line_pointer = save_in;

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, i.e. is encodable as a Neon
   8-bit floating-point immediate; 0 otherwise.  */

static int
is_quarter_float (unsigned imm)
{
  /* The exponent's upper bits (30-25) must be all-equal to bit 29:
     either 1000000/0111111 patterns, checked against the value below.  */
  unsigned expected = (imm & 0x20000000) ? 0x3e000000u : 0x40000000u;

  /* The low 19 bits of the mantissa must be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == expected;
}
/* Detect the presence of a floating point or integer zero constant,
   e.g. #0.0 or #0, at *IN.  */

parse_ifimm_zero (char **in)
  /* The '#' (or other immediate prefix) is mandatory here.  */
  if (!is_immediate_prefix (**in))

  error_code = atof_generic (in, ".", EXP_CHARS,
			     &generic_floating_point_number);

      /* Accept only a positive zero: sign '+' and no significant
	 littlenums (low pointer past leader means empty mantissa).  */
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
	  > generic_floating_point_number.leader))
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.  On success *IMMED receives the 32-bit
   single-precision bit pattern.  */

parse_qfloat_immediate (char **ccp, int *immed)
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number.  Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')

  if ((str = atof_ieee (str, 's', words)) != NULL)
      unsigned fpword = 0;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;

      /* Accept quarter-precision encodable values, plus +/-0.0.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
/* Shift operands.  */
    /* Kinds of shift an ARM data-processing operand can carry.  */
    SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX

/* Maps an assembler shift mnemonic to its kind (hashed in arm_shift_hsh).  */
struct asm_shift_name
  enum shift_kind kind;

/* Third argument to parse_shift.  */
enum parse_shift_mode
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.  */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.  */
/* Parse a <shift> specifier on an ARM data processing instruction.
   This has three forms:

     (LSL|LSR|ASL|ASR|ROR) Rs
     (LSL|LSR|ASL|ASR|ROR) #imm
     RRX

   Note that ASL is assimilated to LSL in the instruction encoding, and
   RRX to ROR #0 (which cannot be written as such).  */

parse_shift (char **str, int i, enum parse_shift_mode mode)
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;

  /* Scan the alphabetic shift mnemonic.  */
  for (p = *str; ISALPHA (*p); p++)

      inst.error = _("shift expression expected");

  shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,

  if (shift_name == NULL)
      inst.error = _("shift expression expected");

  shift = shift_name->kind;

  /* Enforce the restriction requested by the caller's MODE.  */
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE:   break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	  inst.error = _("'LSL' or 'ASR' required");

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	  inst.error = _("'LSL' required");

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	  inst.error = _("'ASR' required");

  /* RRX takes no operand; everything else takes a register or #imm.  */
  if (shift != SHIFT_RRX)
      /* Whitespace can appear here if the next thing is a bare digit.	*/
      skip_whitespace (p);

      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
      else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))

  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  */

parse_shifter_operand (char **str, int i)
  /* Register form: Rm, optionally followed by a shift.  */
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);

  if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))

  if (skip_past_comma (str) == SUCCESS)
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))

      if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
	  inst.error = _("constant expression expected");

      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	  inst.error = _("invalid rotation");

      if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
	  inst.error = _("invalid constant");

      /* Encode as specified: 8-bit immediate plus rotation in bits 7+.  */
      inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;

  inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
  inst.reloc.pc_rel = 0;
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  */

struct group_reloc_table_entry

/* Varieties of non-ALU group relocation.  */

static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 }	};	/* LDC */
5308 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5311 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5313 int length = strlen (group_reloc_table[i].name);
5315 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5316 && (*str)[length] == ':')
5318 *out = &group_reloc_table[i];
5319 *str += (length + 1);
/* Parse a <shifter_operand> for an ARM data processing instruction
   (as for parse_shifter_operand) where group relocations are allowed:

      #<immediate>
      #<immediate>, <rotate>
      #:<group_reloc>:<expression>
      <Rm>
      <Rm>, <shift>

   where <group_reloc> is one of the strings defined in group_reloc_table.
   The hashes are optional.

   Everything else is as for parse_shifter_operand.  */

static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
      struct group_reloc_table_entry *entry;

      if ((*str)[0] == '#')

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
      gas_assert (inst.reloc.type != 0);

      return PARSE_OPERAND_SUCCESS;

  return parse_shifter_operand (str, i) == SUCCESS
	 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
/* Parse a Neon alignment expression.  Information is written to
   inst.operands[i].  We assume the initial ':' has been skipped.

   align	.imm = align << 8, .immisalign=1, .preind=0  */
static parse_operand_result
parse_neon_alignment (char **str, int i)
  my_get_expression (&exp, &p, GE_NO_PREFIX);

  if (exp.X_op != O_constant)
      inst.error = _("alignment must be constant");
      return PARSE_OPERAND_FAIL;

  /* The alignment lives in bits 8+ of .imm.  */
  inst.operands[i].imm = exp.X_add_number << 8;
  inst.operands[i].immisalign = 1;
  /* Alignments are not pre-indexes.  */
  inst.operands[i].preind = 0;

  return PARSE_OPERAND_SUCCESS;
/* Parse all forms of an ARM address expression.  Information is written
   to inst.operands[i] and/or inst.reloc.

   Preindexed addressing (.preind=1):

   [Rn, #offset]       .reg=Rn .reloc.exp=offset
   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   These three may have a trailing ! which causes .writeback to be set also.

   Postindexed addressing (.postind=1, .writeback=1):

   [Rn], #offset       .reg=Rn .reloc.exp=offset
   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .reloc.exp=shift_imm

   Unindexed addressing (.preind=0, .postind=0):

   [Rn], {option}      .reg=Rn .imm=option .immisreg=0

   Other:

   [Rn]{!}	       shorthand for [Rn,#0]{!}
   =immediate	       .isreg=0 .reloc.exp=immediate
   label	       .reg=PC .reloc.pc_rel=1 .reloc.exp=label

  It is the caller's responsibility to check for addressing modes not
  supported by the instruction, and to set inst.reloc.type.  */

static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
  if (skip_past_char (&p, '[') == FAIL)
      if (skip_past_char (&p, '=') == FAIL)
	  /* Bare address - translate to PC-relative offset.  */
	  inst.reloc.pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
      else if (parse_big_immediate (&p, i, &inst.reloc.exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      return PARSE_OPERAND_SUCCESS;

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
      inst.operands[i].preind = 1;

      /* Optional sign before an index register.  */
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
      else if (skip_past_char (&p, ':') == SUCCESS)
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here.  This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)

	  if (inst.operands[i].negative)
	      inst.operands[i].negative = 0;

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
		  inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;

	      if (inst.reloc.type == 0)
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		  skip_whitespace (q);
		      skip_whitespace (q);
		    inst.operands[i].negative = 1;
  else if (skip_past_char (&p, ':') == SUCCESS)
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here.  This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)

  if (skip_past_char (&p, ']') == FAIL)
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
      if (skip_past_char (&p, '{') == SUCCESS)
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	  if (inst.operands[i].preind)
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	  return PARSE_OPERAND_SUCCESS;

	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;

	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	      /* We might be using the immediate for alignment already.  If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;

	      if (inst.operands[i].negative)
		  inst.operands[i].negative = 0;

	      if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.reloc.exp.X_op == O_constant
		  && inst.reloc.exp.X_add_number == 0)
		  skip_whitespace (q);
		      skip_whitespace (q);
		    inst.operands[i].negative = 1;

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
      inst.operands[i].preind = 1;
      inst.reloc.exp.X_op = O_constant;
      inst.reloc.exp.X_add_number = 0;

  return PARSE_OPERAND_SUCCESS;
/* Convenience wrapper around parse_address_main with group relocations
   disabled; collapses the detailed result to SUCCESS/FAIL.  */
parse_address (char **str, int i)
  return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
/* As parse_address, but with group relocations of the given TYPE allowed,
   and the full parse_operand_result returned to the caller.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
  return parse_address_main (str, i, 1, type);
/* Parse an operand for a MOVW or MOVT instruction: either a plain 16-bit
   constant, or a :lower16:/:upper16:-prefixed expression which becomes a
   MOVW/MOVT relocation resolved at link time.  */

parse_half (char **str)
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.reloc.type = BFD_RELOC_ARM_MOVT;

  /* A prefix was seen: step past it before reading the expression.  */
  if (inst.reloc.type != BFD_RELOC_UNUSED)
      skip_whitespace (p);

  if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))

  /* Without a :lower16:/:upper16: prefix the value must be an
     in-range constant.  */
  if (inst.reloc.type == BFD_RELOC_UNUSED)
      if (inst.reloc.exp.X_op != O_constant)
	  inst.error = _("constant expression expected");
      if (inst.reloc.exp.X_add_number < 0
	  || inst.reloc.exp.X_add_number > 0xffff)
	  inst.error = _("immediate value out of range");
/* Miscellaneous. */

/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is true when parsing the destination of an MSR (write) rather
   than the source of an MRS (read).  */
parse_psr (char **str, bfd_boolean lhs)
  unsigned long psr_field;
  const struct asm_psr *psr;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (selected_cpu.core == arm_arch_any.core)

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  if (strncasecmp (p, "SPSR", 4) == 0)
      /* SPSR does not exist on M-profile cores.  */
      goto unsupported_psr;

      psr_field = SPSR_BIT;
  else if (strncasecmp (p, "CPSR", 4) == 0)
      goto unsupported_psr;

  else if (strncasecmp (p, "APSR", 4) == 0)
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */

      /* Scan the whole register name (alphanumerics and '_').  */
      while (ISALNUM (*p) || *p == '_');

      /* M-profile special-register names that contain "psr".  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,

	  /* If APSR is being written, a bitfield may be specified.  Note that
	     APSR itself is handled above.  */
	  if (psr->field <= 3)
	      psr_field = psr->field;

      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);

      goto unsupported_psr;

      /* A suffix follows.  */
      while (ISALNUM (*p) || *p == '_');

	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;

	  /* Accumulate the requested bits; the 0x20 marker flags a
	     duplicated letter, rejected below.  */
	  for (bit = start; bit != p; bit++)
	      switch (TOLOWER (*bit))
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;

		  inst.error = _("unexpected bit specified after APSR");

	  if (nzcvq_bits == 0x1f)

	      /* The 'g' (GE bits) suffix needs the DSP extension.  */
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		  inst.error = _("selected processor does not "
				 "support DSP extension");

	  /* Reject duplicates, partial nzcvq sets, and duplicate 'g'.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	      inst.error = _("bad bitmask specified after APSR");

	  psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,

	  psr_field |= psr->field;

	goto error;  /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "

  else if (!m_profile)
    /* These bits are never right for M-profile devices: don't set them
       (only code paths which read/write APSR reach here).  */
    psr_field |= (PSR_c | PSR_f);

  inst.error = _("selected processor does not support requested special "
		 "purpose register");

  inst.error = _("flag for {c}psr instruction expected");
/* NOTE(review): this extract is a line-numbered listing with elided lines
   (gaps in the embedded numbering); code is preserved verbatim.  */
5946 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
5947 value suitable for splatting into the AIF field of the instruction. */
5950 parse_cps_flags (char **str)
5959 case '\0': case ',':
/* Each recognised flag ORs one bit into the AIF mask:
   a -> 0x4, i -> 0x2, f -> 0x1; saw_a_flag records that at least one
   flag character was seen so an empty list can be rejected below.  */
5962 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
5963 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
5964 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
5967 inst.error = _("unrecognized CPS flag");
/* Reject "cpsie"/"cpsid" with no flags at all.  */
5972 if (saw_a_flag == 0)
5974 inst.error = _("missing CPS flags");
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  */
5982 /* Parse an endian specifier ("BE" or "LE", case insensitive);
5983 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
5986 parse_endian_specifier (char **str)
/* strncasecmp returns 0 on a match, so this branch is taken when the
   input does NOT start with "BE".  The assignments to little_endian are
   on elided lines — presumably 1 here and 0 in the else-if; verify
   against the full source.  */
5991 if (strncasecmp (s, "BE", 2))
5993 else if (strncasecmp (s, "LE", 2))
5997 inst.error = _("valid endian specifiers are be or le");
/* Reject a longer identifier such as "bex" or "le_foo": anything
   alphanumeric (or '_') after the two-character specifier is an error.  */
6001 if (ISALNUM (s[2]) || s[2] == '_')
6003 inst.error = _("valid endian specifiers are be or le");
6008 return little_endian;
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  */
6011 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6012 value suitable for poking into the rotate field of an sxt or sxta
6013 instruction, or FAIL on error. */
6016 parse_ror (char **str)
6021 if (strncasecmp (s, "ROR", 3) == 0)
6025 inst.error = _("missing rotation field after comma");
/* Only the four byte-aligned rotations are representable; the immediate
   is range-checked 0..24 here and then switched on below.  */
6029 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
/* Map rotation amount to the 2-bit rotate-field encoding.  */
6034 case 0: *str = s; return 0x0;
6035 case 8: *str = s; return 0x1;
6036 case 16: *str = s; return 0x2;
6037 case 24: *str = s; return 0x3;
6040 inst.error = _("rotation can only be 0, 8, 16, or 24");
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  */
6045 /* Parse a conditional code (from conds[] below). The value returned is in the
6046 range 0 .. 14, or FAIL. */
6048 parse_cond (char **str)
6051 const struct asm_cond *c;
6053 /* Condition codes are always 2 characters, so matching up to
6054 3 characters is sufficient. */
/* Copy up to 3 alphabetic characters, lower-cased, into cond[]; a third
   character guarantees any over-long candidate fails the hash lookup.  */
6059 while (ISALPHA (*q) && n < 3)
6061 cond[n] = TOLOWER (*q);
/* Look the (length-limited) string up in the condition-code hash.  */
6066 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6069 inst.error = _("condition required");
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  */
6077 /* If the given feature available in the selected CPU, mark it as used.
6078 Returns TRUE iff feature is available. */
6080 mark_feature_used (const arm_feature_set *feature)
6082 /* Ensure the option is valid on the current architecture. */
6083 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6086 /* Add the appropriate architecture feature for the barrier option used.
/* Record the feature in the per-ISA "used" sets — one line is the Thumb
   set, the other the ARM set; the selecting condition is on an elided
   line (presumably the current thumb mode flag — verify upstream).  */
6089 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6091 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  */
6096 /* Parse an option for a barrier instruction. Returns the encoding for the
6099 parse_barrier (char **str)
6102 const struct asm_barrier_opt *o;
/* Scan the alphabetic option name, then look it up in the barrier-option
   hash (length-limited lookup, second argument list is elided here).  */
6105 while (ISALPHA (*q))
6108 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
/* The option is only legal if its required architecture feature is
   available; mark_feature_used also records it as used.  */
6113 if (!mark_feature_used (&o->arch))
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  Parses "[Rn, Rm{, LSL #1}]" for TBB/TBH; both registers and
   the shifted flag are stored in inst.operands[0].  */
6120 /* Parse the operands of a table branch instruction. Similar to a memory
6123 parse_tb (char **str)
6128 if (skip_past_char (&p, '[') == FAIL)
6130 inst.error = _("'[' expected");
/* First register: the table base.  */
6134 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6136 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6139 inst.operands[0].reg = reg;
6141 if (skip_past_comma (&p) == FAIL)
6143 inst.error = _("',' expected");
/* Second register: the index, stored in the .imm field of operand 0.  */
6147 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6149 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6152 inst.operands[0].imm = reg;
/* Optional shift: only "LSL #1" is accepted (TBH halfword tables).  */
6154 if (skip_past_comma (&p) == SUCCESS)
6156 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6158 if (inst.reloc.exp.X_add_number != 1)
6160 inst.error = _("invalid shift");
6163 inst.operands[0].shifted = 1;
6166 if (skip_past_char (&p, ']') == FAIL)
6168 inst.error = _("']' expected");
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  The branch structure (braces, gotos, returns) is partly on
   elided lines; comments below describe only what the visible lines show.  */
6175 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6176 information on the types the operands can take and how they are encoded.
6177 Up to four operands may be read; this function handles setting the
6178 ".present" field for each read operand itself.
6179 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6180 else returns FAIL. */
6183 parse_neon_mov (char **str, int *which_operand)
6185 int i = *which_operand, val;
6186 enum arm_reg_type rtype;
6188 struct neon_type_el optype;
/* First alternative: the first operand is a Neon scalar D[x].  */
6190 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
6192 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6193 inst.operands[i].reg = val;
6194 inst.operands[i].isscalar = 1;
6195 inst.operands[i].vectype = optype;
6196 inst.operands[i++].present = 1;
6198 if (skip_past_comma (&ptr) == FAIL)
6201 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6204 inst.operands[i].reg = val;
6205 inst.operands[i].isreg = 1;
6206 inst.operands[i].present = 1;
/* Second alternative: first operand is a vector register (S, D or Q).  */
6208 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
6211 /* Cases 0, 1, 2, 3, 5 (D only). */
6212 if (skip_past_comma (&ptr) == FAIL)
6215 inst.operands[i].reg = val;
6216 inst.operands[i].isreg = 1;
6217 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6218 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6219 inst.operands[i].isvec = 1;
6220 inst.operands[i].vectype = optype;
6221 inst.operands[i++].present = 1;
6223 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6225 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6226 Case 13: VMOV <Sd>, <Rm> */
6227 inst.operands[i].reg = val;
6228 inst.operands[i].isreg = 1;
6229 inst.operands[i].present = 1;
/* A quad register cannot take the two-core-register form.  */
6231 if (rtype == REG_TYPE_NQ)
6233 first_error (_("can't use Neon quad register here"));
6236 else if (rtype != REG_TYPE_VFS)
6239 if (skip_past_comma (&ptr) == FAIL)
6241 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6243 inst.operands[i].reg = val;
6244 inst.operands[i].isreg = 1;
6245 inst.operands[i].present = 1;
/* Second operand is itself a vector register.  */
6248 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
6251 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6252 Case 1: VMOV<c><q> <Dd>, <Dm>
6253 Case 8: VMOV.F32 <Sd>, <Sm>
6254 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6256 inst.operands[i].reg = val;
6257 inst.operands[i].isreg = 1;
6258 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6259 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6260 inst.operands[i].isvec = 1;
6261 inst.operands[i].vectype = optype;
6262 inst.operands[i].present = 1;
/* Optional trailing pair of core registers (case 15).  */
6264 if (skip_past_comma (&ptr) == SUCCESS)
6269 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6272 inst.operands[i].reg = val;
6273 inst.operands[i].isreg = 1;
6274 inst.operands[i++].present = 1;
6276 if (skip_past_comma (&ptr) == FAIL)
6279 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6282 inst.operands[i].reg = val;
6283 inst.operands[i].isreg = 1;
6284 inst.operands[i].present = 1;
/* Second operand is an immediate: floating-point form first, then the
   (possibly 64-bit) integer form.  */
6287 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
6288 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6289 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6290 Case 10: VMOV.F32 <Sd>, #<imm>
6291 Case 11: VMOV.F64 <Dd>, #<imm> */
6292 inst.operands[i].immisfloat = 1;
6293 else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
6295 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6296 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6300 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
/* Third alternative: first operand is an ARM core register.  */
6304 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6307 inst.operands[i].reg = val;
6308 inst.operands[i].isreg = 1;
6309 inst.operands[i++].present = 1;
6311 if (skip_past_comma (&ptr) == FAIL)
6314 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
6316 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6317 inst.operands[i].reg = val;
6318 inst.operands[i].isscalar = 1;
6319 inst.operands[i].present = 1;
6320 inst.operands[i].vectype = optype;
6322 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6324 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6325 inst.operands[i].reg = val;
6326 inst.operands[i].isreg = 1;
6327 inst.operands[i++].present = 1;
6329 if (skip_past_comma (&ptr) == FAIL)
6332 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
6335 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
6339 inst.operands[i].reg = val;
6340 inst.operands[i].isreg = 1;
6341 inst.operands[i].isvec = 1;
6342 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6343 inst.operands[i].vectype = optype;
6344 inst.operands[i].present = 1;
/* Two core registers moving to a pair of single-precision registers
   needs a fourth operand (the second S register).  */
6346 if (rtype == REG_TYPE_VFS)
6350 if (skip_past_comma (&ptr) == FAIL)
6352 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6355 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6358 inst.operands[i].reg = val;
6359 inst.operands[i].isreg = 1;
6360 inst.operands[i].isvec = 1;
6361 inst.operands[i].issingle = 1;
6362 inst.operands[i].vectype = optype;
6363 inst.operands[i].present = 1;
6366 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
6370 inst.operands[i].reg = val;
6371 inst.operands[i].isreg = 1;
6372 inst.operands[i].isvec = 1;
6373 inst.operands[i].issingle = 1;
6374 inst.operands[i].vectype = optype;
6375 inst.operands[i].present = 1;
6380 first_error (_("parse error"));
6384 /* Successfully parsed the operands. Update args. */
/* Shared error exits (reached via elided gotos).  */
6390 first_error (_("expected comma"));
6394 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  Enum of matcher codes consumed by parse_operands; optional
   operands start at OP_FIRST_OPTIONAL, and MIX_ARM_THUMB_OPERANDS packs an
   ARM code in the low 16 bits and a Thumb code in the high 16 bits.  */
6398 /* Use this macro when the operand constraints are different
6399 for ARM and THUMB (e.g. ldrd). */
6400 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6401 ((arm_operand) | ((thumb_operand) << 16))
6403 /* Matcher codes for parse_operands. */
6404 enum operand_parse_code
6406 OP_stop, /* end of line */
6408 OP_RR, /* ARM register */
6409 OP_RRnpc, /* ARM register, not r15 */
6410 OP_RRnpcsp, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6411 OP_RRnpcb, /* ARM register, not r15, in square brackets */
6412 OP_RRnpctw, /* ARM register, not r15 in Thumb-state or with writeback,
6413 optional trailing ! */
6414 OP_RRw, /* ARM register, not r15, optional trailing ! */
6415 OP_RCP, /* Coprocessor number */
6416 OP_RCN, /* Coprocessor register */
6417 OP_RF, /* FPA register */
6418 OP_RVS, /* VFP single precision register */
6419 OP_RVD, /* VFP double precision register (0..15) */
6420 OP_RND, /* Neon double precision register (0..31) */
6421 OP_RNQ, /* Neon quad precision register */
6422 OP_RVSD, /* VFP single or double precision register */
6423 OP_RNDQ, /* Neon double or quad precision register */
6424 OP_RNSDQ, /* Neon single, double or quad precision register */
6425 OP_RNSC, /* Neon scalar D[X] */
6426 OP_RVC, /* VFP control register */
6427 OP_RMF, /* Maverick F register */
6428 OP_RMD, /* Maverick D register */
6429 OP_RMFX, /* Maverick FX register */
6430 OP_RMDX, /* Maverick DX register */
6431 OP_RMAX, /* Maverick AX register */
6432 OP_RMDS, /* Maverick DSPSC register */
6433 OP_RIWR, /* iWMMXt wR register */
6434 OP_RIWC, /* iWMMXt wC register */
6435 OP_RIWG, /* iWMMXt wCG register */
6436 OP_RXA, /* XScale accumulator register */
6438 OP_REGLST, /* ARM register list */
6439 OP_VRSLST, /* VFP single-precision register list */
6440 OP_VRDLST, /* VFP double-precision register list */
6441 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
6442 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
6443 OP_NSTRLST, /* Neon element/structure list */
6445 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
6446 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
6447 OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero. */
6448 OP_RR_RNSC, /* ARM reg or Neon scalar. */
6449 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
6450 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
6451 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
6452 OP_VMOV, /* Neon VMOV operands. */
6453 OP_RNDQ_Ibig, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6454 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
6455 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6457 OP_I0, /* immediate zero */
6458 OP_I7, /* immediate value 0 .. 7 */
6459 OP_I15, /* 0 .. 15 */
6460 OP_I16, /* 1 .. 16 */
6461 OP_I16z, /* 0 .. 16 */
6462 OP_I31, /* 0 .. 31 */
6463 OP_I31w, /* 0 .. 31, optional trailing ! */
6464 OP_I32, /* 1 .. 32 */
6465 OP_I32z, /* 0 .. 32 */
6466 OP_I63, /* 0 .. 63 */
6467 OP_I63s, /* -64 .. 63 */
6468 OP_I64, /* 1 .. 64 */
6469 OP_I64z, /* 0 .. 64 */
6470 OP_I255, /* 0 .. 255 */
6472 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
6473 OP_I7b, /* 0 .. 7 */
6474 OP_I15b, /* 0 .. 15 */
6475 OP_I31b, /* 0 .. 31 */
6477 OP_SH, /* shifter operand */
6478 OP_SHG, /* shifter operand with possible group relocation */
6479 OP_ADDR, /* Memory address expression (any mode) */
6480 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
6481 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6482 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
6483 OP_EXP, /* arbitrary expression */
6484 OP_EXPi, /* same, with optional immediate prefix */
6485 OP_EXPr, /* same, with optional relocation suffix */
6486 OP_HALF, /* 0 .. 65535 or low/high reloc. */
6488 OP_CPSF, /* CPS flags */
6489 OP_ENDI, /* Endianness specifier */
6490 OP_wPSR, /* CPSR/SPSR/APSR mask for msr (writing). */
6491 OP_rPSR, /* CPSR/SPSR/APSR mask for msr (reading). */
6492 OP_COND, /* conditional code */
6493 OP_TB, /* Table branch. */
6495 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
6497 OP_RRnpc_I0, /* ARM register or literal 0 */
6498 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
6499 OP_RR_EXi, /* ARM register or expression with imm prefix */
6500 OP_RF_IF, /* FPA register or immediate */
6501 OP_RIWR_RIWC, /* iWMMXt R or C reg */
6502 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6504 /* Optional operands. */
6505 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
6506 OP_oI31b, /* 0 .. 31 */
6507 OP_oI32b, /* 1 .. 32 */
6508 OP_oI32z, /* 0 .. 32 */
6509 OP_oIffffb, /* 0 .. 65535 */
6510 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
6512 OP_oRR, /* ARM register */
6513 OP_oRRnpc, /* ARM register, not the PC */
6514 OP_oRRnpcsp, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6515 OP_oRRw, /* ARM register, not r15, optional trailing ! */
6516 OP_oRND, /* Optional Neon double precision register */
6517 OP_oRNQ, /* Optional Neon quad precision register */
6518 OP_oRNDQ, /* Optional Neon double or quad precision register */
6519 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
6520 OP_oSHll, /* LSL immediate */
6521 OP_oSHar, /* ASR immediate */
6522 OP_oSHllar, /* LSL or ASR immediate */
6523 OP_oROR, /* ROR 0/8/16/24 */
6524 OP_oBARRIER_I15, /* Option argument for a barrier instruction. */
6526 /* Some pre-defined mixed (ARM/THUMB) operands. */
6527 OP_RR_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6528 OP_RRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6529 OP_oRRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6531 OP_FIRST_OPTIONAL = OP_oI7b
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  The po_* helper macros below are multi-line (backslash
   continuations) with some continuation lines elided, so no comments are
   inserted inside them.  */
6534 /* Generic instruction operand parser. This does no encoding and no
6535 semantic validation; it merely squirrels values away in the inst
6536 structure. Returns SUCCESS or FAIL depending on whether the
6537 specified grammar matched. */
6539 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6541 unsigned const int *upat = pattern;
6542 char *backtrack_pos = 0;
6543 const char *backtrack_error = 0;
6544 int i, val = 0, backtrack_index = 0;
6545 enum arm_reg_type rtype;
6546 parse_operand_result result;
6547 unsigned int op_parse_code;
6549 #define po_char_or_fail(chr) \
6552 if (skip_past_char (&str, chr) == FAIL) \
6557 #define po_reg_or_fail(regtype) \
6560 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6561 & inst.operands[i].vectype); \
6564 first_error (_(reg_expected_msgs[regtype])); \
6567 inst.operands[i].reg = val; \
6568 inst.operands[i].isreg = 1; \
6569 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6570 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6571 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6572 || rtype == REG_TYPE_VFD \
6573 || rtype == REG_TYPE_NQ); \
6577 #define po_reg_or_goto(regtype, label) \
6580 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6581 & inst.operands[i].vectype); \
6585 inst.operands[i].reg = val; \
6586 inst.operands[i].isreg = 1; \
6587 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6588 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6589 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6590 || rtype == REG_TYPE_VFD \
6591 || rtype == REG_TYPE_NQ); \
6595 #define po_imm_or_fail(min, max, popt) \
6598 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6600 inst.operands[i].imm = val; \
6604 #define po_scalar_or_goto(elsz, label) \
6607 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6610 inst.operands[i].reg = val; \
6611 inst.operands[i].isscalar = 1; \
6615 #define po_misc_or_fail(expr) \
6623 #define po_misc_or_fail_no_backtrack(expr) \
6627 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6628 backtrack_pos = 0; \
6629 if (result != PARSE_OPERAND_SUCCESS) \
6634 #define po_barrier_or_imm(str) \
6637 val = parse_barrier (&str); \
6638 if (val == FAIL && ! ISALPHA (*str)) \
6641 /* ISB can only take SY as an option. */ \
6642 || ((inst.instruction & 0xf0) == 0x60 \
6645 inst.error = _("invalid barrier type"); \
6646 backtrack_pos = 0; \
6652 skip_whitespace (str);
/* Main loop: one iteration per operand code in the pattern, until
   OP_stop.  Mixed ARM/Thumb codes (>= 1<<16) are split per mode, and
   optional operands record a backtrack point.  */
6654 for (i = 0; upat[i] != OP_stop; i++)
6656 op_parse_code = upat[i];
6657 if (op_parse_code >= 1<<16)
6658 op_parse_code = thumb ? (op_parse_code >> 16)
6659 : (op_parse_code & ((1<<16)-1));
6661 if (op_parse_code >= OP_FIRST_OPTIONAL)
6663 /* Remember where we are in case we need to backtrack. */
6664 gas_assert (!backtrack_pos);
6665 backtrack_pos = str;
6666 backtrack_error = inst.error;
6667 backtrack_index = i;
6670 if (i > 0 && (i > 1 || inst.operands[0].present))
6671 po_char_or_fail (',');
6673 switch (op_parse_code)
6681 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6682 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6683 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6684 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6685 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6686 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6688 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6690 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6692 /* Also accept generic coprocessor regs for unknown registers. */
6694 po_reg_or_fail (REG_TYPE_CN);
6696 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6697 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6698 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6699 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6700 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6701 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6702 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6703 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6704 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6705 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6707 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6709 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6710 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6712 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6714 /* Neon scalar. Using an element size of 8 means that some invalid
6715 scalars are accepted here, so deal with those in later code. */
6716 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6720 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6723 po_imm_or_fail (0, 0, TRUE);
6728 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6733 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6736 if (parse_ifimm_zero (&str))
6737 inst.operands[i].imm = 0;
6741 = _("only floating point zero is allowed as immediate value");
6749 po_scalar_or_goto (8, try_rr);
6752 po_reg_or_fail (REG_TYPE_RN);
6758 po_scalar_or_goto (8, try_nsdq);
6761 po_reg_or_fail (REG_TYPE_NSDQ);
6767 po_scalar_or_goto (8, try_ndq);
6770 po_reg_or_fail (REG_TYPE_NDQ);
6776 po_scalar_or_goto (8, try_vfd);
6779 po_reg_or_fail (REG_TYPE_VFD);
6784 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6785 not careful then bad things might happen. */
6786 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6791 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6794 /* There's a possibility of getting a 64-bit immediate here, so
6795 we need special handling. */
6796 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6799 inst.error = _("immediate value is out of range");
6807 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6810 po_imm_or_fail (0, 63, TRUE);
6815 po_char_or_fail ('[');
6816 po_reg_or_fail (REG_TYPE_RN);
6817 po_char_or_fail (']');
6823 po_reg_or_fail (REG_TYPE_RN);
6824 if (skip_past_char (&str, '!') == SUCCESS)
6825 inst.operands[i].writeback = 1;
6829 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6830 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6831 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6832 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6833 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6834 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6835 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6836 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6837 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6838 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6839 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6840 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6842 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6844 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6845 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6847 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6848 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6849 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6850 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6852 /* Immediate variants */
6854 po_char_or_fail ('{');
6855 po_imm_or_fail (0, 255, TRUE);
6856 po_char_or_fail ('}');
6860 /* The expression parser chokes on a trailing !, so we have
6861 to find it first and zap it. */
6864 while (*s && *s != ',')
6869 inst.operands[i].writeback = 1;
6871 po_imm_or_fail (0, 31, TRUE);
6879 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6884 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6889 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6891 if (inst.reloc.exp.X_op == O_symbol)
6893 val = parse_reloc (&str);
6896 inst.error = _("unrecognized relocation suffix");
6899 else if (val != BFD_RELOC_UNUSED)
6901 inst.operands[i].imm = val;
6902 inst.operands[i].hasreloc = 1;
6907 /* Operand for MOVW or MOVT. */
6909 po_misc_or_fail (parse_half (&str));
6912 /* Register or expression. */
6913 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
6914 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
6916 /* Register or immediate. */
6917 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
6918 I0: po_imm_or_fail (0, 0, FALSE); break;
6920 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
6922 if (!is_immediate_prefix (*str))
6925 val = parse_fpa_immediate (&str);
6928 /* FPA immediates are encoded as registers 8-15.
6929 parse_fpa_immediate has already applied the offset. */
6930 inst.operands[i].reg = val;
6931 inst.operands[i].isreg = 1;
6934 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
6935 I32z: po_imm_or_fail (0, 32, FALSE); break;
6937 /* Two kinds of register. */
6940 struct reg_entry *rege = arm_reg_parse_multi (&str);
6942 || (rege->type != REG_TYPE_MMXWR
6943 && rege->type != REG_TYPE_MMXWC
6944 && rege->type != REG_TYPE_MMXWCG))
6946 inst.error = _("iWMMXt data or control register expected");
6949 inst.operands[i].reg = rege->number;
6950 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
6956 struct reg_entry *rege = arm_reg_parse_multi (&str);
6958 || (rege->type != REG_TYPE_MMXWC
6959 && rege->type != REG_TYPE_MMXWCG))
6961 inst.error = _("iWMMXt control register expected");
6964 inst.operands[i].reg = rege->number;
6965 inst.operands[i].isreg = 1;
6970 case OP_CPSF: val = parse_cps_flags (&str); break;
6971 case OP_ENDI: val = parse_endian_specifier (&str); break;
6972 case OP_oROR: val = parse_ror (&str); break;
6973 case OP_COND: val = parse_cond (&str); break;
6974 case OP_oBARRIER_I15:
6975 po_barrier_or_imm (str); break;
6977 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
6983 po_reg_or_goto (REG_TYPE_RNB, try_psr);
6984 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
6986 inst.error = _("Banked registers are not available with this "
6992 val = parse_psr (&str, op_parse_code == OP_wPSR);
6996 po_reg_or_goto (REG_TYPE_RN, try_apsr);
6999 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7001 if (strncasecmp (str, "APSR_", 5) == 0)
7008 case 'c': found = (found & 1) ? 16 : found | 1; break;
7009 case 'n': found = (found & 2) ? 16 : found | 2; break;
7010 case 'z': found = (found & 4) ? 16 : found | 4; break;
7011 case 'v': found = (found & 8) ? 16 : found | 8; break;
7012 default: found = 16;
7016 inst.operands[i].isvec = 1;
7017 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7018 inst.operands[i].reg = REG_PC;
7025 po_misc_or_fail (parse_tb (&str));
7028 /* Register lists. */
7030 val = parse_reg_list (&str);
7033 inst.operands[1].writeback = 1;
7039 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7043 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7047 /* Allow Q registers too. */
7048 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7053 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7055 inst.operands[i].issingle = 1;
7060 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7065 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7066 &inst.operands[i].vectype);
7069 /* Addressing modes */
7071 po_misc_or_fail (parse_address (&str, i));
7075 po_misc_or_fail_no_backtrack (
7076 parse_address_group_reloc (&str, i, GROUP_LDR));
7080 po_misc_or_fail_no_backtrack (
7081 parse_address_group_reloc (&str, i, GROUP_LDRS));
7085 po_misc_or_fail_no_backtrack (
7086 parse_address_group_reloc (&str, i, GROUP_LDC));
7090 po_misc_or_fail (parse_shifter_operand (&str, i));
7094 po_misc_or_fail_no_backtrack (
7095 parse_shifter_operand_group_reloc (&str, i));
7099 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7103 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7107 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7111 as_fatal (_("unhandled operand code %d"), op_parse_code);
7114 /* Various value-based sanity checks and shared operations. We
7115 do not signal immediate failures for the register constraints;
7116 this allows a syntax error to take precedence. */
7117 switch (op_parse_code)
7125 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7126 inst.error = BAD_PC;
7131 if (inst.operands[i].isreg)
7133 if (inst.operands[i].reg == REG_PC)
7134 inst.error = BAD_PC;
7135 else if (inst.operands[i].reg == REG_SP)
7136 inst.error = BAD_SP;
7141 if (inst.operands[i].isreg
7142 && inst.operands[i].reg == REG_PC
7143 && (inst.operands[i].writeback || thumb))
7144 inst.error = BAD_PC;
7153 case OP_oBARRIER_I15:
7162 inst.operands[i].imm = val;
7169 /* If we get here, this operand was successfully parsed. */
7170 inst.operands[i].present = 1;
7174 inst.error = BAD_ARGS;
7179 /* The parse routine should already have set inst.error, but set a
7180 default here just in case. */
7182 inst.error = _("syntax error");
7186 /* Do not backtrack over a trailing optional argument that
7187 absorbed some text. We will only fail again, with the
7188 'garbage following instruction' error message, which is
7189 probably less helpful than the current one. */
7190 if (backtrack_index == i && backtrack_pos != str
7191 && upat[i+1] == OP_stop)
7194 inst.error = _("syntax error");
7198 /* Try again, skipping the optional argument at backtrack_pos. */
7199 str = backtrack_pos;
7200 inst.error = backtrack_error;
7201 inst.operands[backtrack_index].present = 0;
7202 i = backtrack_index;
7206 /* Check that we have parsed all the arguments. */
7207 if (*str != '\0' && !inst.error)
7208 inst.error = _("garbage following instruction");
7210 return inst.error ? FAIL : SUCCESS;
7213 #undef po_char_or_fail
7214 #undef po_reg_or_fail
7215 #undef po_reg_or_goto
7216 #undef po_imm_or_fail
7217 #undef po_scalar_or_fail
7218 #undef po_barrier_or_imm
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  These are multi-line macros (backslash continuations) with
   some continuation lines elided, so no comments are inserted inside them.  */
7220 /* Shorthand macro for instruction encoding functions issuing errors. */
7221 #define constraint(expr, err) \
7232 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7233 instructions are unpredictable if these registers are used. This
7234 is the BadReg predicate in ARM's Thumb-2 documentation. */
7235 #define reject_bad_reg(reg) \
7237 if (reg == REG_SP || reg == REG_PC) \
7239 inst.error = (reg == REG_SP) ? BAD_SP : BAD_PC; \
7244 /* If REG is R13 (the stack pointer), warn that its use is
7246 #define warn_deprecated_sp(reg) \
7248 if (warn_on_deprecated && reg == REG_SP) \
7249 as_warn (_("use of r13 is deprecated")); \
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  */
7252 /* Functions for operand encoding. ARM, then Thumb. */
/* NOTE(review): rotate_left's arguments are unparenthesized, and for
   n == 0 the expansion contains (v >> 32), which is undefined behavior
   for a 32-bit type in C; the loop below does invoke it with i == 0.
   TODO: confirm how upstream handles this.  */
7254 #define rotate_left(v, n) (v << n | v >> (32 - n))
7256 /* If VAL can be encoded in the immediate field of an ARM instruction,
7257 return the encoded form. Otherwise, return FAIL. */
7260 encode_arm_immediate (unsigned int val)
/* Try every even rotation 0..30: an ARM immediate is an 8-bit constant
   rotated right by twice the 4-bit rotate field.  */
7264 for (i = 0; i < 32; i += 2)
7265 if ((a = rotate_left (val, i)) <= 0xff)
7266 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  The encodings below correspond to the Thumb-2 modified
   immediate forms: shifted 8-bit constant, 0x00XY00XY, 0xXYXYXYXY and
   0xXY00XY00 replications.  */
7271 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7272 return the encoded form. Otherwise, return FAIL. */
7274 encode_thumb32_immediate (unsigned int val)
/* An 8-bit value (with top bit set, per elided context) shifted left by
   i: encode the 7 low bits of the constant and the shift amount.  */
7281 for (i = 1; i <= 24; i++)
7284 if ((val & ~(0xff << i)) == 0)
7285 return ((val >> i) & 0x7f) | ((32 - i) << 7);
/* Replicated-byte/halfword patterns; 'a' is set on elided lines.  */
7289 if (val == ((a << 16) | a))
7291 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7295 if (val == ((a << 16) | a))
7296 return 0x200 | (a >> 8);
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  */
7300 /* Encode a VFP SP or DP register number into inst.instruction. */
7303 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
/* D registers 16..31 need the VFPv3 D32 extension; record its use or
   report an error if the selected FPU lacks it.  */
7305 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7308 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7311 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7314 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7319 first_error (_("D register out of range for selected VFP version"));
/* Single-precision: 4 high bits in the main field, low bit in the
   separate odd-bit position (Sd/Sn/Sm placements).  */
7327 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7331 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7335 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
/* Double-precision: 4 low bits in the main field, bit 4 in the
   extension-bit position (Dd/Dn/Dm placements).  */
7339 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7343 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7347 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  */
7355 /* Encode a <shift> in an ARM-format instruction. The immediate,
7356 if any, is handled by md_apply_fix. */
7358 encode_arm_shift (int i)
/* RRX is encoded as ROR with a zero shift amount.  */
7360 if (inst.operands[i].shift_kind == SHIFT_RRX)
7361 inst.instruction |= SHIFT_ROR << 5;
7364 inst.instruction |= inst.operands[i].shift_kind << 5;
/* Register-specified shift: set the by-register bit and the Rs field;
   otherwise leave the amount to the SHIFT_IMM relocation.  */
7365 if (inst.operands[i].immisreg)
7367 inst.instruction |= SHIFT_BY_REG;
7368 inst.instruction |= inst.operands[i].imm << 8;
7371 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
/* NOTE(review): elided listing — gaps in embedded line numbers; code kept
   verbatim.  Encodes operand i as an ARM data-processing shifter operand:
   either a (possibly shifted) register or an immediate.  */
7376 encode_arm_shifter_operand (int i)
7378 if (inst.operands[i].isreg)
7380 inst.instruction |= inst.operands[i].reg;
7381 encode_arm_shift (i);
/* Immediate form: set the I bit; the value is placed directly unless an
   IMMEDIATE relocation will fill it in later.  */
7385 inst.instruction |= INST_IMMEDIATE;
7386 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7387 inst.instruction |= inst.operands[i].imm;
7391 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
/* Encodes the base register and the pre/post/unindexed addressing
   bits shared by mode 2 and mode 3, and warns about the classic
   write-back hazard where Rn (bits 19:16) equals Rd (bits 15:12).  */
7393 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7396 Generate an error if the operand is not a register. */
7397 constraint (!inst.operands[i].isreg,
7398 _("Instruction does not support =N addresses"));
7400 inst.instruction |= inst.operands[i].reg << 16;
7402 if (inst.operands[i].preind)
/* is_t (ldrt/strt family) presumably forbids pre-indexing -- the
   guarding condition line is missing from this excerpt.  */
7406 inst.error = _("instruction does not accept preindexed addressing");
7409 inst.instruction |= PRE_INDEX;
7410 if (inst.operands[i].writeback)
7411 inst.instruction |= WRITE_BACK;
7414 else if (inst.operands[i].postind)
7416 gas_assert (inst.operands[i].writeback);
7418 inst.instruction |= WRITE_BACK;
7420 else /* unindexed - only for coprocessor */
7422 inst.error = _("instruction does not accept unindexed addressing");
/* Warn when write-back (explicit, or implicit via post-indexing)
   targets the same register as the transfer register.  */
7426 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7427 && (((inst.instruction & 0x000f0000) >> 16)
7428 == ((inst.instruction & 0x0000f000) >> 12)))
7429 as_warn ((inst.instruction & LOAD_BIT)
7430 ? _("destination register same as write-back base")
7431 : _("source register same as write-back base"))
7434 /* inst.operands[i] was set up by parse_address. Encode it into an
7435 ARM-format mode 2 load or store instruction. If is_t is true,
7436 reject forms that cannot be used with a T instruction (i.e. not
7439 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7441 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7443 encode_arm_addr_mode_common (i, is_t);
/* Register offset form: [Rn, +/-Rm {, shift}].  */
7445 if (inst.operands[i].immisreg)
7447 constraint ((inst.operands[i].imm == REG_PC
7448 || (is_pc && inst.operands[i].writeback)),
/* In mode 2 the I bit SET means register offset.  */
7450 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
7451 inst.instruction |= inst.operands[i].imm;
7452 if (!inst.operands[i].negative)
7453 inst.instruction |= INDEX_UP;
7454 if (inst.operands[i].shifted)
/* RRX encodes as ROR #0; real shift amounts are fixed up later.  */
7456 if (inst.operands[i].shift_kind == SHIFT_RRX)
7457 inst.instruction |= SHIFT_ROR << 5;
7460 inst.instruction |= inst.operands[i].shift_kind << 5;
7461 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7465 else /* immediate offset in inst.reloc */
7467 if (is_pc && !inst.reloc.pc_rel)
7469 const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7471 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7472 cannot use PC in addressing.
7473 PC cannot be used in writeback addressing, either. */
7474 constraint ((is_t || inst.operands[i].writeback),
7477 /* Use of PC in str is deprecated for ARMv7. */
7478 if (warn_on_deprecated
7480 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7481 as_warn (_("use of PC in this instruction is deprecated"));
7484 if (inst.reloc.type == BFD_RELOC_UNUSED)
7486 /* Prefer + for zero encoded value. */
7487 if (!inst.operands[i].negative)
7488 inst.instruction |= INDEX_UP;
7489 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
7494 /* inst.operands[i] was set up by parse_address. Encode it into an
7495 ARM-format mode 3 load or store instruction. Reject forms that
7496 cannot be used with such instructions. If is_t is true, reject
7497 forms that cannot be used with a T instruction (i.e. not
/* Mode 3 (halfword / signed byte / doubleword) has no scaled-register
   form, hence the early rejection below.  */
7500 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7502 if (inst.operands[i].immisreg && inst.operands[i].shifted)
7504 inst.error = _("instruction does not accept scaled register index");
7508 encode_arm_addr_mode_common (i, is_t);
7510 if (inst.operands[i].immisreg)
7512 constraint ((inst.operands[i].imm == REG_PC
7513 || (is_t && inst.operands[i].reg == REG_PC)),
7515 constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
7517 inst.instruction |= inst.operands[i].imm;
7518 if (!inst.operands[i].negative)
7519 inst.instruction |= INDEX_UP;
7521 else /* immediate offset in inst.reloc */
7523 constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7524 && inst.operands[i].writeback),
/* Mode 3 immediate form is flagged by HWOFFSET_IMM (split 8-bit
   offset), resolved later via BFD_RELOC_ARM_OFFSET_IMM8.  */
7526 inst.instruction |= HWOFFSET_IMM;
7527 if (inst.reloc.type == BFD_RELOC_UNUSED)
7529 /* Prefer + for zero encoded value. */
7530 if (!inst.operands[i].negative)
7531 inst.instruction |= INDEX_UP;
7533 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
7538 /* Write immediate bits [7:0] to the following locations:
7540 |28/24|23 19|18 16|15 4|3 0|
7541 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7543 This function is used by VMOV/VMVN/VORR/VBIC. */
/* Bit 'a' sits at 28 in Thumb encodings and 24 in ARM encodings.  */
7546 neon_write_immbits (unsigned immbits)
7548 inst.instruction |= immbits & 0xf;
7549 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7550 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7553 /* Invert low-order SIZE bits of XHI:XLO. */
/* Either pointer may be NULL; the size-dispatch lines (presumably a
   switch on 8/16/32/64) are missing from this excerpt, as is the
   write-back of the inverted values.  */
7556 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
7558 unsigned immlo = xlo ? *xlo : 0;
7559 unsigned immhi = xhi ? *xhi : 0;
7564 immlo = (~immlo) & 0xff;
7568 immlo = (~immlo) & 0xffff;
7572 immhi = (~immhi) & 0xffffffff;
7576 immlo = (~immlo) & 0xffffffff;
7590 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
/* i.e. each byte of IMM is either 0x00 or 0xff.  */
7594 neon_bits_same_in_bytes (unsigned imm)
7596 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
7597 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
7598 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
7599 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
7602 /* For immediate of above form, return 0bABCD. */
/* Samples bit 0 of each byte -- valid only after the check above.  */
7605 neon_squash_bits (unsigned imm)
7607 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
7608 | ((imm & 0x01000000) >> 21);
7611 /* Compress quarter-float representation to 0b...000 abcdefgh. */
/* Sign bit from bit 31, exponent/mantissa bits from bits 25:19.  */
7614 neon_qfloat_bits (unsigned imm)
7616 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
7619 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7620 the instruction. *OP is passed as the initial value of the op field, and
7621 may be set to a different value depending on the constant (i.e.
7622 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7623 MVN). If the immediate looks like a repeated pattern then also
7624 try smaller element sizes. */
/* NOTE(review): the return statements carrying the actual CMODE values
   and the size-narrowing recursion are largely missing from this
   excerpt; only the pattern classification tests survive.  */
7627 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
7628 unsigned *immbits, int *op, int size,
7629 enum neon_el_type type)
7631 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7633 if (type == NT_float && !float_p)
/* F32 quarter-precision constant (cmode 0b1111).  */
7636 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
7638 if (size != 32 || *op == 1)
7640 *immbits = neon_qfloat_bits (immlo);
/* I64: every byte 0x00 or 0xff, squashed to one bit per byte.  */
7646 if (neon_bits_same_in_bytes (immhi)
7647 && neon_bits_same_in_bytes (immlo))
7651 *immbits = (neon_squash_bits (immhi) << 4)
7652 | neon_squash_bits (immlo);
/* I32 forms: a single significant byte at each possible position,
   then the "byte | ones" shifted forms (cmode 0b110x).  */
7663 if (immlo == (immlo & 0x000000ff))
7668 else if (immlo == (immlo & 0x0000ff00))
7670 *immbits = immlo >> 8;
7673 else if (immlo == (immlo & 0x00ff0000))
7675 *immbits = immlo >> 16;
7678 else if (immlo == (immlo & 0xff000000))
7680 *immbits = immlo >> 24;
7683 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
7685 *immbits = (immlo >> 8) & 0xff;
7688 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
7690 *immbits = (immlo >> 16) & 0xff;
/* Narrow to 16-bit elements only when both halfwords agree.  */
7694 if ((immlo & 0xffff) != (immlo >> 16))
7701 if (immlo == (immlo & 0x000000ff))
7706 else if (immlo == (immlo & 0x0000ff00))
7708 *immbits = immlo >> 8;
/* Narrow to 8-bit elements only when both bytes agree.  */
7712 if ((immlo & 0xff) != (immlo >> 8))
7717 if (immlo == (immlo & 0x000000ff))
7719 /* Don't allow MVN with 8-bit immediate. */
7736 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7737 Determine whether it can be performed with a move instruction; if
7738 it can, convert inst.instruction to that move instruction and
7739 return TRUE; if it can't, convert inst.instruction to a literal-pool
7740 load and return FALSE. If this is not a valid thing to do in the
7741 current context, set inst.error and return TRUE.
7743 inst.operands[i] describes the destination register. */
7746 move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
7749 bfd_boolean thumb_p = (t == CONST_THUMB);
7750 bfd_boolean arm_p = (t == CONST_ARM);
7751 bfd_boolean vec64_p = (t == CONST_VEC) && !inst.operands[i].issingle;
/* Wide Thumb encodings (> 0xffff) carry the load bit elsewhere.  */
7754 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7758 if ((inst.instruction & tbit) == 0)
7760 inst.error = _("invalid pseudo operation");
7763 if (inst.reloc.exp.X_op != O_constant
7764 && inst.reloc.exp.X_op != O_symbol
7765 && inst.reloc.exp.X_op != O_big)
7767 inst.error = _("constant expression expected");
7770 if ((inst.reloc.exp.X_op == O_constant
7771 || inst.reloc.exp.X_op == O_big)
7772 && !inst.operands[i].issingle)
/* Thumb narrow mov: only an 8-bit unsigned constant qualifies.  */
7774 if (thumb_p && inst.reloc.exp.X_op == O_constant)
7776 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
7778 /* This can be done with a mov(1) instruction. */
7779 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
7780 inst.instruction |= inst.reloc.exp.X_add_number;
/* ARM: try mov with an encodable immediate, then mvn with the
   complement.  */
7784 else if (arm_p && inst.reloc.exp.X_op == O_constant)
7786 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
7789 /* This can be done with a mov instruction. */
7790 inst.instruction &= LITERAL_MASK;
7791 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
7792 inst.instruction |= value & 0xfff;
7796 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
7799 /* This can be done with a mvn instruction. */
7800 inst.instruction &= LITERAL_MASK;
7801 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
7802 inst.instruction |= value & 0xfff;
/* Vector (vldr) case: try to express the 64-bit constant as a Neon
   vmov immediate, also attempting the inverted form.  */
7809 unsigned immbits = 0;
7810 unsigned immlo = inst.operands[1].imm;
7811 unsigned immhi = inst.operands[1].regisimm
7812 ? inst.operands[1].reg
7813 : inst.reloc.exp.X_unsigned
/* Sign-extend immlo into the high word when the expression is signed.  */
7815 : ((int64_t)((int) immlo)) >> 32;
7816 int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
7817 &op, 64, NT_invtype);
7821 neon_invert_size (&immlo, &immhi, 64);
7823 cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
7824 &op, 64, NT_invtype);
7828 inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
7833 /* Fill other bits in vmov encoding for both thumb and arm. */
7835 inst.instruction |= (0x7 << 29) | (0xF << 24);
7837 inst.instruction |= (0xF << 28) | (0x1 << 25);
7838 neon_write_immbits (immbits);
/* Fallback: place the value in the literal pool and rewrite the
   operand as a PC-relative load.  */
7844 if (add_to_lit_pool ((!inst.operands[i].isvec
7845 || inst.operands[i].issingle) ? 4 : 8) == FAIL)
7848 inst.operands[1].reg = REG_PC;
7849 inst.operands[1].isreg = 1;
7850 inst.operands[1].preind = 1;
7851 inst.reloc.pc_rel = 1;
7852 inst.reloc.type = (thumb_p
7853 ? BFD_RELOC_ARM_THUMB_OFFSET
7855 ? BFD_RELOC_ARM_HWLITERAL
7856 : BFD_RELOC_ARM_LITERAL));
7860 /* inst.operands[i] was set up by parse_address. Encode it into an
7861 ARM-format instruction. Reject all forms which cannot be encoded
7862 into a coprocessor load/store instruction. If wb_ok is false,
7863 reject use of writeback; if unind_ok is false, reject use of
7864 unindexed addressing. If reloc_override is not 0, use it instead
7865 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
7866 (in which case it is preserved). */
7869 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
/* "=const" operand: route through the literal-pool machinery.  */
7871 if (!inst.operands[i].isreg)
7873 gas_assert (inst.operands[0].isvec);
7874 if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
7878 inst.instruction |= inst.operands[i].reg << 16;
7880 gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
7882 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
7884 gas_assert (!inst.operands[i].writeback);
7887 inst.error = _("instruction does not support unindexed addressing");
/* Unindexed form: 8-bit option field, U bit always set.  */
7890 inst.instruction |= inst.operands[i].imm;
7891 inst.instruction |= INDEX_UP;
7895 if (inst.operands[i].preind)
7896 inst.instruction |= PRE_INDEX;
7898 if (inst.operands[i].writeback)
7900 if (inst.operands[i].reg == REG_PC)
7902 inst.error = _("pc may not be used with write-back");
7907 inst.error = _("instruction does not support writeback");
7910 inst.instruction |= WRITE_BACK;
7914 inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
/* Preserve group relocations and LDR_PC_G0; otherwise pick the
   CP offset relocation appropriate to the current instruction set.  */
7915 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
7916 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
7917 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
7920 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
7922 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
7925 /* Prefer + for zero encoded value. */
7926 if (!inst.operands[i].negative)
7927 inst.instruction |= INDEX_UP;
7932 /* Functions for instruction encoding, sorted by sub-architecture.
7933 First some generics; their names are taken from the conventional
7934 bit positions for register arguments in ARM format instructions. */
/* NOTE(review): the function headers for these one- and two-line
   generic encoders (presumably do_rd, do_rd_rm, do_rm_rn, do_rd_rn,
   do_rn_rd, ...) are missing from this excerpt; only their bodies
   survive.  Each simply ORs operand register numbers into the
   conventional Rd/Rn/Rm bit positions.  */
7944 inst.instruction |= inst.operands[0].reg << 12;
7950 inst.instruction |= inst.operands[0].reg << 12;
7951 inst.instruction |= inst.operands[1].reg;
7957 inst.instruction |= inst.operands[0].reg;
7958 inst.instruction |= inst.operands[1].reg << 16;
7964 inst.instruction |= inst.operands[0].reg << 12;
7965 inst.instruction |= inst.operands[1].reg << 16;
7971 inst.instruction |= inst.operands[0].reg << 16;
7972 inst.instruction |= inst.operands[1].reg << 12;
/* Warn about MSG on an "any" CPU; otherwise (on a CPU that has
   FEATURE) presumably report it as an error -- the true branch bodies
   and return values are missing here.  */
7976 check_obsolete (const arm_feature_set *feature, const char *msg)
7978 if (ARM_CPU_IS_ANY (cpu_variant))
7980 as_warn ("%s", msg);
7983 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
/* Rd/Rm/Rn encoder with the SWP special case (0x01000090 mask).  */
7995 unsigned Rn = inst.operands[2].reg;
7996 /* Enforce restrictions on SWP instruction. */
7997 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
7999 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
8000 _("Rn must not overlap other operands"));
8002 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8004 if (!check_obsolete (&arm_ext_v8,
8005 _("swp{b} use is obsoleted for ARMv8 and later"))
8006 && warn_on_deprecated
8007 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
8008 as_warn (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8011 inst.instruction |= inst.operands[0].reg << 12;
8012 inst.instruction |= inst.operands[1].reg;
8013 inst.instruction |= Rn << 16;
/* Rd<<12, Rn<<16, Rm -- three-register encoder.  */
8019 inst.instruction |= inst.operands[0].reg << 12;
8020 inst.instruction |= inst.operands[1].reg << 16;
8021 inst.instruction |= inst.operands[2].reg;
/* Rm/Rd/Rn encoder requiring a zero offset expression.  */
8027 constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
8028 constraint (((inst.reloc.exp.X_op != O_constant
8029 && inst.reloc.exp.X_op != O_illegal)
8030 || inst.reloc.exp.X_add_number != 0),
8032 inst.instruction |= inst.operands[0].reg;
8033 inst.instruction |= inst.operands[1].reg << 12;
8034 inst.instruction |= inst.operands[2].reg << 16;
/* Immediate-only encoder, and Rd + coprocessor-address encoder.  */
8040 inst.instruction |= inst.operands[0].imm;
8046 inst.instruction |= inst.operands[0].reg << 12;
8047 encode_arm_cp_address (1, TRUE, TRUE, 0);
8050 /* ARM instructions, in alphabetical order by function name (except
8051 that wrapper functions appear immediately after the function they
8054 /* This is a pseudo-op of the form "adr rd, label" to be converted
8055 into a relative address of the form "add rd, pc, #label-.-8". */
8060 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8062 /* Frag hacking will turn this into a sub instruction if the offset turns
8063 out to be negative. */
8064 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8065 inst.reloc.pc_rel = 1;
/* -8 accounts for the ARM-state PC reading as the instruction
   address plus 8.  */
8066 inst.reloc.exp.X_add_number -= 8;
8069 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8070 into a relative address of the form:
8071 add rd, pc, #low(label-.-8)"
8072 add rd, rd, #high(label-.-8)" */
8077 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8079 /* Frag hacking will turn this into a sub instruction if the offset turns
8080 out to be negative. */
8081 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
8082 inst.reloc.pc_rel = 1;
/* adrl always expands to two instructions.  */
8083 inst.size = INSN_SIZE * 2;
8084 inst.reloc.exp.X_add_number -= 8;
/* Arithmetic op: "op Rd, Rn, <shifter>"; a missing Rn defaults to Rd
   ("op Rd, <shifter>" two-operand form).  */
8090 if (!inst.operands[1].present)
8091 inst.operands[1].reg = inst.operands[0].reg;
8092 inst.instruction |= inst.operands[0].reg << 12;
8093 inst.instruction |= inst.operands[1].reg << 16;
8094 encode_arm_shifter_operand (2);
/* Barrier: option field defaults to 0xf (SY) when omitted.  */
8100 if (inst.operands[0].present)
8101 inst.instruction |= inst.operands[0].imm;
8103 inst.instruction |= 0xf;
/* bfc: lsb in operands[1], width in operands[2].  */
8109 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8110 constraint (msb > 32, _("bit-field extends past end of register"));
8111 /* The instruction encoding stores the LSB and MSB,
8112 not the LSB and width. */
8113 inst.instruction |= inst.operands[0].reg << 12;
8114 inst.instruction |= inst.operands[1].imm << 7;
8115 inst.instruction |= (msb - 1) << 16;
8123 /* #0 in second position is alternative syntax for bfc, which is
8124 the same instruction but with REG_PC in the Rm field. */
8125 if (!inst.operands[1].isreg)
8126 inst.operands[1].reg = REG_PC;
8128 msb = inst.operands[2].imm + inst.operands[3].imm;
8129 constraint (msb > 32, _("bit-field extends past end of register"));
8130 /* The instruction encoding stores the LSB and MSB,
8131 not the LSB and width. */
8132 inst.instruction |= inst.operands[0].reg << 12;
8133 inst.instruction |= inst.operands[1].reg;
8134 inst.instruction |= inst.operands[2].imm << 7;
8135 inst.instruction |= (msb - 1) << 16;
/* sbfx/ubfx: encodes lsb and width-1.  */
8141 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8142 _("bit-field extends past end of register"));
8143 inst.instruction |= inst.operands[0].reg << 12;
8144 inst.instruction |= inst.operands[1].reg;
8145 inst.instruction |= inst.operands[2].imm << 7;
8146 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8149 /* ARM V5 breakpoint instruction (argument parse)
8150 BKPT <16 bit unsigned immediate>
8151 Instruction is not conditional.
8152 The bit pattern given in insns[] has the COND_ALWAYS condition,
8153 and it is an error if the caller tried to override that. */
8158 /* Top 12 of 16 bits to bits 19:8. */
8159 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8161 /* Bottom 4 of 16 bits to bits 3:0. */
8162 inst.instruction |= inst.operands[0].imm & 0xf;
/* Select the branch relocation: honour an explicit (plt)/(tlscall)
   suffix, otherwise use DEFAULT_RELOC; all branches are PC-relative.  */
8166 encode_branch (int default_reloc)
8168 if (inst.operands[0].hasreloc)
8170 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
8171 && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
8172 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8173 inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
8174 ? BFD_RELOC_ARM_PLT32
8175 : thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
8178 inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
8179 inst.reloc.pc_rel = 1;
/* b/bcond: EABI v4+ uses the JUMP reloc so the linker can fix up
   ARM/Thumb interworking.  */
8186 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8187 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
8190 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
/* bl: conditional bl cannot become blx, hence JUMP instead of CALL.  */
8197 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8199 if (inst.cond == COND_ALWAYS)
8200 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
8202 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
8206 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
8209 /* ARM V5 branch-link-exchange instruction (argument parse)
8210 BLX <target_addr> ie BLX(1)
8211 BLX{<condition>} <Rm> ie BLX(2)
8212 Unfortunately, there are two different opcodes for this mnemonic.
8213 So, the insns[].value is not used, and the code here zaps values
8214 into inst.instruction.
8215 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8220 if (inst.operands[0].isreg)
8222 /* Arg is a register; the opcode provided by insns[] is correct.
8223 It is not illegal to do "blx pc", just useless. */
8224 if (inst.operands[0].reg == REG_PC)
8225 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8227 inst.instruction |= inst.operands[0].reg;
8231 /* Arg is an address; this instruction cannot be executed
8232 conditionally, and the opcode must be adjusted.
8233 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8234 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8235 constraint (inst.cond != COND_ALWAYS, BAD_COND);
8236 inst.instruction = 0xfa000000;
8237 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
/* bx Rm; emit R_ARM_V4BX so the linker can rewrite for pre-v5.  */
8244 bfd_boolean want_reloc;
8246 if (inst.operands[0].reg == REG_PC)
8247 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8249 inst.instruction |= inst.operands[0].reg;
8250 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8251 it is for ARMv4t or earlier. */
8252 want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
8253 if (object_arch && !ARM_CPU_HAS_FEATURE (*object_arch, arm_ext_v5))
8257 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
8262 inst.reloc.type = BFD_RELOC_ARM_V4BX;
8266 /* ARM v5TEJ. Jump to Jazelle code. */
8271 if (inst.operands[0].reg == REG_PC)
8272 as_tsktsk (_("use of r15 in bxj is not really useful"));
8274 inst.instruction |= inst.operands[0].reg;
8277 /* Co-processor data operation:
8278 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8279 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8283 inst.instruction |= inst.operands[0].reg << 8;
8284 inst.instruction |= inst.operands[1].imm << 20;
8285 inst.instruction |= inst.operands[2].reg << 12;
8286 inst.instruction |= inst.operands[3].reg << 16;
8287 inst.instruction |= inst.operands[4].reg;
8288 inst.instruction |= inst.operands[5].imm << 5;
/* Compare-style op: "cmp Rn, <shifter>" -- Rn in bits 19:16, no Rd.  */
8294 inst.instruction |= inst.operands[0].reg << 16;
8295 encode_arm_shifter_operand (1);
8298 /* Transfer between coprocessor and ARM registers.
8299 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8304 No special properties. */
/* Describes one coprocessor register whose access is deprecated or
   obsoleted; matched field-by-field against MRC/MCR operands.  */
8306 struct deprecated_coproc_regs_s
8313 arm_feature_set deprecated;
8314 arm_feature_set obsoleted;
8315 const char *dep_msg;
8316 const char *obs_msg;
8319 #define DEPR_ACCESS_V8 \
8320 N_("This coprocessor register access is deprecated in ARMv8")
8322 /* Table of all deprecated coprocessor registers. */
8323 static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
8325 {15, 0, 7, 10, 5, /* CP15DMB. */
8326 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
8327 DEPR_ACCESS_V8, NULL},
8328 {15, 0, 7, 10, 4, /* CP15DSB. */
8329 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
8330 DEPR_ACCESS_V8, NULL},
8331 {15, 0, 7, 5, 4, /* CP15ISB. */
8332 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
8333 DEPR_ACCESS_V8, NULL},
8334 {14, 6, 1, 0, 0, /* TEEHBR. */
8335 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
8336 DEPR_ACCESS_V8, NULL},
8337 {14, 6, 0, 0, 0, /* TEECR. */
8338 ARM_FEATURE (ARM_EXT_V8, 0), ARM_FEATURE (0, 0),
8339 DEPR_ACCESS_V8, NULL},
8342 #undef DEPR_ACCESS_V8
8344 static const size_t deprecated_coproc_reg_count =
8345 sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
/* do_co_reg (MRC/MCR): validate Rd (SP/PC rules differ between MCR
   and MRC, and between ARM and Thumb), warn for deprecated CP15/CP14
   register accesses, then pack the fields.  The function header and
   several guard lines are missing from this excerpt.  */
8353 Rd = inst.operands[2].reg;
8356 if (inst.instruction == 0xee000010
8357 || inst.instruction == 0xfe000010)
8359 reject_bad_reg (Rd);
8362 constraint (Rd == REG_SP, BAD_SP);
8367 if (inst.instruction == 0xe000010)
8368 constraint (Rd == REG_PC, BAD_PC);
/* Scan the deprecated-register table for an exact field match.  */
8371 for (i = 0; i < deprecated_coproc_reg_count; ++i)
8373 const struct deprecated_coproc_regs_s *r =
8374 deprecated_coproc_regs + i;
8376 if (inst.operands[0].reg == r->cp
8377 && inst.operands[1].imm == r->opc1
8378 && inst.operands[3].reg == r->crn
8379 && inst.operands[4].reg == r->crm
8380 && inst.operands[5].imm == r->opc2)
8382 if (! ARM_CPU_IS_ANY (cpu_variant)
8383 && warn_on_deprecated
8384 && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
8385 as_warn ("%s", r->dep_msg);
8389 inst.instruction |= inst.operands[0].reg << 8;
8390 inst.instruction |= inst.operands[1].imm << 21;
8391 inst.instruction |= Rd << 12;
8392 inst.instruction |= inst.operands[3].reg << 16;
8393 inst.instruction |= inst.operands[4].reg;
8394 inst.instruction |= inst.operands[5].imm << 5;
8397 /* Transfer between coprocessor register and pair of ARM registers.
8398 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8403 Two XScale instructions are special cases of these:
8405 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8406 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8408 Result unpredictable if Rd or Rn is R15. */
8415 Rd = inst.operands[2].reg;
8416 Rn = inst.operands[3].reg;
8420 reject_bad_reg (Rd);
8421 reject_bad_reg (Rn);
8425 constraint (Rd == REG_PC, BAD_PC);
8426 constraint (Rn == REG_PC, BAD_PC);
8429 inst.instruction |= inst.operands[0].reg << 8;
8430 inst.instruction |= inst.operands[1].imm << 4;
8431 inst.instruction |= Rd << 12;
8432 inst.instruction |= Rn << 16;
8433 inst.instruction |= inst.operands[4].reg;
/* CPS: effect in bits 7:6; optional mode operand sets MMOD.  */
8439 inst.instruction |= inst.operands[0].imm << 6;
8440 if (inst.operands[1].present)
8442 inst.instruction |= CPSI_MMOD;
8443 inst.instruction |= inst.operands[1].imm;
/* DBG-style single-immediate encoder.  */
8450 inst.instruction |= inst.operands[0].imm;
/* SDIV/UDIV: none of Rd/Rn/Rm may be PC; Rn defaults to Rd in the
   two-operand form.  */
8456 unsigned Rd, Rn, Rm;
8458 Rd = inst.operands[0].reg;
8459 Rn = (inst.operands[1].present
8460 ? inst.operands[1].reg : Rd);
8461 Rm = inst.operands[2].reg;
8463 constraint ((Rd == REG_PC), BAD_PC);
8464 constraint ((Rn == REG_PC), BAD_PC);
8465 constraint ((Rm == REG_PC), BAD_PC);
8467 inst.instruction |= Rd << 16;
8468 inst.instruction |= Rn << 0;
8469 inst.instruction |= Rm << 8;
8475 /* There is no IT instruction in ARM mode. We
8476 process it to do the validation as if in
8477 thumb mode, just in case the code gets
8478 assembled for thumb using the unified syntax. */
8483 set_it_insn_type (IT_INSN);
8484 now_it.mask = (inst.instruction & 0xf) | 0x10;
8485 now_it.cc = inst.operands[0].imm;
8489 /* If there is only one register in the register list,
8490 then return its register number. Otherwise return -1. */
8492 only_one_reg_in_list (int range)
/* ffs gives 1-based index of the lowest set bit; -1 if range == 0.  */
8494 int i = ffs (range) - 1;
8495 return (i > 15 || range != (1 << i)) ? -1 : i;
/* Encode LDM/STM (and PUSH/POP): base register, register list,
   write-back, and the UNPREDICTABLE-use warnings from the ARM ARM.
   PUSH/POP with a single register is converted to the A2 (single
   register LDR/STR) encoding.  */
8499 encode_ldmstm(int from_push_pop_mnem)
8501 int base_reg = inst.operands[0].reg;
8502 int range = inst.operands[1].imm;
8505 inst.instruction |= base_reg << 16;
8506 inst.instruction |= range;
/* '^' suffix on the register list selects the user-bank / exception-
   return forms (LDM type 2/3).  */
8508 if (inst.operands[1].writeback)
8509 inst.instruction |= LDM_TYPE_2_OR_3;
8511 if (inst.operands[0].writeback)
8513 inst.instruction |= WRITE_BACK;
8514 /* Check for unpredictable uses of writeback. */
8515 if (inst.instruction & LOAD_BIT)
8517 /* Not allowed in LDM type 2. */
8518 if ((inst.instruction & LDM_TYPE_2_OR_3)
8519 && ((range & (1 << REG_PC)) == 0))
8520 as_warn (_("writeback of base register is UNPREDICTABLE"));
8521 /* Only allowed if base reg not in list for other types. */
8522 else if (range & (1 << base_reg))
8523 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8527 /* Not allowed for type 2. */
8528 if (inst.instruction & LDM_TYPE_2_OR_3)
8529 as_warn (_("writeback of base register is UNPREDICTABLE"));
8530 /* Only allowed if base reg not in list, or first in list. */
8531 else if ((range & (1 << base_reg))
8532 && (range & ((1 << base_reg) - 1)))
8533 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8537 /* If PUSH/POP has only one register, then use the A2 encoding. */
8538 one_reg = only_one_reg_in_list (range);
8539 if (from_push_pop_mnem && one_reg >= 0)
8541 int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;
8543 inst.instruction &= A_COND_MASK;
8544 inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
8545 inst.instruction |= one_reg << 12;
/* do_ldmstm: plain wrapper for the explicit LDM/STM mnemonics.  */
8552 encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
8555 /* ARMv5TE load-consecutive (argument parse)
/* ldrd/strd Rt, Rt2, <addr>: Rt must be even, Rt2 must be Rt+1 (and
   defaults to it), r14 excluded because Rt2 would then be PC.  */
8564 constraint (inst.operands[0].reg % 2 != 0,
8565 _("first transfer register must be even"));
8566 constraint (inst.operands[1].present
8567 && inst.operands[1].reg != inst.operands[0].reg + 1,
8568 _("can only transfer two consecutive registers"));
8569 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8570 constraint (!inst.operands[2].isreg, _("'[' expected"));
8572 if (!inst.operands[1].present)
8573 inst.operands[1].reg = inst.operands[0].reg + 1;
8575 /* encode_arm_addr_mode_3 will diagnose overlap between the base
8576 register and the first register written; we have to diagnose
8577 overlap between the base and the second register written here. */
8579 if (inst.operands[2].reg == inst.operands[1].reg
8580 && (inst.operands[2].writeback || inst.operands[2].postind))
8581 as_warn (_("base register written back, and overlaps "
8582 "second transfer register"));
8584 if (!(inst.instruction & V4_STR_BIT))
8586 /* For an index-register load, the index register must not overlap the
8587 destination (even if not write-back). */
8588 if (inst.operands[2].immisreg
8589 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
8590 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
8591 as_warn (_("index register overlaps transfer register"));
8593 inst.instruction |= inst.operands[0].reg << 12;
8594 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
/* ldrex Rt, [Rn]: only the plain pre-indexed, zero-offset register
   form is legal; all other address shapes are rejected together.  */
8600 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8601 || inst.operands[1].postind || inst.operands[1].writeback
8602 || inst.operands[1].immisreg || inst.operands[1].shifted
8603 || inst.operands[1].negative
8604 /* This can arise if the programmer has written
8606 or if they have mistakenly used a register name as the last
8609 It is very difficult to distinguish between these two cases
8610 because "rX" might actually be a label. ie the register
8611 name has been occluded by a symbol of the same name. So we
8612 just generate a general 'bad addressing mode' type error
8613 message and leave it up to the programmer to discover the
8614 true cause and fix their mistake. */
8615 || (inst.operands[1].reg == REG_PC),
8618 constraint (inst.reloc.exp.X_op != O_constant
8619 || inst.reloc.exp.X_add_number != 0,
8620 _("offset must be zero in ARM encoding"));
8622 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
8624 inst.instruction |= inst.operands[0].reg << 12;
8625 inst.instruction |= inst.operands[1].reg << 16;
8626 inst.reloc.type = BFD_RELOC_UNUSED;
/* ldrexd Rt, Rt2, [Rn]: same even/consecutive pairing rules as ldrd.  */
8632 constraint (inst.operands[0].reg % 2 != 0,
8633 _("even register required"));
8634 constraint (inst.operands[1].present
8635 && inst.operands[1].reg != inst.operands[0].reg + 1,
8636 _("can only load two consecutive registers"));
8637 /* If op 1 were present and equal to PC, this function wouldn't
8638 have been called in the first place. */
8639 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8641 inst.instruction |= inst.operands[0].reg << 12;
8642 inst.instruction |= inst.operands[2].reg << 16;
8645 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
8646 which is not a multiple of four is UNPREDICTABLE. */
/* Diagnose a PC-relative literal load into r15 whose immediate is not
   word-aligned.  Only the pure immediate form is checked (register
   offsets are exempt via !immisreg).  Fix: the diagnostic previously
   misspelled "aligned" as "alligned".  */
8648 check_ldr_r15_aligned (void)
8650 constraint (!(inst.operands[1].immisreg)
8651 && (inst.operands[0].reg == REG_PC
8652 && inst.operands[1].reg == REG_PC
8653 && (inst.reloc.exp.X_add_number & 0x3)),
8654 _("ldr to register 15 must be 4-byte aligned"));
/* do_ldst: ordinary LDR/STR; "=expr" operands go through the
   literal-pool path, then mode 2 addressing is encoded and the
   ldr-pc alignment check applied.  The function header line is
   missing from this excerpt.  */
8660 inst.instruction |= inst.operands[0].reg << 12;
8661 if (!inst.operands[1].isreg)
8662 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
8664 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
8665 check_ldr_r15_aligned ();
8671 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
/* A bare [Rn] is parsed as pre-indexed with zero offset; convert it
   to the post-indexed write-back form these instructions require.  */
8673 if (inst.operands[1].preind)
8675 constraint (inst.reloc.exp.X_op != O_constant
8676 || inst.reloc.exp.X_add_number != 0,
8677 _("this instruction requires a post-indexed address"));
8679 inst.operands[1].preind = 0;
8680 inst.operands[1].postind = 1;
8681 inst.operands[1].writeback = 1;
8683 inst.instruction |= inst.operands[0].reg << 12;
8684 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
8687 /* Halfword and signed-byte load/store operations. */
/* do_ldstv4: mode 3 equivalent of do_ldst; PC transfer register is
   rejected outright.  */
8692 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8693 inst.instruction |= inst.operands[0].reg << 12;
8694 if (!inst.operands[1].isreg)
8695 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
8697 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
8703 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
/* do_ldsttv4: same bare-[Rn] fixup as above, for the mode 3 T forms.  */
8705 if (inst.operands[1].preind)
8707 constraint (inst.reloc.exp.X_op != O_constant
8708 || inst.reloc.exp.X_add_number != 0,
8709 _("this instruction requires a post-indexed address"));
8711 inst.operands[1].preind = 0;
8712 inst.operands[1].postind = 1;
8713 inst.operands[1].writeback = 1;
8715 inst.instruction |= inst.operands[0].reg << 12;
8716 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
8719 /* Co-processor register load/store.
8720 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
8724 inst.instruction |= inst.operands[0].reg << 8;
8725 inst.instruction |= inst.operands[1].reg << 12;
8726 encode_arm_cp_address (2, TRUE, TRUE, 0);
8732 /* This restriction does not apply to mls (nor to mla in v6 or later). */
8733 if (inst.operands[0].reg == inst.operands[1].reg
8734 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
8735 && !(inst.instruction & 0x00400000))
8736 as_tsktsk (_("Rd and Rm should be different in mla"));
8738 inst.instruction |= inst.operands[0].reg << 16;
8739 inst.instruction |= inst.operands[1].reg;
8740 inst.instruction |= inst.operands[2].reg << 8;
8741 inst.instruction |= inst.operands[3].reg << 12;
8747 inst.instruction |= inst.operands[0].reg << 12;
8748 encode_arm_shifter_operand (1);
8751 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
8758 top = (inst.instruction & 0x00400000) != 0;
8759 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
8760 _(":lower16: not allowed this instruction"));
8761 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
8762 _(":upper16: not allowed instruction"));
8763 inst.instruction |= inst.operands[0].reg << 12;
8764 if (inst.reloc.type == BFD_RELOC_UNUSED)
8766 imm = inst.reloc.exp.X_add_number;
8767 /* The value is in two pieces: 0:11, 16:19. */
8768 inst.instruction |= (imm & 0x00000fff);
8769 inst.instruction |= (imm & 0x0000f000) << 4;
8773 static void do_vfp_nsyn_opcode (const char *);
8776 do_vfp_nsyn_mrs (void)
8778 if (inst.operands[0].isvec)
8780 if (inst.operands[1].reg != 1)
8781 first_error (_("operand 1 must be FPSCR"));
8782 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
8783 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
8784 do_vfp_nsyn_opcode ("fmstat");
8786 else if (inst.operands[1].isvec)
8787 do_vfp_nsyn_opcode ("fmrx");
8795 do_vfp_nsyn_msr (void)
8797 if (inst.operands[0].isvec)
8798 do_vfp_nsyn_opcode ("fmxr");
8808 unsigned Rt = inst.operands[0].reg;
8810 if (thumb_mode && Rt == REG_SP)
8812 inst.error = BAD_SP;
8816 /* APSR_ sets isvec. All other refs to PC are illegal. */
8817 if (!inst.operands[0].isvec && Rt == REG_PC)
8819 inst.error = BAD_PC;
8823 /* If we get through parsing the register name, we just insert the number
8824 generated into the instruction without further validation. */
8825 inst.instruction |= (inst.operands[1].reg << 16);
8826 inst.instruction |= (Rt << 12);
8832 unsigned Rt = inst.operands[1].reg;
8835 reject_bad_reg (Rt);
8836 else if (Rt == REG_PC)
8838 inst.error = BAD_PC;
8842 /* If we get through parsing the register name, we just insert the number
8843 generated into the instruction without further validation. */
8844 inst.instruction |= (inst.operands[0].reg << 16);
8845 inst.instruction |= (Rt << 12);
8853 if (do_vfp_nsyn_mrs () == SUCCESS)
8856 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
8857 inst.instruction |= inst.operands[0].reg << 12;
8859 if (inst.operands[1].isreg)
8861 br = inst.operands[1].reg;
8862 if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf000))
8863 as_bad (_("bad register for mrs"));
8867 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8868 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
8870 _("'APSR', 'CPSR' or 'SPSR' expected"));
8871 br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
8874 inst.instruction |= br;
8877 /* Two possible forms:
8878 "{C|S}PSR_<field>, Rm",
8879 "{C|S}PSR_f, #expression". */
8884 if (do_vfp_nsyn_msr () == SUCCESS)
8887 inst.instruction |= inst.operands[0].imm;
8888 if (inst.operands[1].isreg)
8889 inst.instruction |= inst.operands[1].reg;
8892 inst.instruction |= INST_IMMEDIATE;
8893 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8894 inst.reloc.pc_rel = 0;
8901 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
8903 if (!inst.operands[2].present)
8904 inst.operands[2].reg = inst.operands[0].reg;
8905 inst.instruction |= inst.operands[0].reg << 16;
8906 inst.instruction |= inst.operands[1].reg;
8907 inst.instruction |= inst.operands[2].reg << 8;
8909 if (inst.operands[0].reg == inst.operands[1].reg
8910 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8911 as_tsktsk (_("Rd and Rm should be different in mul"));
8914 /* Long Multiply Parser
8915 UMULL RdLo, RdHi, Rm, Rs
8916 SMULL RdLo, RdHi, Rm, Rs
8917 UMLAL RdLo, RdHi, Rm, Rs
8918 SMLAL RdLo, RdHi, Rm, Rs. */
8923 inst.instruction |= inst.operands[0].reg << 12;
8924 inst.instruction |= inst.operands[1].reg << 16;
8925 inst.instruction |= inst.operands[2].reg;
8926 inst.instruction |= inst.operands[3].reg << 8;
8928 /* rdhi and rdlo must be different. */
8929 if (inst.operands[0].reg == inst.operands[1].reg)
8930 as_tsktsk (_("rdhi and rdlo must be different"));
8932 /* rdhi, rdlo and rm must all be different before armv6. */
8933 if ((inst.operands[0].reg == inst.operands[2].reg
8934 || inst.operands[1].reg == inst.operands[2].reg)
8935 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
8936 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
8942 if (inst.operands[0].present
8943 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
8945 /* Architectural NOP hints are CPSR sets with no bits selected. */
8946 inst.instruction &= 0xf0000000;
8947 inst.instruction |= 0x0320f000;
8948 if (inst.operands[0].present)
8949 inst.instruction |= inst.operands[0].imm;
8953 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
8954 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
8955 Condition defaults to COND_ALWAYS.
8956 Error if Rd, Rn or Rm are R15. */
8961 inst.instruction |= inst.operands[0].reg << 12;
8962 inst.instruction |= inst.operands[1].reg << 16;
8963 inst.instruction |= inst.operands[2].reg;
8964 if (inst.operands[3].present)
8965 encode_arm_shift (3);
8968 /* ARM V6 PKHTB (Argument Parse). */
8973 if (!inst.operands[3].present)
8975 /* If the shift specifier is omitted, turn the instruction
8976 into pkhbt rd, rm, rn. */
8977 inst.instruction &= 0xfff00010;
8978 inst.instruction |= inst.operands[0].reg << 12;
8979 inst.instruction |= inst.operands[1].reg;
8980 inst.instruction |= inst.operands[2].reg << 16;
8984 inst.instruction |= inst.operands[0].reg << 12;
8985 inst.instruction |= inst.operands[1].reg << 16;
8986 inst.instruction |= inst.operands[2].reg;
8987 encode_arm_shift (3);
8991 /* ARMv5TE: Preload-Cache
8992 MP Extensions: Preload for write
8996 Syntactically, like LDR with B=1, W=0, L=1. */
9001 constraint (!inst.operands[0].isreg,
9002 _("'[' expected after PLD mnemonic"));
9003 constraint (inst.operands[0].postind,
9004 _("post-indexed expression used in preload instruction"));
9005 constraint (inst.operands[0].writeback,
9006 _("writeback used in preload instruction"));
9007 constraint (!inst.operands[0].preind,
9008 _("unindexed addressing used in preload instruction"));
9009 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
9012 /* ARMv7: PLI <addr_mode> */
9016 constraint (!inst.operands[0].isreg,
9017 _("'[' expected after PLI mnemonic"));
9018 constraint (inst.operands[0].postind,
9019 _("post-indexed expression used in preload instruction"));
9020 constraint (inst.operands[0].writeback,
9021 _("writeback used in preload instruction"));
9022 constraint (!inst.operands[0].preind,
9023 _("unindexed addressing used in preload instruction"));
9024 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
9025 inst.instruction &= ~PRE_INDEX;
9031 inst.operands[1] = inst.operands[0];
9032 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
9033 inst.operands[0].isreg = 1;
9034 inst.operands[0].writeback = 1;
9035 inst.operands[0].reg = REG_SP;
9036 encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
9039 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9040 word at the specified address and the following word
9042 Unconditionally executed.
9043 Error if Rn is R15. */
9048 inst.instruction |= inst.operands[0].reg << 16;
9049 if (inst.operands[0].writeback)
9050 inst.instruction |= WRITE_BACK;
9053 /* ARM V6 ssat (argument parse). */
9058 inst.instruction |= inst.operands[0].reg << 12;
9059 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9060 inst.instruction |= inst.operands[2].reg;
9062 if (inst.operands[3].present)
9063 encode_arm_shift (3);
9066 /* ARM V6 usat (argument parse). */
9071 inst.instruction |= inst.operands[0].reg << 12;
9072 inst.instruction |= inst.operands[1].imm << 16;
9073 inst.instruction |= inst.operands[2].reg;
9075 if (inst.operands[3].present)
9076 encode_arm_shift (3);
9079 /* ARM V6 ssat16 (argument parse). */
9084 inst.instruction |= inst.operands[0].reg << 12;
9085 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9086 inst.instruction |= inst.operands[2].reg;
9092 inst.instruction |= inst.operands[0].reg << 12;
9093 inst.instruction |= inst.operands[1].imm << 16;
9094 inst.instruction |= inst.operands[2].reg;
9097 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9098 preserving the other bits.
9100 setend <endian_specifier>, where <endian_specifier> is either
9106 if (warn_on_deprecated
9107 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
9108 as_warn (_("setend use is deprecated for ARMv8"));
9110 if (inst.operands[0].imm)
9111 inst.instruction |= 0x200;
9117 unsigned int Rm = (inst.operands[1].present
9118 ? inst.operands[1].reg
9119 : inst.operands[0].reg);
9121 inst.instruction |= inst.operands[0].reg << 12;
9122 inst.instruction |= Rm;
9123 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
9125 inst.instruction |= inst.operands[2].reg << 8;
9126 inst.instruction |= SHIFT_BY_REG;
9127 /* PR 12854: Error on extraneous shifts. */
9128 constraint (inst.operands[2].shifted,
9129 _("extraneous shift as part of operand to shift insn"));
9132 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
9138 inst.reloc.type = BFD_RELOC_ARM_SMC;
9139 inst.reloc.pc_rel = 0;
9145 inst.reloc.type = BFD_RELOC_ARM_HVC;
9146 inst.reloc.pc_rel = 0;
9152 inst.reloc.type = BFD_RELOC_ARM_SWI;
9153 inst.reloc.pc_rel = 0;
9156 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9157 SMLAxy{cond} Rd,Rm,Rs,Rn
9158 SMLAWy{cond} Rd,Rm,Rs,Rn
9159 Error if any register is R15. */
9164 inst.instruction |= inst.operands[0].reg << 16;
9165 inst.instruction |= inst.operands[1].reg;
9166 inst.instruction |= inst.operands[2].reg << 8;
9167 inst.instruction |= inst.operands[3].reg << 12;
9170 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9171 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9172 Error if any register is R15.
9173 Warning if Rdlo == Rdhi. */
9178 inst.instruction |= inst.operands[0].reg << 12;
9179 inst.instruction |= inst.operands[1].reg << 16;
9180 inst.instruction |= inst.operands[2].reg;
9181 inst.instruction |= inst.operands[3].reg << 8;
9183 if (inst.operands[0].reg == inst.operands[1].reg)
9184 as_tsktsk (_("rdhi and rdlo must be different"));
9187 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9188 SMULxy{cond} Rd,Rm,Rs
9189 Error if any register is R15. */
9194 inst.instruction |= inst.operands[0].reg << 16;
9195 inst.instruction |= inst.operands[1].reg;
9196 inst.instruction |= inst.operands[2].reg << 8;
9199 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9200 the same for both ARM and Thumb-2. */
9207 if (inst.operands[0].present)
9209 reg = inst.operands[0].reg;
9210 constraint (reg != REG_SP, _("SRS base register must be r13"));
9215 inst.instruction |= reg << 16;
9216 inst.instruction |= inst.operands[1].imm;
9217 if (inst.operands[0].writeback || inst.operands[1].writeback)
9218 inst.instruction |= WRITE_BACK;
9221 /* ARM V6 strex (argument parse). */
9226 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9227 || inst.operands[2].postind || inst.operands[2].writeback
9228 || inst.operands[2].immisreg || inst.operands[2].shifted
9229 || inst.operands[2].negative
9230 /* See comment in do_ldrex(). */
9231 || (inst.operands[2].reg == REG_PC),
9234 constraint (inst.operands[0].reg == inst.operands[1].reg
9235 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9237 constraint (inst.reloc.exp.X_op != O_constant
9238 || inst.reloc.exp.X_add_number != 0,
9239 _("offset must be zero in ARM encoding"));
9241 inst.instruction |= inst.operands[0].reg << 12;
9242 inst.instruction |= inst.operands[1].reg;
9243 inst.instruction |= inst.operands[2].reg << 16;
9244 inst.reloc.type = BFD_RELOC_UNUSED;
9250 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9251 || inst.operands[2].postind || inst.operands[2].writeback
9252 || inst.operands[2].immisreg || inst.operands[2].shifted
9253 || inst.operands[2].negative,
9256 constraint (inst.operands[0].reg == inst.operands[1].reg
9257 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9265 constraint (inst.operands[1].reg % 2 != 0,
9266 _("even register required"));
9267 constraint (inst.operands[2].present
9268 && inst.operands[2].reg != inst.operands[1].reg + 1,
9269 _("can only store two consecutive registers"));
9270 /* If op 2 were present and equal to PC, this function wouldn't
9271 have been called in the first place. */
9272 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
9274 constraint (inst.operands[0].reg == inst.operands[1].reg
9275 || inst.operands[0].reg == inst.operands[1].reg + 1
9276 || inst.operands[0].reg == inst.operands[3].reg,
9279 inst.instruction |= inst.operands[0].reg << 12;
9280 inst.instruction |= inst.operands[1].reg;
9281 inst.instruction |= inst.operands[3].reg << 16;
9288 constraint (inst.operands[0].reg == inst.operands[1].reg
9289 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9297 constraint (inst.operands[0].reg == inst.operands[1].reg
9298 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9303 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9304 extends it to 32-bits, and adds the result to a value in another
9305 register. You can specify a rotation by 0, 8, 16, or 24 bits
9306 before extracting the 16-bit value.
9307 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9308 Condition defaults to COND_ALWAYS.
9309 Error if any register uses R15. */
9314 inst.instruction |= inst.operands[0].reg << 12;
9315 inst.instruction |= inst.operands[1].reg << 16;
9316 inst.instruction |= inst.operands[2].reg;
9317 inst.instruction |= inst.operands[3].imm << 10;
9322 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9323 Condition defaults to COND_ALWAYS.
9324 Error if any register uses R15. */
9329 inst.instruction |= inst.operands[0].reg << 12;
9330 inst.instruction |= inst.operands[1].reg;
9331 inst.instruction |= inst.operands[2].imm << 10;
9334 /* VFP instructions. In a logical order: SP variant first, monad
9335 before dyad, arithmetic then move then load/store. */
9338 do_vfp_sp_monadic (void)
9340 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9341 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9345 do_vfp_sp_dyadic (void)
9347 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9348 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9349 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9353 do_vfp_sp_compare_z (void)
9355 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9359 do_vfp_dp_sp_cvt (void)
9361 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9362 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9366 do_vfp_sp_dp_cvt (void)
9368 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9369 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9373 do_vfp_reg_from_sp (void)
9375 inst.instruction |= inst.operands[0].reg << 12;
9376 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9380 do_vfp_reg2_from_sp2 (void)
9382 constraint (inst.operands[2].imm != 2,
9383 _("only two consecutive VFP SP registers allowed here"));
9384 inst.instruction |= inst.operands[0].reg << 12;
9385 inst.instruction |= inst.operands[1].reg << 16;
9386 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9390 do_vfp_sp_from_reg (void)
9392 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
9393 inst.instruction |= inst.operands[1].reg << 12;
9397 do_vfp_sp2_from_reg2 (void)
9399 constraint (inst.operands[0].imm != 2,
9400 _("only two consecutive VFP SP registers allowed here"));
9401 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
9402 inst.instruction |= inst.operands[1].reg << 12;
9403 inst.instruction |= inst.operands[2].reg << 16;
9407 do_vfp_sp_ldst (void)
9409 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9410 encode_arm_cp_address (1, FALSE, TRUE, 0);
9414 do_vfp_dp_ldst (void)
9416 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9417 encode_arm_cp_address (1, FALSE, TRUE, 0);
9422 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
9424 if (inst.operands[0].writeback)
9425 inst.instruction |= WRITE_BACK;
9427 constraint (ldstm_type != VFP_LDSTMIA,
9428 _("this addressing mode requires base-register writeback"));
9429 inst.instruction |= inst.operands[0].reg << 16;
9430 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
9431 inst.instruction |= inst.operands[1].imm;
9435 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
9439 if (inst.operands[0].writeback)
9440 inst.instruction |= WRITE_BACK;
9442 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
9443 _("this addressing mode requires base-register writeback"));
9445 inst.instruction |= inst.operands[0].reg << 16;
9446 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9448 count = inst.operands[1].imm << 1;
9449 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
9452 inst.instruction |= count;
9456 do_vfp_sp_ldstmia (void)
9458 vfp_sp_ldstm (VFP_LDSTMIA);
9462 do_vfp_sp_ldstmdb (void)
9464 vfp_sp_ldstm (VFP_LDSTMDB);
9468 do_vfp_dp_ldstmia (void)
9470 vfp_dp_ldstm (VFP_LDSTMIA);
9474 do_vfp_dp_ldstmdb (void)
9476 vfp_dp_ldstm (VFP_LDSTMDB);
9480 do_vfp_xp_ldstmia (void)
9482 vfp_dp_ldstm (VFP_LDSTMIAX);
9486 do_vfp_xp_ldstmdb (void)
9488 vfp_dp_ldstm (VFP_LDSTMDBX);
9492 do_vfp_dp_rd_rm (void)
9494 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9495 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9499 do_vfp_dp_rn_rd (void)
9501 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
9502 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9506 do_vfp_dp_rd_rn (void)
9508 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9509 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9513 do_vfp_dp_rd_rn_rm (void)
9515 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9516 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9517 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
9523 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9527 do_vfp_dp_rm_rd_rn (void)
9529 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
9530 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9531 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
9534 /* VFPv3 instructions. */
9536 do_vfp_sp_const (void)
9538 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9539 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9540 inst.instruction |= (inst.operands[1].imm & 0x0f);
9544 do_vfp_dp_const (void)
9546 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9547 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9548 inst.instruction |= (inst.operands[1].imm & 0x0f);
9552 vfp_conv (int srcsize)
9554 int immbits = srcsize - inst.operands[1].imm;
9556 if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
9558 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9559 i.e. immbits must be in range 0 - 16. */
9560 inst.error = _("immediate value out of range, expected range [0, 16]");
9563 else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
9565 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9566 i.e. immbits must be in range 0 - 31. */
9567 inst.error = _("immediate value out of range, expected range [1, 32]");
9571 inst.instruction |= (immbits & 1) << 5;
9572 inst.instruction |= (immbits >> 1);
9576 do_vfp_sp_conv_16 (void)
9578 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9583 do_vfp_dp_conv_16 (void)
9585 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9590 do_vfp_sp_conv_32 (void)
9592 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9597 do_vfp_dp_conv_32 (void)
9599 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9603 /* FPA instructions. Also in a logical order. */
9608 inst.instruction |= inst.operands[0].reg << 16;
9609 inst.instruction |= inst.operands[1].reg;
9613 do_fpa_ldmstm (void)
9615 inst.instruction |= inst.operands[0].reg << 12;
9616 switch (inst.operands[1].imm)
9618 case 1: inst.instruction |= CP_T_X; break;
9619 case 2: inst.instruction |= CP_T_Y; break;
9620 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
9625 if (inst.instruction & (PRE_INDEX | INDEX_UP))
9627 /* The instruction specified "ea" or "fd", so we can only accept
9628 [Rn]{!}. The instruction does not really support stacking or
9629 unstacking, so we have to emulate these by setting appropriate
9630 bits and offsets. */
9631 constraint (inst.reloc.exp.X_op != O_constant
9632 || inst.reloc.exp.X_add_number != 0,
9633 _("this instruction does not support indexing"));
9635 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
9636 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
9638 if (!(inst.instruction & INDEX_UP))
9639 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
9641 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
9643 inst.operands[2].preind = 0;
9644 inst.operands[2].postind = 1;
9648 encode_arm_cp_address (2, TRUE, TRUE, 0);
9651 /* iWMMXt instructions: strictly in alphabetical order. */
9654 do_iwmmxt_tandorc (void)
9656 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
9660 do_iwmmxt_textrc (void)
9662 inst.instruction |= inst.operands[0].reg << 12;
9663 inst.instruction |= inst.operands[1].imm;
9667 do_iwmmxt_textrm (void)
9669 inst.instruction |= inst.operands[0].reg << 12;
9670 inst.instruction |= inst.operands[1].reg << 16;
9671 inst.instruction |= inst.operands[2].imm;
9675 do_iwmmxt_tinsr (void)
9677 inst.instruction |= inst.operands[0].reg << 16;
9678 inst.instruction |= inst.operands[1].reg << 12;
9679 inst.instruction |= inst.operands[2].imm;
9683 do_iwmmxt_tmia (void)
9685 inst.instruction |= inst.operands[0].reg << 5;
9686 inst.instruction |= inst.operands[1].reg;
9687 inst.instruction |= inst.operands[2].reg << 12;
9691 do_iwmmxt_waligni (void)
9693 inst.instruction |= inst.operands[0].reg << 12;
9694 inst.instruction |= inst.operands[1].reg << 16;
9695 inst.instruction |= inst.operands[2].reg;
9696 inst.instruction |= inst.operands[3].imm << 20;
9700 do_iwmmxt_wmerge (void)
9702 inst.instruction |= inst.operands[0].reg << 12;
9703 inst.instruction |= inst.operands[1].reg << 16;
9704 inst.instruction |= inst.operands[2].reg;
9705 inst.instruction |= inst.operands[3].imm << 21;
9709 do_iwmmxt_wmov (void)
9711 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
9712 inst.instruction |= inst.operands[0].reg << 12;
9713 inst.instruction |= inst.operands[1].reg << 16;
9714 inst.instruction |= inst.operands[1].reg;
9718 do_iwmmxt_wldstbh (void)
9721 inst.instruction |= inst.operands[0].reg << 12;
9723 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
9725 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
9726 encode_arm_cp_address (1, TRUE, FALSE, reloc);
9730 do_iwmmxt_wldstw (void)
9732 /* RIWR_RIWC clears .isreg for a control register. */
9733 if (!inst.operands[0].isreg)
9735 constraint (inst.cond != COND_ALWAYS, BAD_COND);
9736 inst.instruction |= 0xf0000000;
9739 inst.instruction |= inst.operands[0].reg << 12;
9740 encode_arm_cp_address (1, TRUE, TRUE, 0);
9744 do_iwmmxt_wldstd (void)
9746 inst.instruction |= inst.operands[0].reg << 12;
9747 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
9748 && inst.operands[1].immisreg)
9750 inst.instruction &= ~0x1a000ff;
9751 inst.instruction |= (0xf << 28);
9752 if (inst.operands[1].preind)
9753 inst.instruction |= PRE_INDEX;
9754 if (!inst.operands[1].negative)
9755 inst.instruction |= INDEX_UP;
9756 if (inst.operands[1].writeback)
9757 inst.instruction |= WRITE_BACK;
9758 inst.instruction |= inst.operands[1].reg << 16;
9759 inst.instruction |= inst.reloc.exp.X_add_number << 4;
9760 inst.instruction |= inst.operands[1].imm;
9763 encode_arm_cp_address (1, TRUE, FALSE, 0);
9767 do_iwmmxt_wshufh (void)
9769 inst.instruction |= inst.operands[0].reg << 12;
9770 inst.instruction |= inst.operands[1].reg << 16;
9771 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
9772 inst.instruction |= (inst.operands[2].imm & 0x0f);
9776 do_iwmmxt_wzero (void)
9778 /* WZERO reg is an alias for WANDN reg, reg, reg. */
9779 inst.instruction |= inst.operands[0].reg;
9780 inst.instruction |= inst.operands[0].reg << 12;
9781 inst.instruction |= inst.operands[0].reg << 16;
9785 do_iwmmxt_wrwrwr_or_imm5 (void)
9787 if (inst.operands[2].isreg)
9790 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
9791 _("immediate operand requires iWMMXt2"));
9793 if (inst.operands[2].imm == 0)
9795 switch ((inst.instruction >> 20) & 0xf)
9801 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
9802 inst.operands[2].imm = 16;
9803 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
9809 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
9810 inst.operands[2].imm = 32;
9811 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
9818 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
9820 wrn = (inst.instruction >> 16) & 0xf;
9821 inst.instruction &= 0xff0fff0f;
9822 inst.instruction |= wrn;
9823 /* Bail out here; the instruction is now assembled. */
9828 /* Map 32 -> 0, etc. */
9829 inst.operands[2].imm &= 0x1f;
9830 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
9834 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
9835 operations first, then control, shift, and load/store. */
9837 /* Insns like "foo X,Y,Z". */
9840 do_mav_triple (void)
9842 inst.instruction |= inst.operands[0].reg << 16;
9843 inst.instruction |= inst.operands[1].reg;
9844 inst.instruction |= inst.operands[2].reg << 12;
9847 /* Insns like "foo W,X,Y,Z".
9848 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
9853 inst.instruction |= inst.operands[0].reg << 5;
9854 inst.instruction |= inst.operands[1].reg << 12;
9855 inst.instruction |= inst.operands[2].reg << 16;
9856 inst.instruction |= inst.operands[3].reg;
9859 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
9863 inst.instruction |= inst.operands[1].reg << 12;
9866 /* Maverick shift immediate instructions.
9867 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
9868 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
9873 int imm = inst.operands[2].imm;
9875 inst.instruction |= inst.operands[0].reg << 12;
9876 inst.instruction |= inst.operands[1].reg << 16;
9878 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
9879 Bits 5-7 of the insn should have bits 4-6 of the immediate.
9880 Bit 4 should be 0. */
9881 imm = (imm & 0xf) | ((imm & 0x70) << 1);
9883 inst.instruction |= imm;
9886 /* XScale instructions. Also sorted arithmetic before move. */
9888 /* Xscale multiply-accumulate (argument parse)
9891 MIAxycc acc0,Rm,Rs. */
9896 inst.instruction |= inst.operands[1].reg;
9897 inst.instruction |= inst.operands[2].reg << 12;
9900 /* Xscale move-accumulator-register (argument parse)
9902 MARcc acc0,RdLo,RdHi. */
9907 inst.instruction |= inst.operands[1].reg << 12;
9908 inst.instruction |= inst.operands[2].reg << 16;
9911 /* Xscale move-register-accumulator (argument parse)
9913 MRAcc RdLo,RdHi,acc0. */
9918 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
9919 inst.instruction |= inst.operands[0].reg << 12;
9920 inst.instruction |= inst.operands[1].reg << 16;
9923 /* Encoding functions relevant only to Thumb. */
9925 /* inst.operands[i] is a shifted-register operand; encode
9926 it into inst.instruction in the format used by Thumb32. */
9929 encode_thumb32_shifted_operand (int i)
9931 unsigned int value = inst.reloc.exp.X_add_number;
9932 unsigned int shift = inst.operands[i].shift_kind;
9934 constraint (inst.operands[i].immisreg,
9935 _("shift by register not allowed in thumb mode"));
9936 inst.instruction |= inst.operands[i].reg;
9937 if (shift == SHIFT_RRX)
9938 inst.instruction |= SHIFT_ROR << 4;
9941 constraint (inst.reloc.exp.X_op != O_constant,
9942 _("expression too complex"));
9944 constraint (value > 32
9945 || (value == 32 && (shift == SHIFT_LSL
9946 || shift == SHIFT_ROR)),
9947 _("shift expression is too large"));
9951 else if (value == 32)
9954 inst.instruction |= shift << 4;
9955 inst.instruction |= (value & 0x1c) << 10;
9956 inst.instruction |= (value & 0x03) << 6;
9961 /* inst.operands[i] was set up by parse_address. Encode it into a
9962 Thumb32 format load or store instruction. Reject forms that cannot
9963 be used with such instructions. If is_t is true, reject forms that
9964 cannot be used with a T instruction; if is_d is true, reject forms
9965 that cannot be used with a D instruction. If it is a store insn,
9969 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
9971 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
9973 constraint (!inst.operands[i].isreg,
9974 _("Instruction does not support =N addresses"));
9976 inst.instruction |= inst.operands[i].reg << 16;
9977 if (inst.operands[i].immisreg)
9979 constraint (is_pc, BAD_PC_ADDRESSING);
9980 constraint (is_t || is_d, _("cannot use register index with this instruction"));
9981 constraint (inst.operands[i].negative,
9982 _("Thumb does not support negative register indexing"));
9983 constraint (inst.operands[i].postind,
9984 _("Thumb does not support register post-indexing"));
9985 constraint (inst.operands[i].writeback,
9986 _("Thumb does not support register indexing with writeback"));
9987 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
9988 _("Thumb supports only LSL in shifted register indexing"));
9990 inst.instruction |= inst.operands[i].imm;
9991 if (inst.operands[i].shifted)
9993 constraint (inst.reloc.exp.X_op != O_constant,
9994 _("expression too complex"));
9995 constraint (inst.reloc.exp.X_add_number < 0
9996 || inst.reloc.exp.X_add_number > 3,
9997 _("shift out of range"));
9998 inst.instruction |= inst.reloc.exp.X_add_number << 4;
10000 inst.reloc.type = BFD_RELOC_UNUSED;
10002 else if (inst.operands[i].preind)
10004 constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
10005 constraint (is_t && inst.operands[i].writeback,
10006 _("cannot use writeback with this instruction"));
10007 constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
10008 BAD_PC_ADDRESSING);
10012 inst.instruction |= 0x01000000;
10013 if (inst.operands[i].writeback)
10014 inst.instruction |= 0x00200000;
10018 inst.instruction |= 0x00000c00;
10019 if (inst.operands[i].writeback)
10020 inst.instruction |= 0x00000100;
10022 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
10024 else if (inst.operands[i].postind)
10026 gas_assert (inst.operands[i].writeback);
10027 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
10028 constraint (is_t, _("cannot use post-indexing with this instruction"));
10031 inst.instruction |= 0x00200000;
10033 inst.instruction |= 0x00000900;
10034 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
10036 else /* unindexed - only for coprocessor */
10037 inst.error = _("instruction does not accept unindexed addressing");
10040 /* Table of Thumb instructions which exist in both 16- and 32-bit
10041 encodings (the latter only in post-V6T2 cores). The index is the
10042 value used in the insns table below. When there is more than one
10043 possible 16-bit encoding for the instruction, this table always
10045 Also contains several pseudo-instructions used during relaxation. */
/* Each entry is X(mnemonic-suffix, 16-bit opcode, 32-bit opcode).  The X
   macro is redefined three times below to expand this single table into
   the T_MNEM_* enum and the thumb_op16[]/thumb_op32[] opcode arrays, so
   the three stay in sync by construction.  An all-ones 32-bit field
   (see _muls) marks "no 32-bit form".  NOTE(review): this extract drops
   some original lines; the leading numbers are listing artifacts.  */
10046 #define T16_32_TAB \
10047 X(_adc, 4140, eb400000), \
10048 X(_adcs, 4140, eb500000), \
10049 X(_add, 1c00, eb000000), \
10050 X(_adds, 1c00, eb100000), \
10051 X(_addi, 0000, f1000000), \
10052 X(_addis, 0000, f1100000), \
10053 X(_add_pc,000f, f20f0000), \
10054 X(_add_sp,000d, f10d0000), \
10055 X(_adr, 000f, f20f0000), \
10056 X(_and, 4000, ea000000), \
10057 X(_ands, 4000, ea100000), \
10058 X(_asr, 1000, fa40f000), \
10059 X(_asrs, 1000, fa50f000), \
10060 X(_b, e000, f000b000), \
10061 X(_bcond, d000, f0008000), \
10062 X(_bic, 4380, ea200000), \
10063 X(_bics, 4380, ea300000), \
10064 X(_cmn, 42c0, eb100f00), \
10065 X(_cmp, 2800, ebb00f00), \
10066 X(_cpsie, b660, f3af8400), \
10067 X(_cpsid, b670, f3af8600), \
10068 X(_cpy, 4600, ea4f0000), \
10069 X(_dec_sp,80dd, f1ad0d00), \
10070 X(_eor, 4040, ea800000), \
10071 X(_eors, 4040, ea900000), \
10072 X(_inc_sp,00dd, f10d0d00), \
10073 X(_ldmia, c800, e8900000), \
10074 X(_ldr, 6800, f8500000), \
10075 X(_ldrb, 7800, f8100000), \
10076 X(_ldrh, 8800, f8300000), \
10077 X(_ldrsb, 5600, f9100000), \
10078 X(_ldrsh, 5e00, f9300000), \
10079 X(_ldr_pc,4800, f85f0000), \
10080 X(_ldr_pc2,4800, f85f0000), \
10081 X(_ldr_sp,9800, f85d0000), \
10082 X(_lsl, 0000, fa00f000), \
10083 X(_lsls, 0000, fa10f000), \
10084 X(_lsr, 0800, fa20f000), \
10085 X(_lsrs, 0800, fa30f000), \
10086 X(_mov, 2000, ea4f0000), \
10087 X(_movs, 2000, ea5f0000), \
10088 X(_mul, 4340, fb00f000), \
10089 X(_muls, 4340, ffffffff), /* no 32b muls */ \
10090 X(_mvn, 43c0, ea6f0000), \
10091 X(_mvns, 43c0, ea7f0000), \
10092 X(_neg, 4240, f1c00000), /* rsb #0 */ \
10093 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
10094 X(_orr, 4300, ea400000), \
10095 X(_orrs, 4300, ea500000), \
10096 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
10097 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
10098 X(_rev, ba00, fa90f080), \
10099 X(_rev16, ba40, fa90f090), \
10100 X(_revsh, bac0, fa90f0b0), \
10101 X(_ror, 41c0, fa60f000), \
10102 X(_rors, 41c0, fa70f000), \
10103 X(_sbc, 4180, eb600000), \
10104 X(_sbcs, 4180, eb700000), \
10105 X(_stmia, c000, e8800000), \
10106 X(_str, 6000, f8400000), \
10107 X(_strb, 7000, f8000000), \
10108 X(_strh, 8000, f8200000), \
10109 X(_str_sp,9000, f84d0000), \
10110 X(_sub, 1e00, eba00000), \
10111 X(_subs, 1e00, ebb00000), \
10112 X(_subi, 8000, f1a00000), \
10113 X(_subis, 8000, f1b00000), \
10114 X(_sxtb, b240, fa4ff080), \
10115 X(_sxth, b200, fa0ff080), \
10116 X(_tst, 4200, ea100f00), \
10117 X(_uxtb, b2c0, fa5ff080), \
10118 X(_uxth, b280, fa1ff080), \
10119 X(_nop, bf00, f3af8000), \
10120 X(_yield, bf10, f3af8001), \
10121 X(_wfe, bf20, f3af8002), \
10122 X(_wfi, bf30, f3af8003), \
10123 X(_sev, bf40, f3af8004), \
10124 X(_sevl, bf50, f3af8005), \
10125 X(_udf, de00, f7f0a000)
10127 /* To catch errors in encoding functions, the codes are all offset by
10128 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
10129 as 16-bit instructions. */
/* First expansion: the T_MNEM_* enum, numbered upward from
   T16_32_OFFSET + 1 so a code is never a valid 16-bit opcode.  */
10130 #define X(a,b,c) T_MNEM##a
10131 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
/* Second expansion: 16-bit opcode per T_MNEM code; THUMB_OP16 maps a
   code back to its 16-bit encoding.  */
10134 #define X(a,b,c) 0x##b
10135 static const unsigned short thumb_op16[] = { T16_32_TAB };
10136 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
/* Third expansion: 32-bit opcode per T_MNEM code.  Bit 20 of the 32-bit
   encoding is the S (flag-setting) bit, hence THUMB_SETS_FLAGS.  */
10139 #define X(a,b,c) 0x##c
10140 static const unsigned int thumb_op32[] = { T16_32_TAB };
10141 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
10142 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
10146 /* Thumb instruction encoders, in alphabetical order. */
10148 /* ADDW or SUBW. */
/* Encode the 12-bit-immediate ADDW/SUBW form: Rd in bits 8-11, Rn in
   bits 16-19; the immediate is fixed up later via
   BFD_RELOC_ARM_T32_IMM12.  The visible code rejects PC as Rd and runs
   reject_bad_reg on it (a guarding condition may sit on a line dropped
   from this extract — NOTE(review): declaration/brace lines missing).  */
10151 do_t_add_sub_w (void)
10155 Rd = inst.operands[0].reg;
10156 Rn = inst.operands[1].reg;
10158 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10159 is the SP-{plus,minus}-immediate form of the instruction. */
10161 constraint (Rd == REG_PC, BAD_PC);
10163 reject_bad_reg (Rd);
10165 inst.instruction |= (Rn << 16) | (Rd << 8);
10166 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10169 /* Parse an add or subtract instruction. We get here with inst.instruction
10170 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
/* NOTE(review): this extract drops a number of original lines (braces,
   declarations, else-arms); comments below annotate only what is
   visible.  Overall shape: Rd is operand 0, Rs is operand 1 (or Rd
   again in the two-operand form), and the third operand is either an
   immediate or a register.  Unified syntax tries narrow (16-bit)
   encodings with relaxation first, then falls back to 32-bit; the
   pre-UAL path only accepts 16-bit forms.  */
10173 do_t_add_sub (void)
10177 Rd = inst.operands[0].reg;
10178 Rs = (inst.operands[1].present
10179 ? inst.operands[1].reg /* Rd, Rs, foo */
10180 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10183 set_it_insn_type_last ();
/* --- unified (UAL) syntax --- */
10185 if (unified_syntax)
10188 bfd_boolean narrow;
10191 flags = (inst.instruction == T_MNEM_adds
10192 || inst.instruction == T_MNEM_subs);
/* Inside an IT block the flag-setting forms must be wide; outside,
   the non-flag forms must be (a dropped if/else selects which).  */
10194 narrow = !in_it_block ();
10196 narrow = in_it_block ();
/* Immediate third operand.  */
10197 if (!inst.operands[2].isreg)
10201 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
10203 add = (inst.instruction == T_MNEM_add
10204 || inst.instruction == T_MNEM_adds);
10206 if (inst.size_req != 4)
10208 /* Attempt to use a narrow opcode, with relaxation if
/* Pick the best 16-bit pseudo-opcode for SP/PC-relative and
   low-register cases; relaxation may widen it later.  */
10210 if (Rd == REG_SP && Rs == REG_SP && !flags)
10211 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
10212 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
10213 opcode = T_MNEM_add_sp;
10214 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
10215 opcode = T_MNEM_add_pc;
10216 else if (Rd <= 7 && Rs <= 7 && narrow)
10219 opcode = add ? T_MNEM_addis : T_MNEM_subis;
10221 opcode = add ? T_MNEM_addi : T_MNEM_subi;
10225 inst.instruction = THUMB_OP16(opcode);
10226 inst.instruction |= (Rd << 4) | Rs;
10227 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10228 if (inst.size_req != 2)
10229 inst.relax = opcode;
10232 constraint (inst.size_req == 2, BAD_HIREG);
10234 if (inst.size_req == 4
10235 || (inst.size_req != 2 && !opcode))
/* Rd == PC with an immediate: only the exception-return idiom
   SUBS PC, LR, #const (0..255) is legal; encoded directly with
   no relocation left pending.  */
10239 constraint (add, BAD_PC);
10240 constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
10241 _("only SUBS PC, LR, #const allowed"));
10242 constraint (inst.reloc.exp.X_op != O_constant,
10243 _("expression too complex"));
10244 constraint (inst.reloc.exp.X_add_number < 0
10245 || inst.reloc.exp.X_add_number > 0xff,
10246 _("immediate value out of range"));
10247 inst.instruction = T2_SUBS_PC_LR
10248 | inst.reloc.exp.X_add_number;
10249 inst.reloc.type = BFD_RELOC_UNUSED;
10252 else if (Rs == REG_PC)
10254 /* Always use addw/subw. */
10255 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
10256 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
/* Generic 32-bit immediate form; a dropped line ORs in the bits
   cleared by the 0xe1ffffff mask.  */
10260 inst.instruction = THUMB_OP32 (inst.instruction);
10261 inst.instruction = (inst.instruction & 0xe1ffffff)
10264 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10266 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
10268 inst.instruction |= Rd << 8;
10269 inst.instruction |= Rs << 16;
/* Register third operand.  */
10274 unsigned int value = inst.reloc.exp.X_add_number;
10275 unsigned int shift = inst.operands[2].shift_kind;
10277 Rn = inst.operands[2].reg;
10278 /* See if we can do this with a 16-bit instruction. */
10279 if (!inst.operands[2].shifted && inst.size_req != 4)
10281 if (Rd > 7 || Rs > 7 || Rn > 7)
10286 inst.instruction = ((inst.instruction == T_MNEM_adds
10287 || inst.instruction == T_MNEM_add)
10289 : T_OPCODE_SUB_R3)
10290 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
10294 if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
10296 /* Thumb-1 cores (except v6-M) require at least one high
10297 register in a narrow non flag setting add. */
10298 if (Rd > 7 || Rn > 7
10299 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
10300 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
/* 16-bit "add Rd, Rm" high-register form: Rd split across bit 7
   and bits 0-2, Rm (here Rn) in bits 3-6.  */
10307 inst.instruction = T_OPCODE_ADD_HI;
10308 inst.instruction |= (Rd & 8) << 4;
10309 inst.instruction |= (Rd & 7);
10310 inst.instruction |= Rn << 3;
10316 constraint (Rd == REG_PC, BAD_PC);
10317 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
10318 constraint (Rs == REG_PC, BAD_PC);
10319 reject_bad_reg (Rn);
10321 /* If we get here, it can't be done in 16 bits. */
10322 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
10323 _("shift must be constant"));
10324 inst.instruction = THUMB_OP32 (inst.instruction);
10325 inst.instruction |= Rd << 8;
10326 inst.instruction |= Rs << 16;
10327 constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
10328 _("shift value over 3 not allowed in thumb mode"));
10329 constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
10330 _("only LSL shift allowed in thumb mode"));
10331 encode_thumb32_shifted_operand (2);
/* --- pre-UAL (divided) syntax: 16-bit encodings only --- */
10336 constraint (inst.instruction == T_MNEM_adds
10337 || inst.instruction == T_MNEM_subs,
10340 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
10342 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
10343 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
10346 inst.instruction = (inst.instruction == T_MNEM_add
10347 ? 0x0000 : 0x8000);
10348 inst.instruction |= (Rd << 4) | Rs;
10349 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10353 Rn = inst.operands[2].reg;
10354 constraint (inst.operands[2].shifted, _("unshifted register required"));
10356 /* We now have Rd, Rs, and Rn set to registers. */
10357 if (Rd > 7 || Rs > 7 || Rn > 7)
10359 /* Can't do this for SUB. */
10360 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
10361 inst.instruction = T_OPCODE_ADD_HI;
10362 inst.instruction |= (Rd & 8) << 4;
10363 inst.instruction |= (Rd & 7);
/* Rd must equal one source; encode the other in bits 3-6.  */
10365 inst.instruction |= Rn << 3;
10367 inst.instruction |= Rs << 3;
10369 constraint (1, _("dest must overlap one source register"));
10373 inst.instruction = (inst.instruction == T_MNEM_add
10374 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
10375 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
/* ADR encoder (presumably do_t_adr — the function header line is
   missing from this extract).  Three paths: (1) unified syntax with no
   size requirement and a low Rd defers to section relaxation starting
   from the 16-bit form; (2) unified syntax otherwise emits the 32-bit
   form with a pc-relative ADD_PC12 reloc; (3) the fallback emits the
   16-bit form with a THUMB_ADD reloc, pre-biasing the addend by -4 for
   PC-relative addressing.  */
10385 Rd = inst.operands[0].reg;
10386 reject_bad_reg (Rd);
10388 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10390 /* Defer to section relaxation. */
10391 inst.relax = inst.instruction;
10392 inst.instruction = THUMB_OP16 (inst.instruction);
10393 inst.instruction |= Rd << 4;
10395 else if (unified_syntax && inst.size_req != 2)
10397 /* Generate a 32-bit opcode. */
10398 inst.instruction = THUMB_OP32 (inst.instruction);
10399 inst.instruction |= Rd << 8;
10400 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10401 inst.reloc.pc_rel = 1;
10405 /* Generate a 16-bit opcode. */
10406 inst.instruction = THUMB_OP16 (inst.instruction);
10407 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10408 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
10409 inst.reloc.pc_rel = 1;
10411 inst.instruction |= Rd << 4;
10415 /* Arithmetic instructions for which there is just one 16-bit
10416 instruction encoding, and it allows only two low registers.
10417 For maximal compatibility with ARM syntax, we allow three register
10418 operands even when Thumb-32 instructions are not available, as long
10419 as the first two are identical. For instance, both "sbc r0,r1" and
10420 "sbc r0,r0,r1" are allowed. */
/* NOTE(review): the function header (presumably do_t_arit3) and some
   brace/else lines are missing from this extract.  */
10426 Rd = inst.operands[0].reg;
10427 Rs = (inst.operands[1].present
10428 ? inst.operands[1].reg /* Rd, Rs, foo */
10429 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10430 Rn = inst.operands[2].reg;
10432 reject_bad_reg (Rd);
10433 reject_bad_reg (Rs);
10434 if (inst.operands[2].isreg)
10435 reject_bad_reg (Rn);
10437 if (unified_syntax)
10439 if (!inst.operands[2].isreg)
10441 /* For an immediate, we always generate a 32-bit opcode;
10442 section relaxation will shrink it later if possible. */
10443 inst.instruction = THUMB_OP32 (inst.instruction);
10444 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10445 inst.instruction |= Rd << 8;
10446 inst.instruction |= Rs << 16;
10447 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10451 bfd_boolean narrow;
10453 /* See if we can do this with a 16-bit instruction. */
10454 if (THUMB_SETS_FLAGS (inst.instruction))
10455 narrow = !in_it_block ();
10457 narrow = in_it_block ();
/* Conditions that force the wide encoding (dropped lines clear
   `narrow` here): high registers, a shifted operand, or an
   explicit .w size request.  */
10459 if (Rd > 7 || Rn > 7 || Rs > 7)
10461 if (inst.operands[2].shifted)
10463 if (inst.size_req == 4)
/* 16-bit two-low-register form: Rd in bits 0-2, Rn in 3-5.  */
10469 inst.instruction = THUMB_OP16 (inst.instruction);
10470 inst.instruction |= Rd;
10471 inst.instruction |= Rn << 3;
10475 /* If we get here, it can't be done in 16 bits. */
10476 constraint (inst.operands[2].shifted
10477 && inst.operands[2].immisreg,
10478 _("shift must be constant"));
10479 inst.instruction = THUMB_OP32 (inst.instruction);
10480 inst.instruction |= Rd << 8;
10481 inst.instruction |= Rs << 16;
10482 encode_thumb32_shifted_operand (2);
/* --- pre-UAL syntax: 16-bit form only, Rd must equal Rs --- */
10487 /* On its face this is a lie - the instruction does set the
10488 flags. However, the only supported mnemonic in this mode
10489 says it doesn't. */
10490 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10492 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10493 _("unshifted register required"));
10494 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10495 constraint (Rd != Rs,
10496 _("dest and source1 must be the same register"));
10498 inst.instruction = THUMB_OP16 (inst.instruction);
10499 inst.instruction |= Rd;
10500 inst.instruction |= Rn << 3;
10504 /* Similarly, but for instructions where the arithmetic operation is
10505 commutative, so we can allow either of them to be different from
10506 the destination operand in a 16-bit instruction. For instance, all
10507 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
/* NOTE(review): the function header (presumably do_t_arit3c) and some
   brace/else lines are missing from this extract.  Differs from the
   non-commutative variant above in that the 16-bit form may pair Rd
   with either Rn or Rs (see the two narrow encodings below).  */
10514 Rd = inst.operands[0].reg;
10515 Rs = (inst.operands[1].present
10516 ? inst.operands[1].reg /* Rd, Rs, foo */
10517 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10518 Rn = inst.operands[2].reg;
10520 reject_bad_reg (Rd);
10521 reject_bad_reg (Rs);
10522 if (inst.operands[2].isreg)
10523 reject_bad_reg (Rn);
10525 if (unified_syntax)
10527 if (!inst.operands[2].isreg)
10529 /* For an immediate, we always generate a 32-bit opcode;
10530 section relaxation will shrink it later if possible. */
10531 inst.instruction = THUMB_OP32 (inst.instruction);
10532 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10533 inst.instruction |= Rd << 8;
10534 inst.instruction |= Rs << 16;
10535 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10539 bfd_boolean narrow;
10541 /* See if we can do this with a 16-bit instruction. */
10542 if (THUMB_SETS_FLAGS (inst.instruction))
10543 narrow = !in_it_block ();
10545 narrow = in_it_block ();
10547 if (Rd > 7 || Rn > 7 || Rs > 7)
10549 if (inst.operands[2].shifted)
10551 if (inst.size_req == 4)
/* Narrow form with Rd == Rs: other source (Rn) in bits 3-5.  */
10558 inst.instruction = THUMB_OP16 (inst.instruction);
10559 inst.instruction |= Rd;
10560 inst.instruction |= Rn << 3;
/* Narrow form with Rd == Rn (commutative swap): Rs in bits 3-5.  */
10565 inst.instruction = THUMB_OP16 (inst.instruction);
10566 inst.instruction |= Rd;
10567 inst.instruction |= Rs << 3;
10572 /* If we get here, it can't be done in 16 bits. */
10573 constraint (inst.operands[2].shifted
10574 && inst.operands[2].immisreg,
10575 _("shift must be constant"));
10576 inst.instruction = THUMB_OP32 (inst.instruction);
10577 inst.instruction |= Rd << 8;
10578 inst.instruction |= Rs << 16;
10579 encode_thumb32_shifted_operand (2);
/* --- pre-UAL syntax: 16-bit form, Rd must overlap one source --- */
10584 /* On its face this is a lie - the instruction does set the
10585 flags. However, the only supported mnemonic in this mode
10586 says it doesn't. */
10587 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10589 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10590 _("unshifted register required"));
10591 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10593 inst.instruction = THUMB_OP16 (inst.instruction);
10594 inst.instruction |= Rd;
10597 inst.instruction |= Rn << 3;
10599 inst.instruction |= Rs << 3;
10601 constraint (1, _("dest must overlap one source register"));
/* Bit-field clear encoder (presumably do_t_bfc; header line missing
   from this extract).  Operand 1 is the LSB, operand 2 the width; the
   hardware encoding wants LSB and MSB, so msb = lsb + width and the
   stored value is msb - 1.  The LSB is split: bits 4-2 go to encoding
   bits 14-12 and bits 1-0 to encoding bits 7-6.  */
10609 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
10610 constraint (msb > 32, _("bit-field extends past end of register"));
10611 /* The instruction encoding stores the LSB and MSB,
10612 not the LSB and width. */
10613 Rd = inst.operands[0].reg;
10614 reject_bad_reg (Rd);
10615 inst.instruction |= Rd << 8;
10616 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
10617 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
10618 inst.instruction |= msb - 1;
/* Bit-field insert encoder (presumably do_t_bfi).  Same LSB/MSB
   packing as above, plus a source register Rn in bits 16-19; a #0
   second operand is handled elsewhere as the BFC alias (see comment
   below — the branch body is on dropped lines).  */
10627 Rd = inst.operands[0].reg;
10628 reject_bad_reg (Rd);
10630 /* #0 in second position is alternative syntax for bfc, which is
10631 the same instruction but with REG_PC in the Rm field. */
10632 if (!inst.operands[1].isreg)
10636 Rn = inst.operands[1].reg;
10637 reject_bad_reg (Rn);
10640 msb = inst.operands[2].imm + inst.operands[3].imm;
10641 constraint (msb > 32, _("bit-field extends past end of register"));
10642 /* The instruction encoding stores the LSB and MSB,
10643 not the LSB and width. */
10644 inst.instruction |= Rd << 8;
10645 inst.instruction |= Rn << 16;
10646 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10647 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10648 inst.instruction |= msb - 1;
/* Bit-field extract encoder (presumably do_t_bfx, for sbfx/ubfx).
   Here the encoding stores LSB and width-minus-one directly.  */
10656 Rd = inst.operands[0].reg;
10657 Rn = inst.operands[1].reg;
10659 reject_bad_reg (Rd);
10660 reject_bad_reg (Rn);
10662 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
10663 _("bit-field extends past end of register"));
10664 inst.instruction |= Rd << 8;
10665 inst.instruction |= Rn << 16;
10666 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
10667 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
10668 inst.instruction |= inst.operands[3].imm - 1;
10671 /* ARM V5 Thumb BLX (argument parse)
10672 BLX <target_addr> which is BLX(1)
10673 BLX <Rm> which is BLX(2)
10674 Unfortunately, there are two different opcodes for this mnemonic.
10675 So, the insns[].value is not used, and the code here zaps values
10676 into inst.instruction.
10678 ??? How to take advantage of the additional two bits of displacement
10679 available in Thumb32 mode? Need new relocation? */
/* Register operand -> BLX(2) with Rm in bits 3-6 (PC rejected);
   otherwise BLX(1): force the 32-bit opcode and emit a PCREL_BLX
   branch relocation.  NOTE(review): header/brace lines dropped.  */
10684 set_it_insn_type_last ();
10686 if (inst.operands[0].isreg)
10688 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
10689 /* We have a register, so this is BLX(2). */
10690 inst.instruction |= inst.operands[0].reg << 3;
10694 /* No register. This must be BLX(1). */
10695 inst.instruction = 0xf000e800;
10696 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
/* Conditional/unconditional branch encoder (presumably do_t_branch;
   header lines missing from this extract).  Chooses between the 16-bit
   (BRANCH9/BRANCH12) and 32-bit (BRANCH20/BRANCH25) encodings and the
   matching pc-relative relocation; inside an IT block the branch is
   forced unconditional (COND_ALWAYS).  */
10708 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
10710 if (in_it_block ())
10712 /* Conditional branches inside IT blocks are encoded as unconditional
10714 cond = COND_ALWAYS;
10719 if (cond != COND_ALWAYS)
10720 opcode = T_MNEM_bcond;
10722 opcode = inst.instruction;
/* 32-bit form: chosen for explicit .w, or when no 16-bit size was
   requested and the target is a reloc/constant expression.  */
10725 && (inst.size_req == 4
10726 || (inst.size_req != 2
10727 && (inst.operands[0].hasreloc
10728 || inst.reloc.exp.X_op == O_constant))))
10730 inst.instruction = THUMB_OP32(opcode);
10731 if (cond == COND_ALWAYS)
10732 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
10735 gas_assert (cond != 0xF);
10736 inst.instruction |= cond << 22;
10737 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
/* 16-bit form; condition field lives in bits 8-11.  */
10742 inst.instruction = THUMB_OP16(opcode);
10743 if (cond == COND_ALWAYS)
10744 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
10747 inst.instruction |= cond << 8;
10748 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
10750 /* Allow section relaxation. */
10751 if (unified_syntax && inst.size_req != 2)
10752 inst.relax = opcode;
10754 inst.reloc.type = reloc;
10755 inst.reloc.pc_rel = 1;
10758 /* Actually do the work for Thumb state bkpt and hlt. The only difference
10759 between the two is the maximum immediate allowed - which is passed in
/* Common encoder: rejects a condition, range-checks the optional
   immediate against the caller-supplied maximum, and marks the insn
   IT-neutral.  Called below with 63 (hlt) and 255 (bkpt) — those two
   one-line callers are the bodies of the wrapper encoders whose
   signatures were dropped from this extract.  */
10762 do_t_bkpt_hlt1 (int range)
10764 constraint (inst.cond != COND_ALWAYS,
10765 _("instruction is always unconditional"));
10766 if (inst.operands[0].present)
10768 constraint (inst.operands[0].imm > range,
10769 _("immediate value out of range"));
10770 inst.instruction |= inst.operands[0].imm;
10773 set_it_insn_type (NEUTRAL_IT_INSN);
/* hlt wrapper: 6-bit immediate.  */
10779 do_t_bkpt_hlt1 (63);
/* bkpt wrapper: 8-bit immediate.  */
10785 do_t_bkpt_hlt1 (255);
/* BL/BLX-style long branch: emits a BRANCH23 pc-relative fixup, with a
   PLT-reloc compatibility workaround and (COFF only) redirection to a
   Thumb entry point for interworking.  */
10789 do_t_branch23 (void)
10791 set_it_insn_type_last ();
10792 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
10794 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
10795 this file. We used to simply ignore the PLT reloc type here --
10796 the branch encoding is now needed to deal with TLSCALL relocs.
10797 So if we see a PLT reloc now, put it back to how it used to be to
10798 keep the preexisting behaviour. */
10799 if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
10800 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
10802 #if defined(OBJ_COFF)
10803 /* If the destination of the branch is a defined symbol which does not have
10804 the THUMB_FUNC attribute, then we must be calling a function which has
10805 the (interfacearm) attribute. We look for the Thumb entry point to that
10806 function and change the branch to refer to that function instead. */
10807 if ( inst.reloc.exp.X_op == O_symbol
10808 && inst.reloc.exp.X_add_symbol != NULL
10809 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
10810 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol)) 
10811 inst.reloc.exp.X_add_symbol =
10812 find_real_start (inst.reloc.exp.X_add_symbol);
/* BX encoder (presumably do_t_bx; header dropped from this extract):
   Rm goes in bits 3-6.  */
10819 set_it_insn_type_last ();
10820 inst.instruction |= inst.operands[0].reg << 3;
10821 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
10822 should cause the alignment to be checked once it is known. This is
10823 because BX PC only works if the instruction is word aligned. */
/* BXJ encoder (presumably do_t_bxj): Rm in bits 16-19, with the usual
   SP/PC rejection via reject_bad_reg.  */
10831 set_it_insn_type_last ();
10832 Rm = inst.operands[0].reg;
10833 reject_bad_reg (Rm);
10834 inst.instruction |= Rm << 16;
/* CLZ encoder (presumably do_t_clz): Rd in bits 8-11; Rm is encoded
   twice, in bits 16-19 and bits 0-3, as the T32 encoding requires.  */
10843 Rd = inst.operands[0].reg;
10844 Rm = inst.operands[1].reg;
10846 reject_bad_reg (Rd);
10847 reject_bad_reg (Rm);
10849 inst.instruction |= Rd << 8;
10850 inst.instruction |= Rm << 16;
10851 inst.instruction |= Rm;
/* CPS encoder (presumably do_t_cps; header dropped): simple immediate
   OR-in, not allowed inside an IT block.  */
10857 set_it_insn_type (OUTSIDE_IT_INSN);
10858 inst.instruction |= inst.operands[0].imm;
/* CPSIE/CPSID encoder (presumably do_t_cpsi).  On v6+ (non-M) with a
   mode operand or an explicit wide request, build the 32-bit form:
   imod in bits 9-10, interrupt flags in bits 5-7, optional mode with
   its M bit (0x100).  Otherwise fall back to the 16-bit form, which
   cannot take a mode operand.  */
10864 set_it_insn_type (OUTSIDE_IT_INSN);
10866 && (inst.operands[1].present || inst.size_req == 4)
10867 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
10869 unsigned int imod = (inst.instruction & 0x0030) >> 4;
10870 inst.instruction = 0xf3af8000;
10871 inst.instruction |= imod << 9;
10872 inst.instruction |= inst.operands[0].imm << 5;
10873 if (inst.operands[1].present)
10874 inst.instruction |= 0x100 | inst.operands[1].imm;
10878 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
10879 && (inst.operands[0].imm & 4),
10880 _("selected processor does not support 'A' form "
10881 "of this instruction"));
10882 constraint (inst.operands[1].present || inst.size_req == 4,
10883 _("Thumb does not support the 2-argument "
10884 "form of this instruction"));
10885 inst.instruction |= inst.operands[0].imm;
10889 /* THUMB CPY instruction (argument parse). */
/* Wide form reuses the T32 MOV encoding; narrow form is the 16-bit
   high-register MOV with Rd split across bit 7 and bits 0-2.
   NOTE(review): header/brace lines dropped from this extract.  */
10894 if (inst.size_req == 4)
10896 inst.instruction = THUMB_OP32 (T_MNEM_mov);
10897 inst.instruction |= inst.operands[0].reg << 8;
10898 inst.instruction |= inst.operands[1].reg;
10902 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
10903 inst.instruction |= (inst.operands[0].reg & 0x7);
10904 inst.instruction |= inst.operands[1].reg << 3;
/* CBZ/CBNZ encoder (presumably do_t_cbz): low register only, not in an
   IT block, with the dedicated BRANCH7 pc-relative reloc.  */
10911 set_it_insn_type (OUTSIDE_IT_INSN);
10912 constraint (inst.operands[0].reg > 7, BAD_HIREG);
10913 inst.instruction |= inst.operands[0].reg;
10914 inst.reloc.pc_rel = 1;
10915 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
/* DBG-style encoder fragment (header dropped): immediate OR-in.  */
10921 inst.instruction |= inst.operands[0].imm;
/* SDIV/UDIV encoder (presumably do_t_div; header dropped): Rd in bits
   8-11, Rn in 16-19, Rm in 0-3; two-operand form duplicates Rd as Rn.  */
10927 unsigned Rd, Rn, Rm;
10929 Rd = inst.operands[0].reg;
10930 Rn = (inst.operands[1].present
10931 ? inst.operands[1].reg : Rd);
10932 Rm = inst.operands[2].reg;
10934 reject_bad_reg (Rd);
10935 reject_bad_reg (Rn);
10936 reject_bad_reg (Rm);
10938 inst.instruction |= Rd << 8;
10939 inst.instruction |= Rn << 16;
10940 inst.instruction |= Rm;
/* Hint-style encoder (presumably do_t_hint — TODO confirm): picks the
   32-bit opcode only on an explicit .w in unified syntax.  */
10946 if (unified_syntax && inst.size_req == 4)
10947 inst.instruction = THUMB_OP32 (inst.instruction);
10949 inst.instruction = THUMB_OP16 (inst.instruction);
/* IT-block encoder (presumably do_t_it; header dropped).  Records the
   block state in now_it (mask plus a sentinel bit, block length) and,
   for a negative condition (even cond value), inverts the relevant
   mask bits — the inversion assignments sit on dropped lines; what is
   visible is the length classification by lowest set mask bit.  The
   condition goes in bits 4-7 of the instruction.  */
10955 unsigned int cond = inst.operands[0].imm;
10957 set_it_insn_type (IT_INSN);
10958 now_it.mask = (inst.instruction & 0xf) | 0x10;
10960 now_it.warn_deprecated = FALSE;
10962 /* If the condition is a negative condition, invert the mask. */
10963 if ((cond & 0x1) == 0x0)
10965 unsigned int mask = inst.instruction & 0x000f;
10967 if ((mask & 0x7) == 0)
10969 /* No conversion needed. */
10970 now_it.block_length = 1;
10972 else if ((mask & 0x3) == 0)
10975 now_it.block_length = 2;
10977 else if ((mask & 0x1) == 0)
10980 now_it.block_length = 3;
10985 now_it.block_length = 4;
10988 inst.instruction &= 0xfff0;
10989 inst.instruction |= mask;
10992 inst.instruction |= cond << 4;
10995 /* Helper function used for both push/pop and ldm/stm. */
/* Validates the register list (no SP; no PC on stores; LR and PC not
   together on loads; base not in list with writeback) and emits the
   T32 encoding.  A single-register transfer is rewritten as a plain
   str/ldr with the equivalent addressing mode.  Bit 23 of the incoming
   opcode distinguishes increment-after from decrement-before.
   NOTE(review): several brace/else lines are dropped here.  */
10997 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
11001 load = (inst.instruction & (1 << 20)) != 0;
11003 if (mask & (1 << 13))
11004 inst.error = _("SP not allowed in register list");
11006 if ((mask & (1 << base)) != 0
11008 inst.error = _("having the base register in the register list when "
11009 "using write back is UNPREDICTABLE");
11013 if (mask & (1 << 15))
11015 if (mask & (1 << 14))
11016 inst.error = _("LR and PC should not both be in register list");
11018 set_it_insn_type_last ();
11023 if (mask & (1 << 15))
11024 inst.error = _("PC not allowed in register list");
11027 if ((mask & (mask - 1)) == 0)
11029 /* Single register transfers implemented as str/ldr. */
11032 if (inst.instruction & (1 << 23))
11033 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
11035 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
11039 if (inst.instruction & (1 << 23))
11040 inst.instruction = 0x00800000; /* ia -> [base] */
11042 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
/* Fold in the str/ldr base opcode; bit 20 selects load.  */
11045 inst.instruction |= 0xf8400000;
11047 inst.instruction |= 0x00100000;
/* For str/ldr the "mask" becomes the single register's number.  */
11049 mask = ffs (mask) - 1;
11052 else if (writeback)
11053 inst.instruction |= WRITE_BACK;
11055 inst.instruction |= mask;
11056 inst.instruction |= base << 16;
/* LDM/STM encoder (presumably do_t_ldmstm; header dropped).  Unified
   syntax tries several 16-bit special cases (plain ldmia/stmia with a
   low base and low reglist, single-register str/ldr, SP-based
   push/pop and str/ldr) before falling back to the 32-bit form via
   encode_thumb2_ldmstm; pre-UAL syntax only accepts the 16-bit
   ldmia/stmia and issues writeback diagnostics.  */
11062 /* This really doesn't seem worth it. */
11063 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
11064 _("expression too complex"));
11065 constraint (inst.operands[1].writeback,
11066 _("Thumb load/store multiple does not support {reglist}^"));
11068 if (unified_syntax)
11070 bfd_boolean narrow;
11074 /* See if we can use a 16-bit instruction. */
11075 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
11076 && inst.size_req != 4
11077 && !(inst.operands[1].imm & ~0xff))
11079 mask = 1 << inst.operands[0].reg;
11081 if (inst.operands[0].reg <= 7)
/* stmia narrow needs writeback; ldmia narrow needs writeback
   exactly when the base is NOT in the list.  */
11083 if (inst.instruction == T_MNEM_stmia
11084 ? inst.operands[0].writeback
11085 : (inst.operands[0].writeback
11086 == !(inst.operands[1].imm & mask)))
11088 if (inst.instruction == T_MNEM_stmia
11089 && (inst.operands[1].imm & mask)
11090 && (inst.operands[1].imm & (mask - 1)))
11091 as_warn (_("value stored for r%d is UNKNOWN"),
11092 inst.operands[0].reg);
11094 inst.instruction = THUMB_OP16 (inst.instruction);
11095 inst.instruction |= inst.operands[0].reg << 8;
11096 inst.instruction |= inst.operands[1].imm;
11099 else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
11101 /* This means 1 register in reg list one of 3 situations:
11102 1. Instruction is stmia, but without writeback.
11103 2. lmdia without writeback, but with Rn not in
11105 3. ldmia with writeback, but with Rn in reglist.
11106 Case 3 is UNPREDICTABLE behaviour, so we handle
11107 case 1 and 2 which can be converted into a 16-bit
11108 str or ldr. The SP cases are handled below. */
11109 unsigned long opcode;
11110 /* First, record an error for Case 3. */
11111 if (inst.operands[1].imm & mask
11112 && inst.operands[0].writeback)
11114 _("having the base register in the register list when "
11115 "using write back is UNPREDICTABLE");
11117 opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
11119 inst.instruction = THUMB_OP16 (opcode);
11120 inst.instruction |= inst.operands[0].reg << 3;
11121 inst.instruction |= (ffs (inst.operands[1].imm)-1);
11125 else if (inst.operands[0] .reg == REG_SP)
11127 if (inst.operands[0].writeback)
/* SP with writeback: rewrite as push/pop.  */
11130 THUMB_OP16 (inst.instruction == T_MNEM_stmia
11131 ? T_MNEM_push : T_MNEM_pop);
11132 inst.instruction |= inst.operands[1].imm;
11135 else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
/* SP, single register, no writeback: SP-relative str/ldr.  */
11138 THUMB_OP16 (inst.instruction == T_MNEM_stmia
11139 ? T_MNEM_str_sp : T_MNEM_ldr_sp);
11140 inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
/* Fall back to the 32-bit form.  */
11148 if (inst.instruction < 0xffff)
11149 inst.instruction = THUMB_OP32 (inst.instruction);
11151 encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
11152 inst.operands[0].writeback);
/* --- pre-UAL syntax --- */
11157 constraint (inst.operands[0].reg > 7
11158 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
11159 constraint (inst.instruction != T_MNEM_ldmia
11160 && inst.instruction != T_MNEM_stmia,
11161 _("Thumb-2 instruction only valid in unified syntax"));
11162 if (inst.instruction == T_MNEM_stmia)
11164 if (!inst.operands[0].writeback)
11165 as_warn (_("this instruction will write back the base register"));
11166 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
11167 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
11168 as_warn (_("value stored for r%d is UNKNOWN"),
11169 inst.operands[0].reg);
11173 if (!inst.operands[0].writeback
11174 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
11175 as_warn (_("this instruction will write back the base register"));
11176 else if (inst.operands[0].writeback
11177 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
11178 as_warn (_("this instruction will not write back the base register"));
11181 inst.instruction = THUMB_OP16 (inst.instruction);
11182 inst.instruction |= inst.operands[0].reg << 8;
11183 inst.instruction |= inst.operands[1].imm;
/* LDREX encoder (presumably do_t_ldrex; header dropped): only the
   plain [Rn, #imm] pre-indexed form is accepted, Rn may not be PC;
   Rt in bits 12-15, Rn in 16-19, offset via the U8 reloc.  */
11190 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11191 || inst.operands[1].postind || inst.operands[1].writeback
11192 || inst.operands[1].immisreg || inst.operands[1].shifted
11193 || inst.operands[1].negative,
11196 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11198 inst.instruction |= inst.operands[0].reg << 12;
11199 inst.instruction |= inst.operands[1].reg << 16;
11200 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
/* LDREXD encoder (presumably do_t_ldrexd): with the second register
   omitted it defaults to Rt+1 (so LR is rejected as Rt); the two
   destination registers must differ.  Rt in bits 12-15, Rt2 in 8-11,
   Rn in 16-19.  */
11206 if (!inst.operands[1].present)
11208 constraint (inst.operands[0].reg == REG_LR,
11209 _("r14 not allowed as first register "
11210 "when second register is omitted"));
11211 inst.operands[1].reg = inst.operands[0].reg + 1;
11213 constraint (inst.operands[0].reg == inst.operands[1].reg,
11216 inst.instruction |= inst.operands[0].reg << 12;
11217 inst.instruction |= inst.operands[1].reg << 8;
11218 inst.instruction |= inst.operands[2].reg << 16;
11224 unsigned long opcode;
11227 if (inst.operands[0].isreg
11228 && !inst.operands[0].preind
11229 && inst.operands[0].reg == REG_PC)
11230 set_it_insn_type_last ();
11232 opcode = inst.instruction;
11233 if (unified_syntax)
11235 if (!inst.operands[1].isreg)
11237 if (opcode <= 0xffff)
11238 inst.instruction = THUMB_OP32 (opcode);
11239 if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
11242 if (inst.operands[1].isreg
11243 && !inst.operands[1].writeback
11244 && !inst.operands[1].shifted && !inst.operands[1].postind
11245 && !inst.operands[1].negative && inst.operands[0].reg <= 7
11246 && opcode <= 0xffff
11247 && inst.size_req != 4)
11249 /* Insn may have a 16-bit form. */
11250 Rn = inst.operands[1].reg;
11251 if (inst.operands[1].immisreg)
11253 inst.instruction = THUMB_OP16 (opcode);
11255 if (Rn <= 7 && inst.operands[1].imm <= 7)
11257 else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
11258 reject_bad_reg (inst.operands[1].imm);
11260 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
11261 && opcode != T_MNEM_ldrsb)
11262 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
11263 || (Rn == REG_SP && opcode == T_MNEM_str))
11270 if (inst.reloc.pc_rel)
11271 opcode = T_MNEM_ldr_pc2;
11273 opcode = T_MNEM_ldr_pc;
11277 if (opcode == T_MNEM_ldr)
11278 opcode = T_MNEM_ldr_sp;
11280 opcode = T_MNEM_str_sp;
11282 inst.instruction = inst.operands[0].reg << 8;
11286 inst.instruction = inst.operands[0].reg;
11287 inst.instruction |= inst.operands[1].reg << 3;
11289 inst.instruction |= THUMB_OP16 (opcode);
11290 if (inst.size_req == 2)
11291 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11293 inst.relax = opcode;
11297 /* Definitely a 32-bit variant. */
11299 /* Warning for Erratum 752419. */
11300 if (opcode == T_MNEM_ldr
11301 && inst.operands[0].reg == REG_SP
11302 && inst.operands[1].writeback == 1
11303 && !inst.operands[1].immisreg)
11305 if (no_cpu_selected ()
11306 || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
11307 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
11308 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
11309 as_warn (_("This instruction may be unpredictable "
11310 "if executed on M-profile cores "
11311 "with interrupts enabled."));
11314 /* Do some validations regarding addressing modes. */
11315 if (inst.operands[1].immisreg)
11316 reject_bad_reg (inst.operands[1].imm);
11318 constraint (inst.operands[1].writeback == 1
11319 && inst.operands[0].reg == inst.operands[1].reg,
11322 inst.instruction = THUMB_OP32 (opcode);
11323 inst.instruction |= inst.operands[0].reg << 12;
11324 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
11325 check_ldr_r15_aligned ();
11329 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11331 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
11333 /* Only [Rn,Rm] is acceptable. */
11334 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
11335 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
11336 || inst.operands[1].postind || inst.operands[1].shifted
11337 || inst.operands[1].negative,
11338 _("Thumb does not support this addressing mode"));
11339 inst.instruction = THUMB_OP16 (inst.instruction);
11343 inst.instruction = THUMB_OP16 (inst.instruction);
11344 if (!inst.operands[1].isreg)
11345 if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
11348 constraint (!inst.operands[1].preind
11349 || inst.operands[1].shifted
11350 || inst.operands[1].writeback,
11351 _("Thumb does not support this addressing mode"));
11352 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
11354 constraint (inst.instruction & 0x0600,
11355 _("byte or halfword not valid for base register"));
11356 constraint (inst.operands[1].reg == REG_PC
11357 && !(inst.instruction & THUMB_LOAD_BIT),
11358 _("r15 based store not allowed"));
11359 constraint (inst.operands[1].immisreg,
11360 _("invalid base register for register offset"));
11362 if (inst.operands[1].reg == REG_PC)
11363 inst.instruction = T_OPCODE_LDR_PC;
11364 else if (inst.instruction & THUMB_LOAD_BIT)
11365 inst.instruction = T_OPCODE_LDR_SP;
11367 inst.instruction = T_OPCODE_STR_SP;
11369 inst.instruction |= inst.operands[0].reg << 8;
11370 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11374 constraint (inst.operands[1].reg > 7, BAD_HIREG);
11375 if (!inst.operands[1].immisreg)
11377 /* Immediate offset. */
11378 inst.instruction |= inst.operands[0].reg;
11379 inst.instruction |= inst.operands[1].reg << 3;
11380 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11384 /* Register offset. */
11385 constraint (inst.operands[1].imm > 7, BAD_HIREG);
11386 constraint (inst.operands[1].negative,
11387 _("Thumb does not support this addressing mode"));
11390 switch (inst.instruction)
11392 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
11393 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
11394 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
11395 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
11396 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
11397 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
11398 case 0x5600 /* ldrsb */:
11399 case 0x5e00 /* ldrsh */: break;
11403 inst.instruction |= inst.operands[0].reg;
11404 inst.instruction |= inst.operands[1].reg << 3;
11405 inst.instruction |= inst.operands[1].imm << 6;
/* Thumb-2 LDRD/STRD (doubleword load/store) encoder fragment.
   NOTE(review): this excerpt is a lossy sample - the function header,
   braces and some interior lines are not shown.  */
/* If the second transfer register was omitted, default it to Rt+1;
   r14 and r12 are then rejected, presumably because the implicit
   second register would be r15/r13 - confirm against full source.  */
11411 if (!inst.operands[1].present)
11413 inst.operands[1].reg = inst.operands[0].reg + 1;
11414 constraint (inst.operands[0].reg == REG_LR,
11415 _("r14 not allowed here"));
11416 constraint (inst.operands[0].reg == REG_R12,
11417 _("r12 not allowed here"));
/* Base writeback overlapping a transfer register is only warned
   about, not rejected.  */
11420 if (inst.operands[2].writeback
11421 && (inst.operands[0].reg == inst.operands[2].reg
11422 || inst.operands[1].reg == inst.operands[2].reg))
11423 as_warn (_("base register written back, and overlaps "
11424 "one of transfer registers"));
/* Rt goes in bits 12-15, Rt2 in bits 8-11; the addressing mode is
   encoded from operand 2 with the doubleword (is_d) flag set.  */
11426 inst.instruction |= inst.operands[0].reg << 12;
11427 inst.instruction |= inst.operands[1].reg << 8;
11428 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
/* Unprivileged load/store (LDRT/STRT family) encoder fragment:
   Rt in bits 12-15, address from operand 1 with is_t set.
   NOTE(review): function header not shown in this sampled excerpt.  */
11434 inst.instruction |= inst.operands[0].reg << 12;
11435 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
/* Thumb-2 multiply-accumulate (MLA/MLS-style, four registers).
   All four registers must avoid r13/r15 (reject_bad_reg); fields:
   Rd bits 8-11, Rn bits 16-19, Rm bits 0-3, Ra bits 12-15.
   NOTE(review): function header line is missing from this sample.  */
11441 unsigned Rd, Rn, Rm, Ra;
11443 Rd = inst.operands[0].reg;
11444 Rn = inst.operands[1].reg;
11445 Rm = inst.operands[2].reg;
11446 Ra = inst.operands[3].reg;
11448 reject_bad_reg (Rd);
11449 reject_bad_reg (Rn);
11450 reject_bad_reg (Rm);
11451 reject_bad_reg (Ra);
11453 inst.instruction |= Rd << 8;
11454 inst.instruction |= Rn << 16;
11455 inst.instruction |= Rm;
11456 inst.instruction |= Ra << 12;
/* Thumb-2 long multiply-accumulate (e.g. SMLAL/UMLAL family).
   RdLo bits 12-15, RdHi bits 8-11, Rn bits 16-19, Rm bits 0-3;
   r13/r15 rejected for all operands.
   NOTE(review): function header line is missing from this sample.  */
11462 unsigned RdLo, RdHi, Rn, Rm;
11464 RdLo = inst.operands[0].reg;
11465 RdHi = inst.operands[1].reg;
11466 Rn = inst.operands[2].reg;
11467 Rm = inst.operands[3].reg;
11469 reject_bad_reg (RdLo);
11470 reject_bad_reg (RdHi);
11471 reject_bad_reg (Rn);
11472 reject_bad_reg (Rm);
11474 inst.instruction |= RdLo << 12;
11475 inst.instruction |= RdHi << 8;
11476 inst.instruction |= Rn << 16;
11477 inst.instruction |= Rm;
/* Encode Thumb MOV/MOVS/CMP.  Chooses between 16-bit and 32-bit
   encodings depending on register range, IT-block state, shifts and
   any explicit .n/.w size request; register-shifted MOVs are
   re-encoded as the corresponding shift instruction.
   NOTE(review): this excerpt is a lossy sample - blank/brace lines
   and some interior lines are missing; read with the full source.  */
11481 do_t_mov_cmp (void)
11485 Rn = inst.operands[0].reg;
11486 Rm = inst.operands[1].reg;
/* MOV to PC ends an IT block (it is a branch in effect).  */
11489 set_it_insn_type_last ();
11491 if (unified_syntax)
/* Destination field position differs: bit 8 for MOV/MOVS, bit 16
   otherwise (CMP-style encodings).  */
11493 int r0off = (inst.instruction == T_MNEM_mov
11494 || inst.instruction == T_MNEM_movs) ? 8 : 16;
11495 unsigned long opcode;
11496 bfd_boolean narrow;
11497 bfd_boolean low_regs;
11499 low_regs = (Rn <= 7 && Rm <= 7);
11500 opcode = inst.instruction;
/* Inside an IT block the flag-setting form must be wide; outside,
   MOVS of low registers can be narrow.  */
11501 if (in_it_block ())
11502 narrow = opcode != T_MNEM_movs;
11504 narrow = opcode != T_MNEM_movs || low_regs;
11505 if (inst.size_req == 4
11506 || inst.operands[1].shifted)
11509 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11510 if (opcode == T_MNEM_movs && inst.operands[1].isreg
11511 && !inst.operands[1].shifted
11515 inst.instruction = T2_SUBS_PC_LR;
11519 if (opcode == T_MNEM_cmp)
11521 constraint (Rn == REG_PC, BAD_PC);
11524 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11526 warn_deprecated_sp (Rm);
11527 /* R15 was documented as a valid choice for Rm in ARMv6,
11528 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11529 tools reject R15, so we do too. */
11530 constraint (Rm == REG_PC, BAD_PC);
11533 reject_bad_reg (Rm);
11535 else if (opcode == T_MNEM_mov
11536 || opcode == T_MNEM_movs)
11538 if (inst.operands[1].isreg)
11540 if (opcode == T_MNEM_movs)
11542 reject_bad_reg (Rn);
11543 reject_bad_reg (Rm);
11547 /* This is mov.n. */
/* SP/PC as both source and destination of a narrow MOV is
   deprecated - warn but still assemble.  */
11548 if ((Rn == REG_SP || Rn == REG_PC)
11549 && (Rm == REG_SP || Rm == REG_PC))
11551 as_warn (_("Use of r%u as a source register is "
11552 "deprecated when r%u is the destination "
11553 "register."), Rm, Rn);
11558 /* This is mov.w. */
11559 constraint (Rn == REG_PC, BAD_PC);
11560 constraint (Rm == REG_PC, BAD_PC);
11561 constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
11565 reject_bad_reg (Rn);
11568 if (!inst.operands[1].isreg)
11570 /* Immediate operand. */
11571 if (!in_it_block () && opcode == T_MNEM_mov)
11573 if (low_regs && narrow)
/* Narrow immediate form; when no size was forced, record the
   opcode in inst.relax so relaxation can widen it later.  */
11575 inst.instruction = THUMB_OP16 (opcode);
11576 inst.instruction |= Rn << 8;
11577 if (inst.size_req == 2)
11578 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
11580 inst.relax = opcode;
/* Wide immediate form: force the T32 "plain binary immediate"
   layout (clear bits 25-27 shape, set bit 28 pattern).  */
11584 inst.instruction = THUMB_OP32 (inst.instruction);
11585 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11586 inst.instruction |= Rn << r0off;
11587 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11590 else if (inst.operands[1].shifted && inst.operands[1].immisreg
11591 && (inst.instruction == T_MNEM_mov
11592 || inst.instruction == T_MNEM_movs))
11594 /* Register shifts are encoded as separate shift instructions. */
11595 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
11597 if (in_it_block ())
11602 if (inst.size_req == 4)
11605 if (!low_regs || inst.operands[1].imm > 7)
/* Pick the shift opcode (narrow register-shift or wide T32
   equivalent) from the parsed shift kind.  */
11611 switch (inst.operands[1].shift_kind)
11614 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
11617 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
11620 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
11623 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
11629 inst.instruction = opcode;
11632 inst.instruction |= Rn;
11633 inst.instruction |= inst.operands[1].imm << 3;
11638 inst.instruction |= CONDS_BIT;
11640 inst.instruction |= Rn << 8;
11641 inst.instruction |= Rm << 16;
11642 inst.instruction |= inst.operands[1].imm;
11647 /* Some mov with immediate shift have narrow variants.
11648 Register shifts are handled above. */
11649 if (low_regs && inst.operands[1].shifted
11650 && (inst.instruction == T_MNEM_mov
11651 || inst.instruction == T_MNEM_movs))
11653 if (in_it_block ())
11654 narrow = (inst.instruction == T_MNEM_mov);
11656 narrow = (inst.instruction == T_MNEM_movs);
11661 switch (inst.operands[1].shift_kind)
11663 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
11664 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
11665 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
11666 default: narrow = FALSE; break;
11672 inst.instruction |= Rn;
11673 inst.instruction |= Rm << 3;
11674 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
11678 inst.instruction = THUMB_OP32 (inst.instruction);
11679 inst.instruction |= Rn << r0off;
11680 encode_thumb32_shifted_operand (1);
/* Non-unified (pre-UAL) syntax path below.  */
11684 switch (inst.instruction)
11687 /* In v4t or v5t a move of two lowregs produces unpredictable
11688 results. Don't allow this. */
11691 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
11692 "MOV Rd, Rs with two low registers is not "
11693 "permitted on this architecture");
11694 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
/* High-register MOV: Rd split across bit 7 and bits 0-2.  */
11698 inst.instruction = T_OPCODE_MOV_HR;
11699 inst.instruction |= (Rn & 0x8) << 4;
11700 inst.instruction |= (Rn & 0x7);
11701 inst.instruction |= Rm << 3;
11705 /* We know we have low registers at this point.
11706 Generate LSLS Rd, Rs, #0. */
11707 inst.instruction = T_OPCODE_LSL_I;
11708 inst.instruction |= Rn;
11709 inst.instruction |= Rm << 3;
11715 inst.instruction = T_OPCODE_CMP_LR;
11716 inst.instruction |= Rn;
11717 inst.instruction |= Rm << 3;
11721 inst.instruction = T_OPCODE_CMP_HR;
11722 inst.instruction |= (Rn & 0x8) << 4;
11723 inst.instruction |= (Rn & 0x7);
11724 inst.instruction |= Rm << 3;
11731 inst.instruction = THUMB_OP16 (inst.instruction);
11733 /* PR 10443: Do not silently ignore shifted operands. */
11734 constraint (inst.operands[1].shifted,
11735 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
11737 if (inst.operands[1].isreg)
11739 if (Rn < 8 && Rm < 8)
11741 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
11742 since a MOV instruction produces unpredictable results. */
11743 if (inst.instruction == T_OPCODE_MOV_I8)
11744 inst.instruction = T_OPCODE_ADD_I3;
11746 inst.instruction = T_OPCODE_CMP_LR;
11748 inst.instruction |= Rn;
11749 inst.instruction |= Rm << 3;
11753 if (inst.instruction == T_OPCODE_MOV_I8)
11754 inst.instruction = T_OPCODE_MOV_HR;
11756 inst.instruction = T_OPCODE_CMP_HR;
/* Immediate form: only low registers allowed in 16-bit Thumb.  */
11762 constraint (Rn > 7,
11763 _("only lo regs allowed with immediate"));
11764 inst.instruction |= Rn << 8;
11765 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
/* Thumb MOVW/MOVT encoder fragment.  `top' distinguishes MOVT
   (bit 23 set) from MOVW; :lower16:/:upper16: relocations are checked
   against the mnemonic and converted to their Thumb variants.  When
   no relocation is pending, the 16-bit immediate is scattered into
   the T32 imm4:i:imm3:imm8 fields.
   NOTE(review): function header line is missing from this sample.  */
11776 top = (inst.instruction & 0x00800000) != 0;
11777 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
11779 constraint (top, _(":lower16: not allowed this instruction"));
11780 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
11782 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
11784 constraint (!top, _(":upper16: not allowed this instruction"));
11785 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
11788 Rd = inst.operands[0].reg;
11789 reject_bad_reg (Rd);
11791 inst.instruction |= Rd << 8;
11792 if (inst.reloc.type == BFD_RELOC_UNUSED)
11794 imm = inst.reloc.exp.X_add_number;
/* imm4 -> bits 16-19, i -> bit 26, imm3 -> bits 12-14, imm8 -> 0-7. */
11795 inst.instruction |= (imm & 0xf000) << 4;
11796 inst.instruction |= (imm & 0x0800) << 15;
11797 inst.instruction |= (imm & 0x0700) << 4;
11798 inst.instruction |= (imm & 0x00ff);
/* Encode Thumb MVN/MVNS/TST/CMP/CMN-style one-source operations,
   selecting 16- vs 32-bit encodings from register range, shifts,
   IT-block state and size requests.
   NOTE(review): lossy sample - brace/blank lines are missing.  */
11803 do_t_mvn_tst (void)
11807 Rn = inst.operands[0].reg;
11808 Rm = inst.operands[1].reg;
/* CMP/CMN allow SP but not PC as the first operand; other mnemonics
   reject both r13 and r15.  */
11810 if (inst.instruction == T_MNEM_cmp
11811 || inst.instruction == T_MNEM_cmn)
11812 constraint (Rn == REG_PC, BAD_PC);
11814 reject_bad_reg (Rn);
11815 reject_bad_reg (Rm);
11817 if (unified_syntax)
/* MVN/MVNS put the destination at bit 8; the compare-style forms
   put the (only) register operand at bit 16.  */
11819 int r0off = (inst.instruction == T_MNEM_mvn
11820 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
11821 bfd_boolean narrow;
11823 if (inst.size_req == 4
11824 || inst.instruction > 0xffff
11825 || inst.operands[1].shifted
11826 || Rn > 7 || Rm > 7)
11828 else if (inst.instruction == T_MNEM_cmn
11829 || inst.instruction == T_MNEM_tst)
11831 else if (THUMB_SETS_FLAGS (inst.instruction))
11832 narrow = !in_it_block ();
11834 narrow = in_it_block ();
11836 if (!inst.operands[1].isreg)
11838 /* For an immediate, we always generate a 32-bit opcode;
11839 section relaxation will shrink it later if possible. */
11840 if (inst.instruction < 0xffff)
11841 inst.instruction = THUMB_OP32 (inst.instruction);
11842 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11843 inst.instruction |= Rn << r0off;
11844 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
11848 /* See if we can do this with a 16-bit instruction. */
11851 inst.instruction = THUMB_OP16 (inst.instruction);
11852 inst.instruction |= Rn;
11853 inst.instruction |= Rm << 3;
11857 constraint (inst.operands[1].shifted
11858 && inst.operands[1].immisreg,
11859 _("shift must be constant"));
11860 if (inst.instruction < 0xffff)
11861 inst.instruction = THUMB_OP32 (inst.instruction);
11862 inst.instruction |= Rn << r0off;
11863 encode_thumb32_shifted_operand (1);
/* Non-unified syntax: only the plain 16-bit register form exists.  */
11869 constraint (inst.instruction > 0xffff
11870 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
11871 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
11872 _("unshifted register required"));
11873 constraint (Rn > 7 || Rm > 7,
11876 inst.instruction = THUMB_OP16 (inst.instruction);
11877 inst.instruction |= Rn;
11878 inst.instruction |= Rm << 3;
/* Encode Thumb MRS (read status/special register).  Delegates to the
   VFP handler first (FMRX-style aliases); otherwise validates the
   requested special register against the selected CPU profile.
   NOTE(review): lossy sample - function header and braces missing.  */
11887 if (do_vfp_nsyn_mrs () == SUCCESS)
11890 Rd = inst.operands[0].reg;
11891 reject_bad_reg (Rd);
11892 inst.instruction |= Rd << 8;
/* Banked-register form: operand 1 carries an encoded register
   descriptor rather than a PSR flag mask.  */
11894 if (inst.operands[1].isreg)
11896 unsigned br = inst.operands[1].reg;
11897 if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
11898 as_bad (_("bad register for mrs"));
11900 inst.instruction |= br & (0xf << 16);
11901 inst.instruction |= (br & 0x300) >> 4;
11902 inst.instruction |= (br & SPSR_BIT) >> 2;
11906 int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11908 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11910 /* PR gas/12698: The constraint is only applied for m_profile.
11911 If the user has specified -march=all, we want to ignore it as
11912 we are building for any CPU type, including non-m variants. */
11913 bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
11914 constraint ((flags != 0) && m_profile, _("selected processor does "
11915 "not support requested special purpose register"));
11918 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
11920 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
11921 _("'APSR', 'CPSR' or 'SPSR' expected"));
11923 inst.instruction |= (flags & SPSR_BIT) >> 2;
11924 inst.instruction |= inst.operands[1].imm & 0xff;
11925 inst.instruction |= 0xf0000;
/* Encode Thumb MSR (write status/special register).  Delegates to the
   VFP handler first; the Thumb encoding takes a register source only
   (no immediate form here).  Flag-mask validity depends on whether
   the selected CPU is M-profile and has the DSP extension.
   NOTE(review): lossy sample - function header and braces missing.  */
11935 if (do_vfp_nsyn_msr () == SUCCESS)
11938 constraint (!inst.operands[1].isreg,
11939 _("Thumb encoding does not support an immediate here"));
11941 if (inst.operands[0].isreg)
11942 flags = (int)(inst.operands[0].reg);
11944 flags = inst.operands[0].imm;
11946 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
11948 int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
11950 /* PR gas/12698: The constraint is only applied for m_profile.
11951 If the user has specified -march=all, we want to ignore it as
11952 we are building for any CPU type, including non-m variants. */
11953 bfd_boolean m_profile = selected_cpu.core != arm_arch_any.core;
11954 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11955 && (bits & ~(PSR_s | PSR_f)) != 0)
11956 || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
11957 && bits != PSR_f)) && m_profile,
11958 _("selected processor does not support requested special "
11959 "purpose register"));
11962 constraint ((flags & 0xff) != 0, _("selected processor does not support "
11963 "requested special purpose register"));
11965 Rn = inst.operands[1].reg;
11966 reject_bad_reg (Rn);
/* Scatter the flag mask into the instruction word and place the
   source register in bits 16-19.  */
11968 inst.instruction |= (flags & SPSR_BIT) >> 2;
11969 inst.instruction |= (flags & 0xf0000) >> 8;
11970 inst.instruction |= (flags & 0x300) >> 4;
11971 inst.instruction |= (flags & 0xff);
11972 inst.instruction |= Rn << 16;
/* Encode Thumb MUL/MULS.  The 16-bit form requires the destination
   to overlap one of the sources (Rd == Rn or Rd == Rm); the 32-bit
   form must not set flags.
   NOTE(review): lossy sample - function header and braces missing.  */
11978 bfd_boolean narrow;
11979 unsigned Rd, Rn, Rm;
/* Two-operand form "MUL Rd, Rm" duplicates Rd as the second source. */
11981 if (!inst.operands[2].present)
11982 inst.operands[2].reg = inst.operands[0].reg;
11984 Rd = inst.operands[0].reg;
11985 Rn = inst.operands[1].reg;
11986 Rm = inst.operands[2].reg;
11988 if (unified_syntax)
11990 if (inst.size_req == 4
11996 else if (inst.instruction == T_MNEM_muls)
11997 narrow = !in_it_block ();
11999 narrow = in_it_block ();
12003 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
12004 constraint (Rn > 7 || Rm > 7,
12011 /* 16-bit MULS/Conditional MUL. */
12012 inst.instruction = THUMB_OP16 (inst.instruction);
12013 inst.instruction |= Rd;
/* The 16-bit encoding is destructive: the non-Rd source goes in
   bits 3-5, so Rd must equal one of the sources.  */
12016 inst.instruction |= Rm << 3;
12018 inst.instruction |= Rn << 3;
12020 constraint (1, _("dest must overlap one source register"));
12024 constraint (inst.instruction != T_MNEM_mul,
12025 _("Thumb-2 MUL must not set flags"));
12027 inst.instruction = THUMB_OP32 (inst.instruction);
12028 inst.instruction |= Rd << 8;
12029 inst.instruction |= Rn << 16;
12030 inst.instruction |= Rm << 0;
12032 reject_bad_reg (Rd);
12033 reject_bad_reg (Rn);
12034 reject_bad_reg (Rm);
/* Encode Thumb long multiply (UMULL/SMULL family).  RdHi == RdLo is
   only diagnosed with as_tsktsk (a gentle warning), not rejected.
   NOTE(review): function header line is missing from this sample.  */
12041 unsigned RdLo, RdHi, Rn, Rm;
12043 RdLo = inst.operands[0].reg;
12044 RdHi = inst.operands[1].reg;
12045 Rn = inst.operands[2].reg;
12046 Rm = inst.operands[3].reg;
12048 reject_bad_reg (RdLo);
12049 reject_bad_reg (RdHi);
12050 reject_bad_reg (Rn);
12051 reject_bad_reg (Rm);
12053 inst.instruction |= RdLo << 12;
12054 inst.instruction |= RdHi << 8;
12055 inst.instruction |= Rn << 16;
12056 inst.instruction |= Rm;
12059 as_tsktsk (_("rdhi and rdlo must be different"));
/* Encode Thumb NOP and hint instructions.  NOP is IT-neutral.  On
   targets without Thumb-2 the canonical NOP is "mov r8, r8" (0x46c0),
   and hint operands are not supported.
   NOTE(review): lossy sample - function header and braces missing.  */
12065 set_it_insn_type (NEUTRAL_IT_INSN);
12067 if (unified_syntax)
12069 if (inst.size_req == 4 || inst.operands[0].imm > 15)
12071 inst.instruction = THUMB_OP32 (inst.instruction);
12072 inst.instruction |= inst.operands[0].imm;
12076 /* PR9722: Check for Thumb2 availability before
12077 generating a thumb2 nop instruction. */
12078 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12080 inst.instruction = THUMB_OP16 (inst.instruction);
12081 inst.instruction |= inst.operands[0].imm << 4;
12084 inst.instruction = 0x46c0;
12089 constraint (inst.operands[0].present,
12090 _("Thumb does not support NOP with hints"));
12091 inst.instruction = 0x46c0;
/* Two-register Thumb encoder (flag-setting variant selectable);
   narrow when both registers are low and the IT-block rules allow,
   otherwise the 32-bit form with Rd at bit 8 and Rn at bit 16.
   NOTE(review): function header is missing from this sample - the
   exact mnemonic handled here cannot be confirmed from this view.  */
12098 if (unified_syntax)
12100 bfd_boolean narrow;
12102 if (THUMB_SETS_FLAGS (inst.instruction))
12103 narrow = !in_it_block ();
12105 narrow = in_it_block ();
12106 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12108 if (inst.size_req == 4)
12113 inst.instruction = THUMB_OP32 (inst.instruction);
12114 inst.instruction |= inst.operands[0].reg << 8;
12115 inst.instruction |= inst.operands[1].reg << 16;
12119 inst.instruction = THUMB_OP16 (inst.instruction);
12120 inst.instruction |= inst.operands[0].reg;
12121 inst.instruction |= inst.operands[1].reg << 3;
/* Non-unified syntax: low registers only, no flag-setting suffix.  */
12126 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12128 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12130 inst.instruction = THUMB_OP16 (inst.instruction);
12131 inst.instruction |= inst.operands[0].reg;
12132 inst.instruction |= inst.operands[1].reg << 3;
/* Thumb-2 ORN-style encoder fragment: Rd defaults to Rn when the
   second register is omitted; operand 2 is either an immediate
   (T32 immediate reloc) or a constant-shifted register.
   NOTE(review): function header line is missing from this sample.  */
12141 Rd = inst.operands[0].reg;
12142 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12144 reject_bad_reg (Rd);
12145 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12146 reject_bad_reg (Rn);
12148 inst.instruction |= Rd << 8;
12149 inst.instruction |= Rn << 16;
12151 if (!inst.operands[2].isreg)
12153 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12154 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12160 Rm = inst.operands[2].reg;
12161 reject_bad_reg (Rm);
/* Register-shifted-register forms do not exist here; the shift
   amount must be a constant.  */
12163 constraint (inst.operands[2].shifted
12164 && inst.operands[2].immisreg,
12165 _("shift must be constant"));
12166 encode_thumb32_shifted_operand (2);
/* Encode Thumb PKHBT: three registers plus an optional constant
   shift whose 5-bit amount is split across imm3 (bits 12-14) and
   imm2 (bits 6-7).
   NOTE(review): function header line is missing from this sample.  */
12173 unsigned Rd, Rn, Rm;
12175 Rd = inst.operands[0].reg;
12176 Rn = inst.operands[1].reg;
12177 Rm = inst.operands[2].reg;
12179 reject_bad_reg (Rd);
12180 reject_bad_reg (Rn);
12181 reject_bad_reg (Rm);
12183 inst.instruction |= Rd << 8;
12184 inst.instruction |= Rn << 16;
12185 inst.instruction |= Rm;
12186 if (inst.operands[3].present)
12188 unsigned int val = inst.reloc.exp.X_add_number;
12189 constraint (inst.reloc.exp.X_op != O_constant,
12190 _("expression too complex"));
12191 inst.instruction |= (val & 0x1c) << 10;
12192 inst.instruction |= (val & 0x03) << 6;
/* Encode Thumb PKHTB.  Without a shift it is the same operation with
   Rn/Rm swapped and the tb bit (0x20) cleared (PR 10168); with a
   shift it presumably falls through to the PKHBT encoder - the
   following lines are not shown in this sample.  */
12199 if (!inst.operands[3].present)
12203 inst.instruction &= ~0x00000020;
12205 /* PR 10168. Swap the Rm and Rn registers. */
12206 Rtmp = inst.operands[1].reg;
12207 inst.operands[1].reg = inst.operands[2].reg;
12208 inst.operands[2].reg = Rtmp;
/* Encode Thumb PLD-style preload: validate a register index if one
   is used, then encode the address from operand 0.
   NOTE(review): function header line is missing from this sample.  */
12216 if (inst.operands[0].immisreg)
12217 reject_bad_reg (inst.operands[0].imm);
12219 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
/* Encode Thumb PUSH/POP.  Uses the 16-bit form when the register
   mask fits (low registers, optionally plus LR for push / PC for
   pop); otherwise falls back to the 32-bit LDM/STM form with SP as
   the base register.
   NOTE(review): lossy sample - brace/blank lines are missing.  */
12223 do_t_push_pop (void)
12227 constraint (inst.operands[0].writeback,
12228 _("push/pop do not support {reglist}^"));
12229 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
12230 _("expression too complex"));
12232 mask = inst.operands[0].imm;
12233 if (inst.size_req != 4 && (mask & ~0xff) == 0)
12234 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
/* Low registers plus exactly LR (push) or PC (pop): still 16-bit,
   with the extra register flagged by THUMB_PP_PC_LR.  */
12235 else if (inst.size_req != 4
12236 && (mask & ~0xff) == (1 << (inst.instruction == T_MNEM_push
12237 ? REG_LR : REG_PC)))
12239 inst.instruction = THUMB_OP16 (inst.instruction);
12240 inst.instruction |= THUMB_PP_PC_LR;
12241 inst.instruction |= mask & 0xff;
12243 else if (unified_syntax)
12245 inst.instruction = THUMB_OP32 (inst.instruction);
12246 encode_thumb2_ldmstm (13, mask, TRUE);
12250 inst.error = _("invalid register list to push/pop instruction");
/* Thumb-2 two-register encoder fragment (RBIT-style): Rm is encoded
   twice, in bits 16-19 and bits 0-3, as the T32 encoding requires.
   NOTE(review): function header line is missing from this sample.  */
12260 Rd = inst.operands[0].reg;
12261 Rm = inst.operands[1].reg;
12263 reject_bad_reg (Rd);
12264 reject_bad_reg (Rm);
12266 inst.instruction |= Rd << 8;
12267 inst.instruction |= Rm << 16;
12268 inst.instruction |= Rm;
/* Thumb REV-style encoder fragment: 16-bit form for two low
   registers when no .w was requested; otherwise the 32-bit form
   (Rm duplicated in bits 16-19 and 0-3).  Outside unified syntax
   high registers are an error.
   NOTE(review): function header line is missing from this sample.  */
12276 Rd = inst.operands[0].reg;
12277 Rm = inst.operands[1].reg;
12279 reject_bad_reg (Rd);
12280 reject_bad_reg (Rm);
12282 if (Rd <= 7 && Rm <= 7
12283 && inst.size_req != 4)
12285 inst.instruction = THUMB_OP16 (inst.instruction);
12286 inst.instruction |= Rd;
12287 inst.instruction |= Rm << 3;
12289 else if (unified_syntax)
12291 inst.instruction = THUMB_OP32 (inst.instruction);
12292 inst.instruction |= Rd << 8;
12293 inst.instruction |= Rm << 16;
12294 inst.instruction |= Rm;
12297 inst.error = BAD_HIREG;
/* Thumb-2 two-register encoder fragment (RRX-style): Rd at bit 8,
   Rm at bits 0-3; r13/r15 rejected.
   NOTE(review): function header line is missing from this sample.  */
12305 Rd = inst.operands[0].reg;
12306 Rm = inst.operands[1].reg;
12308 reject_bad_reg (Rd);
12309 reject_bad_reg (Rm);
12311 inst.instruction |= Rd << 8;
12312 inst.instruction |= Rm;
/* Encode Thumb RSB.  Two-operand form duplicates Rd as the first
   source.  "rsb Rd, Rs, #0" with low registers can be emitted as the
   16-bit NEGS instruction; other immediate forms take the T32
   immediate layout, and register forms a constant-shifted operand.
   NOTE(review): lossy sample - function header and braces missing.  */
12320 Rd = inst.operands[0].reg;
12321 Rs = (inst.operands[1].present
12322 ? inst.operands[1].reg /* Rd, Rs, foo */
12323 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
12325 reject_bad_reg (Rd);
12326 reject_bad_reg (Rs);
12327 if (inst.operands[2].isreg)
12328 reject_bad_reg (inst.operands[2].reg);
12330 inst.instruction |= Rd << 8;
12331 inst.instruction |= Rs << 16;
12332 if (!inst.operands[2].isreg)
12334 bfd_boolean narrow;
/* Bit 20 is the S (flag-setting) bit in the T32 encoding here.  */
12336 if ((inst.instruction & 0x00100000) != 0)
12337 narrow = !in_it_block ();
12339 narrow = in_it_block ();
12341 if (Rd > 7 || Rs > 7)
12344 if (inst.size_req == 4 || !unified_syntax)
12347 if (inst.reloc.exp.X_op != O_constant
12348 || inst.reloc.exp.X_add_number != 0)
12351 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12352 relaxation, but it doesn't seem worth the hassle. */
12355 inst.reloc.type = BFD_RELOC_UNUSED;
12356 inst.instruction = THUMB_OP16 (T_MNEM_negs);
12357 inst.instruction |= Rs << 3;
12358 inst.instruction |= Rd;
12362 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12363 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12367 encode_thumb32_shifted_operand (2);
/* Encode Thumb SETEND.  Deprecated from ARMv8 (warn if enabled);
   must not appear inside an IT block; bit 3 selects big-endian.
   NOTE(review): function header line is missing from this sample.  */
12373 if (warn_on_deprecated
12374 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12375 as_warn (_("setend use is deprecated for ARMv8"));
12377 set_it_insn_type (OUTSIDE_IT_INSN);
12378 if (inst.operands[0].imm)
12379 inst.instruction |= 0x8;
/* Encode Thumb shift instructions (ASR/LSL/LSR/ROR, immediate and
   register forms).  In unified syntax, wide forms are emitted as the
   T32 shift encodings or, for immediate shifts, as MOV/MOVS with a
   shifted operand; narrow forms require low registers and (for the
   register form) Rd == Rn.
   NOTE(review): lossy sample - brace/blank lines are missing.  */
/* Two-operand form shifts the register in place (Rn defaults to Rd). */
12385 if (!inst.operands[1].present)
12386 inst.operands[1].reg = inst.operands[0].reg;
12388 if (unified_syntax)
12390 bfd_boolean narrow;
12393 switch (inst.instruction)
12396 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
12398 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
12400 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
12402 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
12406 if (THUMB_SETS_FLAGS (inst.instruction))
12407 narrow = !in_it_block ();
12409 narrow = in_it_block ();
12410 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
/* There is no 16-bit immediate ROR; force the wide form.  */
12412 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
12414 if (inst.operands[2].isreg
12415 && (inst.operands[1].reg != inst.operands[0].reg
12416 || inst.operands[2].reg > 7))
12418 if (inst.size_req == 4)
12421 reject_bad_reg (inst.operands[0].reg);
12422 reject_bad_reg (inst.operands[1].reg);
12426 if (inst.operands[2].isreg)
12428 reject_bad_reg (inst.operands[2].reg);
12429 inst.instruction = THUMB_OP32 (inst.instruction);
12430 inst.instruction |= inst.operands[0].reg << 8;
12431 inst.instruction |= inst.operands[1].reg << 16;
12432 inst.instruction |= inst.operands[2].reg;
12434 /* PR 12854: Error on extraneous shifts. */
12435 constraint (inst.operands[2].shifted,
12436 _("extraneous shift as part of operand to shift insn"));
/* Wide immediate shift: re-encode as MOV/MOVS with a shifted
   operand rather than a dedicated shift opcode.  */
12440 inst.operands[1].shifted = 1;
12441 inst.operands[1].shift_kind = shift_kind;
12442 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
12443 ? T_MNEM_movs : T_MNEM_mov);
12444 inst.instruction |= inst.operands[0].reg << 8;
12445 encode_thumb32_shifted_operand (1);
12446 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12447 inst.reloc.type = BFD_RELOC_UNUSED;
12452 if (inst.operands[2].isreg)
12454 switch (shift_kind)
12456 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
12457 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
12458 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
12459 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
12463 inst.instruction |= inst.operands[0].reg;
12464 inst.instruction |= inst.operands[2].reg << 3;
12466 /* PR 12854: Error on extraneous shifts. */
12467 constraint (inst.operands[2].shifted,
12468 _("extraneous shift as part of operand to shift insn"));
12472 switch (shift_kind)
12474 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
12475 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
12476 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
12479 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12480 inst.instruction |= inst.operands[0].reg;
12481 inst.instruction |= inst.operands[1].reg << 3;
/* Non-unified syntax path: 16-bit encodings only, no flag-setting
   suffix, destructive register form.  */
12487 constraint (inst.operands[0].reg > 7
12488 || inst.operands[1].reg > 7, BAD_HIREG);
12489 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12491 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
12493 constraint (inst.operands[2].reg > 7, BAD_HIREG);
12494 constraint (inst.operands[0].reg != inst.operands[1].reg,
12495 _("source1 and dest must be same register"));
12497 switch (inst.instruction)
12499 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
12500 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
12501 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
12502 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
12506 inst.instruction |= inst.operands[0].reg;
12507 inst.instruction |= inst.operands[2].reg << 3;
12509 /* PR 12854: Error on extraneous shifts. */
12510 constraint (inst.operands[2].shifted,
12511 _("extraneous shift as part of operand to shift insn"));
12515 switch (inst.instruction)
12517 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
12518 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
12519 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
12520 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
12523 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12524 inst.instruction |= inst.operands[0].reg;
12525 inst.instruction |= inst.operands[1].reg << 3;
/* Generic Thumb-2 three-register SIMD encoder: Rd bit 8, Rn bit 16,
   Rm bits 0-3; r13/r15 rejected.
   NOTE(review): function header line is missing from this sample.  */
12533 unsigned Rd, Rn, Rm;
12535 Rd = inst.operands[0].reg;
12536 Rn = inst.operands[1].reg;
12537 Rm = inst.operands[2].reg;
12539 reject_bad_reg (Rd);
12540 reject_bad_reg (Rn);
12541 reject_bad_reg (Rm);
12543 inst.instruction |= Rd << 8;
12544 inst.instruction |= Rn << 16;
12545 inst.instruction |= Rm;
/* Three-register Thumb-2 SIMD encoder with swapped operand order:
   note operand 1 is Rm and operand 2 is Rn, unlike do_t_simd.
   NOTE(review): function header line is missing from this sample.  */
12551 unsigned Rd, Rn, Rm;
12553 Rd = inst.operands[0].reg;
12554 Rm = inst.operands[1].reg;
12555 Rn = inst.operands[2].reg;
12557 reject_bad_reg (Rd);
12558 reject_bad_reg (Rn);
12559 reject_bad_reg (Rm);
12561 inst.instruction |= Rd << 8;
12562 inst.instruction |= Rn << 16;
12563 inst.instruction |= Rm;
/* Encode Thumb SMC (secure monitor call).  The 16-bit immediate is
   scattered into three fields; SMC must be the last instruction in
   an IT block (PR gas/15623).
   NOTE(review): function header line is missing from this sample.  */
12569 unsigned int value = inst.reloc.exp.X_add_number;
12570 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
12571 _("SMC is not permitted on this architecture"));
12572 constraint (inst.reloc.exp.X_op != O_constant,
12573 _("expression too complex"));
12574 inst.reloc.type = BFD_RELOC_UNUSED;
12575 inst.instruction |= (value & 0xf000) >> 12;
12576 inst.instruction |= (value & 0x0ff0);
12577 inst.instruction |= (value & 0x000f) << 16;
12578 /* PR gas/15623: SMC instructions must be last in an IT block. */
12579 set_it_insn_type_last ();
/* Encoder fragment taking a 16-bit immediate split as imm12 (bits
   0-11) and imm4 (bits 16-19) - the HVC-style layout.
   NOTE(review): function header is missing from this sample, so the
   exact mnemonic cannot be confirmed from this view.  */
12585 unsigned int value = inst.reloc.exp.X_add_number;
12587 inst.reloc.type = BFD_RELOC_UNUSED;
12588 inst.instruction |= (value & 0x0fff);
12589 inst.instruction |= (value & 0xf000) << 4;
/* Shared encoder for Thumb SSAT/USAT.  BIAS adjusts the saturation
   width written to the immediate field (SSAT counts from 1, USAT
   from 0).  An optional LSL/ASR shift is encoded with its 5-bit
   amount split across imm3 (bits 12-14) and imm2 (bits 6-7); ASR
   sets the sh bit.
   NOTE(review): lossy sample - brace/blank lines are missing.  */
12593 do_t_ssat_usat (int bias)
12597 Rd = inst.operands[0].reg;
12598 Rn = inst.operands[2].reg;
12600 reject_bad_reg (Rd);
12601 reject_bad_reg (Rn);
12603 inst.instruction |= Rd << 8;
12604 inst.instruction |= inst.operands[1].imm - bias;
12605 inst.instruction |= Rn << 16;
12607 if (inst.operands[3].present)
12609 offsetT shift_amount = inst.reloc.exp.X_add_number;
12611 inst.reloc.type = BFD_RELOC_UNUSED;
12613 constraint (inst.reloc.exp.X_op != O_constant,
12614 _("expression too complex"));
12616 if (shift_amount != 0)
12618 constraint (shift_amount > 31,
12619 _("shift expression is too large"));
12621 if (inst.operands[3].shift_kind == SHIFT_ASR)
12622 inst.instruction |= 0x00200000; /* sh bit. */
12624 inst.instruction |= (shift_amount & 0x1c) << 10;
12625 inst.instruction |= (shift_amount & 0x03) << 6;
/* SSAT: delegate to the shared SSAT/USAT encoder with bias 1
   (SSAT saturation width is encoded as width - 1).  */
12633 do_t_ssat_usat (1);
/* Thumb SSAT16-style encoder fragment: Rd bit 8, Rn bit 16, and the
   saturation width encoded as width - 1.
   NOTE(review): function header line is missing from this sample.  */
12641 Rd = inst.operands[0].reg;
12642 Rn = inst.operands[2].reg;
12644 reject_bad_reg (Rd);
12645 reject_bad_reg (Rn);
12647 inst.instruction |= Rd << 8;
12648 inst.instruction |= inst.operands[1].imm - 1;
12649 inst.instruction |= Rn << 16;
/* Encode Thumb STREX: the address operand must be a plain
   pre-indexed register with an (optional) immediate offset - no
   writeback, register offset, shift or negative offset - and the
   base must not be PC.  The offset gets an unsigned 8-bit reloc.
   NOTE(review): function header line is missing from this sample.  */
12655 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
12656 || inst.operands[2].postind || inst.operands[2].writeback
12657 || inst.operands[2].immisreg || inst.operands[2].shifted
12658 || inst.operands[2].negative,
12661 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
12663 inst.instruction |= inst.operands[0].reg << 8;
12664 inst.instruction |= inst.operands[1].reg << 12;
12665 inst.instruction |= inst.operands[2].reg << 16;
12666 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
/* Encode Thumb STREXD: the second source defaults to Rt+1, and the
   status register (operand 0) must not overlap any of the other
   three registers.
   NOTE(review): function header line is missing from this sample.  */
12672 if (!inst.operands[2].present)
12673 inst.operands[2].reg = inst.operands[1].reg + 1;
12675 constraint (inst.operands[0].reg == inst.operands[1].reg
12676 || inst.operands[0].reg == inst.operands[2].reg
12677 || inst.operands[0].reg == inst.operands[3].reg,
12680 inst.instruction |= inst.operands[0].reg;
12681 inst.instruction |= inst.operands[1].reg << 12;
12682 inst.instruction |= inst.operands[2].reg << 8;
12683 inst.instruction |= inst.operands[3].reg << 16;
/* Thumb SXTAH-style encoder fragment: three registers plus a
   rotation placed in bits 4-5 (operand 3 holds the rotate amount
   already divided down by the parser - confirm against full source).
   NOTE(review): function header line is missing from this sample.  */
12689 unsigned Rd, Rn, Rm;
12691 Rd = inst.operands[0].reg;
12692 Rn = inst.operands[1].reg;
12693 Rm = inst.operands[2].reg;
12695 reject_bad_reg (Rd);
12696 reject_bad_reg (Rn);
12697 reject_bad_reg (Rm);
12699 inst.instruction |= Rd << 8;
12700 inst.instruction |= Rn << 16;
12701 inst.instruction |= Rm;
12702 inst.instruction |= inst.operands[3].imm << 4;
12710 Rd = inst.operands[0].reg;
12711 Rm = inst.operands[1].reg;
12713 reject_bad_reg (Rd);
12714 reject_bad_reg (Rm);
12716 if (inst.instruction <= 0xffff
12717 && inst.size_req != 4
12718 && Rd <= 7 && Rm <= 7
12719 && (!inst.operands[2].present || inst.operands[2].imm == 0))
12721 inst.instruction = THUMB_OP16 (inst.instruction);
12722 inst.instruction |= Rd;
12723 inst.instruction |= Rm << 3;
12725 else if (unified_syntax)
12727 if (inst.instruction <= 0xffff)
12728 inst.instruction = THUMB_OP32 (inst.instruction);
12729 inst.instruction |= Rd << 8;
12730 inst.instruction |= Rm;
12731 inst.instruction |= inst.operands[2].imm << 4;
12735 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
12736 _("Thumb encoding does not support rotation"));
12737 constraint (1, BAD_HIREG);
12744 /* We have to do the following check manually as ARM_EXT_OS only applies
12746 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6m))
12748 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_os)
12749 /* This only applies to the v6m howver, not later architectures. */
12750 && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7))
12751 as_bad (_("SVC is not permitted on this architecture"));
12752 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, arm_ext_os);
12755 inst.reloc.type = BFD_RELOC_ARM_SWI;
12764 half = (inst.instruction & 0x10) != 0;
12765 set_it_insn_type_last ();
12766 constraint (inst.operands[0].immisreg,
12767 _("instruction requires register index"));
12769 Rn = inst.operands[0].reg;
12770 Rm = inst.operands[0].imm;
12772 constraint (Rn == REG_SP, BAD_SP);
12773 reject_bad_reg (Rm);
12775 constraint (!half && inst.operands[0].shifted,
12776 _("instruction does not allow shifted index"));
12777 inst.instruction |= (Rn << 16) | Rm;
12783 if (!inst.operands[0].present)
12784 inst.operands[0].imm = 0;
12786 if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
12788 constraint (inst.size_req == 2,
12789 _("immediate value out of range"));
12790 inst.instruction = THUMB_OP32 (inst.instruction);
12791 inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
12792 inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
12796 inst.instruction = THUMB_OP16 (inst.instruction);
12797 inst.instruction |= inst.operands[0].imm;
12800 set_it_insn_type (NEUTRAL_IT_INSN);
/* Encode a Thumb-2 USAT instruction: shared helper with bias 0
   (USAT immediates are encoded unmodified).  */

static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
12815 Rd = inst.operands[0].reg;
12816 Rn = inst.operands[2].reg;
12818 reject_bad_reg (Rd);
12819 reject_bad_reg (Rn);
12821 inst.instruction |= Rd << 8;
12822 inst.instruction |= inst.operands[1].imm;
12823 inst.instruction |= Rn << 16;
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};

/* Map overloaded Neon opcodes to their respective encodings.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,	0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),		\
  X(sha2op,     0x3ba0380, N_INV,     N_INV)

/* Generate N_MNEM_<opc> enumerators from the table above.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};

static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};

/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))

/* Rewrite inst.instruction to the encoding selected by TYPE and flag
   the instruction as Neon.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

/* Reject a Neon type suffix on an instruction that never consumed it.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
12966 /* Define shapes for instruction operands. The following mnemonic characters
12967 are used in this table:
12969 F - VFP S<n> register
12970 D - Neon D<n> register
12971 Q - Neon Q<n> register
12975 L - D<n> register list
12977 This table is used to generate various data:
12978 - enumerations of the form NS_DDR to be used as arguments to
12980 - a table classifying shapes into single, double, quad, mixed.
12981 - a table used to drive neon_select_shape. */
12983 #define NEON_SHAPE_DEF \
12984 X(3, (D, D, D), DOUBLE), \
12985 X(3, (Q, Q, Q), QUAD), \
12986 X(3, (D, D, I), DOUBLE), \
12987 X(3, (Q, Q, I), QUAD), \
12988 X(3, (D, D, S), DOUBLE), \
12989 X(3, (Q, Q, S), QUAD), \
12990 X(2, (D, D), DOUBLE), \
12991 X(2, (Q, Q), QUAD), \
12992 X(2, (D, S), DOUBLE), \
12993 X(2, (Q, S), QUAD), \
12994 X(2, (D, R), DOUBLE), \
12995 X(2, (Q, R), QUAD), \
12996 X(2, (D, I), DOUBLE), \
12997 X(2, (Q, I), QUAD), \
12998 X(3, (D, L, D), DOUBLE), \
12999 X(2, (D, Q), MIXED), \
13000 X(2, (Q, D), MIXED), \
13001 X(3, (D, Q, I), MIXED), \
13002 X(3, (Q, D, I), MIXED), \
13003 X(3, (Q, D, D), MIXED), \
13004 X(3, (D, Q, Q), MIXED), \
13005 X(3, (Q, Q, D), MIXED), \
13006 X(3, (Q, D, S), MIXED), \
13007 X(3, (D, Q, S), MIXED), \
13008 X(4, (D, D, D, I), DOUBLE), \
13009 X(4, (Q, Q, Q, I), QUAD), \
13010 X(2, (F, F), SINGLE), \
13011 X(3, (F, F, F), SINGLE), \
13012 X(2, (F, I), SINGLE), \
13013 X(2, (F, D), MIXED), \
13014 X(2, (D, F), MIXED), \
13015 X(3, (F, F, I), MIXED), \
13016 X(4, (R, R, F, F), SINGLE), \
13017 X(4, (F, F, R, R), SINGLE), \
13018 X(3, (D, R, R), DOUBLE), \
13019 X(3, (R, R, D), DOUBLE), \
13020 X(2, (S, R), SINGLE), \
13021 X(2, (R, S), SINGLE), \
13022 X(2, (F, R), SINGLE), \
13023 X(2, (R, F), SINGLE)
13025 #define S2(A,B) NS_##A##B
13026 #define S3(A,B,C) NS_##A##B##C
13027 #define S4(A,B,C,D) NS_##A##B##C##D
13029 #define X(N, L, C) S##N L
13042 enum neon_shape_class
13050 #define X(N, L, C) SC_##C
13052 static enum neon_shape_class neon_shape_class[] =
13070 /* Register widths of above. */
13071 static unsigned neon_shape_el_size[] =
13082 struct neon_shape_info
13085 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
13088 #define S2(A,B) { SE_##A, SE_##B }
13089 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13090 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13092 #define X(N, L, C) { N, S##N L }
13094 static struct neon_shape_info neon_shape_tab[] =
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_SUF_32   (N_SU_32 | N_F32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
13168 static enum neon_shape
13169 neon_select_shape (enum neon_shape shape, ...)
13172 enum neon_shape first_shape = shape;
13174 /* Fix missing optional operands. FIXME: we don't know at this point how
13175 many arguments we should have, so this makes the assumption that we have
13176 > 1. This is true of all current Neon opcodes, I think, but may not be
13177 true in the future. */
13178 if (!inst.operands[1].present)
13179 inst.operands[1] = inst.operands[0];
13181 va_start (ap, shape);
13183 for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
13188 for (j = 0; j < neon_shape_tab[shape].els; j++)
13190 if (!inst.operands[j].present)
13196 switch (neon_shape_tab[shape].el[j])
13199 if (!(inst.operands[j].isreg
13200 && inst.operands[j].isvec
13201 && inst.operands[j].issingle
13202 && !inst.operands[j].isquad))
13207 if (!(inst.operands[j].isreg
13208 && inst.operands[j].isvec
13209 && !inst.operands[j].isquad
13210 && !inst.operands[j].issingle))
13215 if (!(inst.operands[j].isreg
13216 && !inst.operands[j].isvec))
13221 if (!(inst.operands[j].isreg
13222 && inst.operands[j].isvec
13223 && inst.operands[j].isquad
13224 && !inst.operands[j].issingle))
13229 if (!(!inst.operands[j].isreg
13230 && !inst.operands[j].isscalar))
13235 if (!(!inst.operands[j].isreg
13236 && inst.operands[j].isscalar))
13246 if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
13247 /* We've matched all the entries in the shape table, and we don't
13248 have any left over operands which have not been matched. */
13254 if (shape == NS_NULL && first_shape != NS_NULL)
13255 first_error (_("invalid instruction shape"));
13260 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13261 means the Q bit should be set). */
13264 neon_quad (enum neon_shape shape)
13266 return neon_shape_class[shape] == SC_QUAD;
13270 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13273 /* Allow modification to be made to types which are constrained to be
13274 based on the key element, based on bits set alongside N_EQK. */
13275 if ((typebits & N_EQK) != 0)
13277 if ((typebits & N_HLF) != 0)
13279 else if ((typebits & N_DBL) != 0)
13281 if ((typebits & N_SGN) != 0)
13282 *g_type = NT_signed;
13283 else if ((typebits & N_UNS) != 0)
13284 *g_type = NT_unsigned;
13285 else if ((typebits & N_INT) != 0)
13286 *g_type = NT_integer;
13287 else if ((typebits & N_FLT) != 0)
13288 *g_type = NT_float;
13289 else if ((typebits & N_SIZ) != 0)
13290 *g_type = NT_untyped;
13294 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13295 operand type, i.e. the single type specified in a Neon instruction when it
13296 is the only one given. */
13298 static struct neon_type_el
13299 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13301 struct neon_type_el dest = *key;
13303 gas_assert ((thisarg & N_EQK) != 0);
13305 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13310 /* Convert Neon type and size into compact bitmask representation. */
13312 static enum neon_type_mask
13313 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13320 case 8: return N_8;
13321 case 16: return N_16;
13322 case 32: return N_32;
13323 case 64: return N_64;
13331 case 8: return N_I8;
13332 case 16: return N_I16;
13333 case 32: return N_I32;
13334 case 64: return N_I64;
13342 case 16: return N_F16;
13343 case 32: return N_F32;
13344 case 64: return N_F64;
13352 case 8: return N_P8;
13353 case 16: return N_P16;
13354 case 64: return N_P64;
13362 case 8: return N_S8;
13363 case 16: return N_S16;
13364 case 32: return N_S32;
13365 case 64: return N_S64;
13373 case 8: return N_U8;
13374 case 16: return N_U16;
13375 case 32: return N_U32;
13376 case 64: return N_U64;
13387 /* Convert compact Neon bitmask type representation to a type and size. Only
13388 handles the case where a single bit is set in the mask. */
13391 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13392 enum neon_type_mask mask)
13394 if ((mask & N_EQK) != 0)
13397 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13399 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13401 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13403 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13408 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13410 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13411 *type = NT_unsigned;
13412 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13413 *type = NT_integer;
13414 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13415 *type = NT_untyped;
13416 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13418 else if ((mask & (N_F16 | N_F32 | N_F64)) != 0)
13426 /* Modify a bitmask of allowed types. This is only needed for type
13430 modify_types_allowed (unsigned allowed, unsigned mods)
13433 enum neon_el_type type;
13439 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13441 if (el_type_of_type_chk (&type, &size,
13442 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13444 neon_modify_type_size (mods, &type, &size);
13445 destmask |= type_chk_of_el_type (type, size);
13452 /* Check type and return type classification.
13453 The manual states (paraphrase): If one datatype is given, it indicates the
13455 - the second operand, if there is one
13456 - the operand, if there is no second operand
13457 - the result, if there are no operands.
13458 This isn't quite good enough though, so we use a concept of a "key" datatype
13459 which is set on a per-instruction basis, which is the one which matters when
13460 only one data type is written.
13461 Note: this function has side-effects (e.g. filling in missing operands). All
13462 Neon instructions should call it before performing bit encoding. */
13464 static struct neon_type_el
13465 neon_check_type (unsigned els, enum neon_shape ns, ...)
13468 unsigned i, pass, key_el = 0;
13469 unsigned types[NEON_MAX_TYPE_ELS];
13470 enum neon_el_type k_type = NT_invtype;
13471 unsigned k_size = -1u;
13472 struct neon_type_el badtype = {NT_invtype, -1};
13473 unsigned key_allowed = 0;
13475 /* Optional registers in Neon instructions are always (not) in operand 1.
13476 Fill in the missing operand here, if it was omitted. */
13477 if (els > 1 && !inst.operands[1].present)
13478 inst.operands[1] = inst.operands[0];
13480 /* Suck up all the varargs. */
13482 for (i = 0; i < els; i++)
13484 unsigned thisarg = va_arg (ap, unsigned);
13485 if (thisarg == N_IGNORE_TYPE)
13490 types[i] = thisarg;
13491 if ((thisarg & N_KEY) != 0)
13496 if (inst.vectype.elems > 0)
13497 for (i = 0; i < els; i++)
13498 if (inst.operands[i].vectype.type != NT_invtype)
13500 first_error (_("types specified in both the mnemonic and operands"));
13504 /* Duplicate inst.vectype elements here as necessary.
13505 FIXME: No idea if this is exactly the same as the ARM assembler,
13506 particularly when an insn takes one register and one non-register
13508 if (inst.vectype.elems == 1 && els > 1)
13511 inst.vectype.elems = els;
13512 inst.vectype.el[key_el] = inst.vectype.el[0];
13513 for (j = 0; j < els; j++)
13515 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
13518 else if (inst.vectype.elems == 0 && els > 0)
13521 /* No types were given after the mnemonic, so look for types specified
13522 after each operand. We allow some flexibility here; as long as the
13523 "key" operand has a type, we can infer the others. */
13524 for (j = 0; j < els; j++)
13525 if (inst.operands[j].vectype.type != NT_invtype)
13526 inst.vectype.el[j] = inst.operands[j].vectype;
13528 if (inst.operands[key_el].vectype.type != NT_invtype)
13530 for (j = 0; j < els; j++)
13531 if (inst.operands[j].vectype.type == NT_invtype)
13532 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
13537 first_error (_("operand types can't be inferred"));
13541 else if (inst.vectype.elems != els)
13543 first_error (_("type specifier has the wrong number of parts"));
13547 for (pass = 0; pass < 2; pass++)
13549 for (i = 0; i < els; i++)
13551 unsigned thisarg = types[i];
13552 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
13553 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
13554 enum neon_el_type g_type = inst.vectype.el[i].type;
13555 unsigned g_size = inst.vectype.el[i].size;
13557 /* Decay more-specific signed & unsigned types to sign-insensitive
13558 integer types if sign-specific variants are unavailable. */
13559 if ((g_type == NT_signed || g_type == NT_unsigned)
13560 && (types_allowed & N_SU_ALL) == 0)
13561 g_type = NT_integer;
13563 /* If only untyped args are allowed, decay any more specific types to
13564 them. Some instructions only care about signs for some element
13565 sizes, so handle that properly. */
13566 if (((types_allowed & N_UNT) == 0)
13567 && ((g_size == 8 && (types_allowed & N_8) != 0)
13568 || (g_size == 16 && (types_allowed & N_16) != 0)
13569 || (g_size == 32 && (types_allowed & N_32) != 0)
13570 || (g_size == 64 && (types_allowed & N_64) != 0)))
13571 g_type = NT_untyped;
13575 if ((thisarg & N_KEY) != 0)
13579 key_allowed = thisarg & ~N_KEY;
13584 if ((thisarg & N_VFP) != 0)
13586 enum neon_shape_el regshape;
13587 unsigned regwidth, match;
13589 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
13592 first_error (_("invalid instruction shape"));
13595 regshape = neon_shape_tab[ns].el[i];
13596 regwidth = neon_shape_el_size[regshape];
13598 /* In VFP mode, operands must match register widths. If we
13599 have a key operand, use its width, else use the width of
13600 the current operand. */
13606 if (regwidth != match)
13608 first_error (_("operand size must match register width"));
13613 if ((thisarg & N_EQK) == 0)
13615 unsigned given_type = type_chk_of_el_type (g_type, g_size);
13617 if ((given_type & types_allowed) == 0)
13619 first_error (_("bad type in Neon instruction"));
13625 enum neon_el_type mod_k_type = k_type;
13626 unsigned mod_k_size = k_size;
13627 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
13628 if (g_type != mod_k_type || g_size != mod_k_size)
13630 first_error (_("inconsistent types in Neon instruction"));
13638 return inst.vectype.el[key_el];
13641 /* Neon-style VFP instruction forwarding. */
13643 /* Thumb VFP instructions have 0xE in the condition field. */
13646 do_vfp_cond_or_thumb (void)
13651 inst.instruction |= 0xe0000000;
13653 inst.instruction |= inst.cond << 28;
13656 /* Look up and encode a simple mnemonic, for use as a helper function for the
13657 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
13658 etc. It is assumed that operand parsing has already been done, and that the
13659 operands are in the form expected by the given opcode (this isn't necessarily
13660 the same as the form in which they were parsed, hence some massaging must
13661 take place before this function is called).
13662 Checks current arch version against that in the looked-up opcode. */
13665 do_vfp_nsyn_opcode (const char *opname)
13667 const struct asm_opcode *opcode;
13669 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
13674 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
13675 thumb_mode ? *opcode->tvariant : *opcode->avariant),
13682 inst.instruction = opcode->tvalue;
13683 opcode->tencode ();
13687 inst.instruction = (inst.cond << 28) | opcode->avalue;
13688 opcode->aencode ();
13693 do_vfp_nsyn_add_sub (enum neon_shape rs)
13695 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
13700 do_vfp_nsyn_opcode ("fadds");
13702 do_vfp_nsyn_opcode ("fsubs");
13707 do_vfp_nsyn_opcode ("faddd");
13709 do_vfp_nsyn_opcode ("fsubd");
13713 /* Check operand types to see if this is a VFP instruction, and if so call
13717 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
13719 enum neon_shape rs;
13720 struct neon_type_el et;
13725 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13726 et = neon_check_type (2, rs,
13727 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13731 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13732 et = neon_check_type (3, rs,
13733 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13740 if (et.type != NT_invtype)
13751 do_vfp_nsyn_mla_mls (enum neon_shape rs)
13753 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
13758 do_vfp_nsyn_opcode ("fmacs");
13760 do_vfp_nsyn_opcode ("fnmacs");
13765 do_vfp_nsyn_opcode ("fmacd");
13767 do_vfp_nsyn_opcode ("fnmacd");
13772 do_vfp_nsyn_fma_fms (enum neon_shape rs)
13774 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
13779 do_vfp_nsyn_opcode ("ffmas");
13781 do_vfp_nsyn_opcode ("ffnmas");
13786 do_vfp_nsyn_opcode ("ffmad");
13788 do_vfp_nsyn_opcode ("ffnmad");
13793 do_vfp_nsyn_mul (enum neon_shape rs)
13796 do_vfp_nsyn_opcode ("fmuls");
13798 do_vfp_nsyn_opcode ("fmuld");
13802 do_vfp_nsyn_abs_neg (enum neon_shape rs)
13804 int is_neg = (inst.instruction & 0x80) != 0;
13805 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
13810 do_vfp_nsyn_opcode ("fnegs");
13812 do_vfp_nsyn_opcode ("fabss");
13817 do_vfp_nsyn_opcode ("fnegd");
13819 do_vfp_nsyn_opcode ("fabsd");
13823 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
13824 insns belong to Neon, and are handled elsewhere. */
13827 do_vfp_nsyn_ldm_stm (int is_dbmode)
13829 int is_ldm = (inst.instruction & (1 << 20)) != 0;
13833 do_vfp_nsyn_opcode ("fldmdbs");
13835 do_vfp_nsyn_opcode ("fldmias");
13840 do_vfp_nsyn_opcode ("fstmdbs");
13842 do_vfp_nsyn_opcode ("fstmias");
13847 do_vfp_nsyn_sqrt (void)
13849 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13850 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13853 do_vfp_nsyn_opcode ("fsqrts");
13855 do_vfp_nsyn_opcode ("fsqrtd");
13859 do_vfp_nsyn_div (void)
13861 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13862 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13863 N_F32 | N_F64 | N_KEY | N_VFP);
13866 do_vfp_nsyn_opcode ("fdivs");
13868 do_vfp_nsyn_opcode ("fdivd");
13872 do_vfp_nsyn_nmul (void)
13874 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
13875 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
13876 N_F32 | N_F64 | N_KEY | N_VFP);
13880 NEON_ENCODE (SINGLE, inst);
13881 do_vfp_sp_dyadic ();
13885 NEON_ENCODE (DOUBLE, inst);
13886 do_vfp_dp_rd_rn_rm ();
13888 do_vfp_cond_or_thumb ();
13892 do_vfp_nsyn_cmp (void)
13894 if (inst.operands[1].isreg)
13896 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
13897 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
13901 NEON_ENCODE (SINGLE, inst);
13902 do_vfp_sp_monadic ();
13906 NEON_ENCODE (DOUBLE, inst);
13907 do_vfp_dp_rd_rm ();
13912 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
13913 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
13915 switch (inst.instruction & 0x0fffffff)
13918 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
13921 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
13929 NEON_ENCODE (SINGLE, inst);
13930 do_vfp_sp_compare_z ();
13934 NEON_ENCODE (DOUBLE, inst);
13938 do_vfp_cond_or_thumb ();
13942 nsyn_insert_sp (void)
13944 inst.operands[1] = inst.operands[0];
13945 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
13946 inst.operands[0].reg = REG_SP;
13947 inst.operands[0].isreg = 1;
13948 inst.operands[0].writeback = 1;
13949 inst.operands[0].present = 1;
13953 do_vfp_nsyn_push (void)
13956 if (inst.operands[1].issingle)
13957 do_vfp_nsyn_opcode ("fstmdbs");
13959 do_vfp_nsyn_opcode ("fstmdbd");
13963 do_vfp_nsyn_pop (void)
13966 if (inst.operands[1].issingle)
13967 do_vfp_nsyn_opcode ("fldmias");
13969 do_vfp_nsyn_opcode ("fldmiad");
13972 /* Fix up Neon data-processing instructions, ORing in the correct bits for
13973 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
13976 neon_dp_fixup (struct arm_it* insn)
13978 unsigned int i = insn->instruction;
13983 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
13994 insn->instruction = i;
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  return ffs (x) - 4;
}
/* Split a (D/Q) register number into its low four encoding bits and the
   high "extension" bit used by the D/N/M fields.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
14009 /* Encode insns with bit pattern:
14011 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14012 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14014 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14015 different meaning for some instruction. */
14018 neon_three_same (int isquad, int ubit, int size)
14020 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14021 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14022 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14023 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14024 inst.instruction |= LOW4 (inst.operands[2].reg);
14025 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14026 inst.instruction |= (isquad != 0) << 6;
14027 inst.instruction |= (ubit != 0) << 24;
14029 inst.instruction |= neon_logbits (size) << 20;
14031 neon_dp_fixup (&inst);
14034 /* Encode instructions of the form:
14036 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14037 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14039 Don't write size if SIZE == -1. */
14042 neon_two_same (int qbit, int ubit, int size)
14044 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14045 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14046 inst.instruction |= LOW4 (inst.operands[1].reg);
14047 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14048 inst.instruction |= (qbit != 0) << 6;
14049 inst.instruction |= (ubit != 0) << 24;
14052 inst.instruction |= neon_logbits (size) << 18;
14054 neon_dp_fixup (&inst);
14057 /* Neon instruction encoders, in approximate order of appearance. */
14060 do_neon_dyadic_i_su (void)
14062 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14063 struct neon_type_el et = neon_check_type (3, rs,
14064 N_EQK, N_EQK, N_SU_32 | N_KEY);
14065 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14069 do_neon_dyadic_i64_su (void)
14071 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14072 struct neon_type_el et = neon_check_type (3, rs,
14073 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14074 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14078 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
14081 unsigned size = et.size >> 3;
14082 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14083 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14084 inst.instruction |= LOW4 (inst.operands[1].reg);
14085 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14086 inst.instruction |= (isquad != 0) << 6;
14087 inst.instruction |= immbits << 16;
14088 inst.instruction |= (size >> 3) << 7;
14089 inst.instruction |= (size & 0x7) << 19;
14091 inst.instruction |= (uval != 0) << 24;
14093 neon_dp_fixup (&inst);
14097 do_neon_shl_imm (void)
14099 if (!inst.operands[2].isreg)
14101 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14102 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
14103 NEON_ENCODE (IMMED, inst);
14104 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
14108 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14109 struct neon_type_el et = neon_check_type (3, rs,
14110 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14113 /* VSHL/VQSHL 3-register variants have syntax such as:
14115 whereas other 3-register operations encoded by neon_three_same have
14118 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14120 tmp = inst.operands[2].reg;
14121 inst.operands[2].reg = inst.operands[1].reg;
14122 inst.operands[1].reg = tmp;
14123 NEON_ENCODE (INTEGER, inst);
14124 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14129 do_neon_qshl_imm (void)
14131 if (!inst.operands[2].isreg)
14133 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14134 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14136 NEON_ENCODE (IMMED, inst);
14137 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
14138 inst.operands[2].imm);
14142 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14143 struct neon_type_el et = neon_check_type (3, rs,
14144 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14147 /* See note in do_neon_shl_imm. */
14148 tmp = inst.operands[2].reg;
14149 inst.operands[2].reg = inst.operands[1].reg;
14150 inst.operands[1].reg = tmp;
14151 NEON_ENCODE (INTEGER, inst);
14152 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14157 do_neon_rshl (void)
14159 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14160 struct neon_type_el et = neon_check_type (3, rs,
14161 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14164 tmp = inst.operands[2].reg;
14165 inst.operands[2].reg = inst.operands[1].reg;
14166 inst.operands[1].reg = tmp;
14167 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14171 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
14173 /* Handle .I8 pseudo-instructions. */
14176 /* Unfortunately, this will make everything apart from zero out-of-range.
14177 FIXME is this the intended semantics? There doesn't seem much point in
14178 accepting .I8 if so. */
14179 immediate |= immediate << 8;
14185 if (immediate == (immediate & 0x000000ff))
14187 *immbits = immediate;
14190 else if (immediate == (immediate & 0x0000ff00))
14192 *immbits = immediate >> 8;
14195 else if (immediate == (immediate & 0x00ff0000))
14197 *immbits = immediate >> 16;
14200 else if (immediate == (immediate & 0xff000000))
14202 *immbits = immediate >> 24;
14205 if ((immediate & 0xffff) != (immediate >> 16))
14206 goto bad_immediate;
14207 immediate &= 0xffff;
14210 if (immediate == (immediate & 0x000000ff))
14212 *immbits = immediate;
14215 else if (immediate == (immediate & 0x0000ff00))
14217 *immbits = immediate >> 8;
14222 first_error (_("immediate value out of range"));
14227 do_neon_logic (void)
14229 if (inst.operands[2].present && inst.operands[2].isreg)
14231 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14232 neon_check_type (3, rs, N_IGNORE_TYPE);
14233 /* U bit and size field were set as part of the bitmask. */
14234 NEON_ENCODE (INTEGER, inst);
14235 neon_three_same (neon_quad (rs), 0, -1);
14239 const int three_ops_form = (inst.operands[2].present
14240 && !inst.operands[2].isreg);
14241 const int immoperand = (three_ops_form ? 2 : 1);
14242 enum neon_shape rs = (three_ops_form
14243 ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
14244 : neon_select_shape (NS_DI, NS_QI, NS_NULL));
14245 struct neon_type_el et = neon_check_type (2, rs,
14246 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14247 enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
14251 if (et.type == NT_invtype)
14254 if (three_ops_form)
14255 constraint (inst.operands[0].reg != inst.operands[1].reg,
14256 _("first and second operands shall be the same register"));
14258 NEON_ENCODE (IMMED, inst);
14260 immbits = inst.operands[immoperand].imm;
14263 /* .i64 is a pseudo-op, so the immediate must be a repeating
14265 if (immbits != (inst.operands[immoperand].regisimm ?
14266 inst.operands[immoperand].reg : 0))
14268 /* Set immbits to an invalid constant. */
14269 immbits = 0xdeadbeef;
14276 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14280 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14284 /* Pseudo-instruction for VBIC. */
14285 neon_invert_size (&immbits, 0, et.size);
14286 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14290 /* Pseudo-instruction for VORR. */
14291 neon_invert_size (&immbits, 0, et.size);
14292 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14302 inst.instruction |= neon_quad (rs) << 6;
14303 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14304 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14305 inst.instruction |= cmode << 8;
14306 neon_write_immbits (immbits);
14308 neon_dp_fixup (&inst);
14313 do_neon_bitfield (void)
14315 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14316 neon_check_type (3, rs, N_IGNORE_TYPE);
14317 neon_three_same (neon_quad (rs), 0, -1);
14321 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14324 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14325 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14327 if (et.type == NT_float)
14329 NEON_ENCODE (FLOAT, inst);
14330 neon_three_same (neon_quad (rs), 0, -1);
14334 NEON_ENCODE (INTEGER, inst);
14335 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14340 do_neon_dyadic_if_su (void)
14342 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14346 do_neon_dyadic_if_su_d (void)
14348 /* This version only allow D registers, but that constraint is enforced during
14349 operand parsing so we don't need to do anything extra here. */
14350 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14354 do_neon_dyadic_if_i_d (void)
14356 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14357 affected if we specify unsigned args. */
14358 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
/* Flags selecting which checks vfp_or_neon_is_neon performs.  */
enum vfp_or_neon_is_neon_bits
{
  NEON_CHECK_CC = 1,
  NEON_CHECK_ARCH = 2,
  NEON_CHECK_ARCH8 = 4
};
14368 /* Call this function if an instruction which may have belonged to the VFP or
14369 Neon instruction sets, but turned out to be a Neon instruction (due to the
14370 operand types involved, etc.). We have to check and/or fix-up a couple of
14373 - Make sure the user hasn't attempted to make a Neon instruction
14375 - Alter the value in the condition code field if necessary.
14376 - Make sure that the arch supports Neon instructions.
14378 Which of these operations take place depends on bits from enum
14379 vfp_or_neon_is_neon_bits.
14381 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14382 current instruction's condition is COND_ALWAYS, the condition field is
14383 changed to inst.uncond_value. This is necessary because instructions shared
14384 between VFP and Neon may be conditional for the VFP variants only, and the
14385 unconditional Neon version must have, e.g., 0xF in the condition field. */
14388 vfp_or_neon_is_neon (unsigned check)
14390 /* Conditions are always legal in Thumb mode (IT blocks). */
14391 if (!thumb_mode && (check & NEON_CHECK_CC))
14393 if (inst.cond != COND_ALWAYS)
14395 first_error (_(BAD_COND));
14398 if (inst.uncond_value != -1)
14399 inst.instruction |= inst.uncond_value << 28;
14402 if ((check & NEON_CHECK_ARCH)
14403 && !mark_feature_used (&fpu_neon_ext_v1))
14405 first_error (_(BAD_FPU));
14409 if ((check & NEON_CHECK_ARCH8)
14410 && !mark_feature_used (&fpu_neon_ext_armv8))
14412 first_error (_(BAD_FPU));
14420 do_neon_addsub_if_i (void)
14422 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
14425 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14428 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14429 affected if we specify unsigned args. */
14430 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
14433 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
14435 V<op> A,B (A is operand 0, B is operand 2)
14440 so handle that case specially. */
14443 neon_exchange_operands (void)
14445 void *scratch = alloca (sizeof (inst.operands[0]));
14446 if (inst.operands[1].present)
14448 /* Swap operands[1] and operands[2]. */
14449 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
14450 inst.operands[1] = inst.operands[2];
14451 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
14455 inst.operands[1] = inst.operands[2];
14456 inst.operands[2] = inst.operands[0];
14461 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
14463 if (inst.operands[2].isreg)
14466 neon_exchange_operands ();
14467 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
14471 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14472 struct neon_type_el et = neon_check_type (2, rs,
14473 N_EQK | N_SIZ, immtypes | N_KEY);
14475 NEON_ENCODE (IMMED, inst);
14476 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14477 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14478 inst.instruction |= LOW4 (inst.operands[1].reg);
14479 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14480 inst.instruction |= neon_quad (rs) << 6;
14481 inst.instruction |= (et.type == NT_float) << 10;
14482 inst.instruction |= neon_logbits (et.size) << 18;
14484 neon_dp_fixup (&inst);
14491 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
14495 do_neon_cmp_inv (void)
14497 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
14503 neon_compare (N_IF_32, N_IF_32, FALSE);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
   index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
        goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
        goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  return 0;
}
14538 /* Encode multiply / multiply-accumulate scalar instructions. */
14541 neon_mul_mac (struct neon_type_el et, int ubit)
14545 /* Give a more helpful error message if we have an invalid type. */
14546 if (et.type == NT_invtype)
14549 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
14550 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14551 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14552 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14553 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14554 inst.instruction |= LOW4 (scalar);
14555 inst.instruction |= HI1 (scalar) << 5;
14556 inst.instruction |= (et.type == NT_float) << 8;
14557 inst.instruction |= neon_logbits (et.size) << 20;
14558 inst.instruction |= (ubit != 0) << 24;
14560 neon_dp_fixup (&inst);
14564 do_neon_mac_maybe_scalar (void)
14566 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
14569 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14572 if (inst.operands[2].isscalar)
14574 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14575 struct neon_type_el et = neon_check_type (3, rs,
14576 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
14577 NEON_ENCODE (SCALAR, inst);
14578 neon_mul_mac (et, neon_quad (rs));
14582 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14583 affected if we specify unsigned args. */
14584 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14589 do_neon_fmac (void)
14591 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
14594 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14597 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14603 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14604 struct neon_type_el et = neon_check_type (3, rs,
14605 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
14606 neon_three_same (neon_quad (rs), 0, et.size);
14609 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
14610 same types as the MAC equivalents. The polynomial type for this instruction
14611 is encoded the same as the integer type. */
14616 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
14619 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14622 if (inst.operands[2].isscalar)
14623 do_neon_mac_maybe_scalar ();
14625 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
14629 do_neon_qdmulh (void)
14631 if (inst.operands[2].isscalar)
14633 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
14634 struct neon_type_el et = neon_check_type (3, rs,
14635 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14636 NEON_ENCODE (SCALAR, inst);
14637 neon_mul_mac (et, neon_quad (rs));
14641 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14642 struct neon_type_el et = neon_check_type (3, rs,
14643 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
14644 NEON_ENCODE (INTEGER, inst);
14645 /* The U bit (rounding) comes from bit mask. */
14646 neon_three_same (neon_quad (rs), 0, et.size);
14651 do_neon_fcmp_absolute (void)
14653 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14654 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14655 /* Size field comes from bit mask. */
14656 neon_three_same (neon_quad (rs), 1, -1);
/* VACLE/VACLT: as VACGE/VACGT with operands exchanged.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
14667 do_neon_step (void)
14669 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14670 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
14671 neon_three_same (neon_quad (rs), 0, -1);
14675 do_neon_abs_neg (void)
14677 enum neon_shape rs;
14678 struct neon_type_el et;
14680 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
14683 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
14686 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
14687 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
14689 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14690 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14691 inst.instruction |= LOW4 (inst.operands[1].reg);
14692 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14693 inst.instruction |= neon_quad (rs) << 6;
14694 inst.instruction |= (et.type == NT_float) << 10;
14695 inst.instruction |= neon_logbits (et.size) << 18;
14697 neon_dp_fixup (&inst);
14703 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14704 struct neon_type_el et = neon_check_type (2, rs,
14705 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14706 int imm = inst.operands[2].imm;
14707 constraint (imm < 0 || (unsigned)imm >= et.size,
14708 _("immediate out of range for insert"));
14709 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14715 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14716 struct neon_type_el et = neon_check_type (2, rs,
14717 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
14718 int imm = inst.operands[2].imm;
14719 constraint (imm < 1 || (unsigned)imm > et.size,
14720 _("immediate out of range for insert"));
14721 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
14725 do_neon_qshlu_imm (void)
14727 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14728 struct neon_type_el et = neon_check_type (2, rs,
14729 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
14730 int imm = inst.operands[2].imm;
14731 constraint (imm < 0 || (unsigned)imm >= et.size,
14732 _("immediate out of range for shift"));
14733 /* Only encodes the 'U present' variant of the instruction.
14734 In this case, signed types have OP (bit 8) set to 0.
14735 Unsigned types have OP set to 1. */
14736 inst.instruction |= (et.type == NT_unsigned) << 8;
14737 /* The rest of the bits are the same as other immediate shifts. */
14738 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14742 do_neon_qmovn (void)
14744 struct neon_type_el et = neon_check_type (2, NS_DQ,
14745 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14746 /* Saturating move where operands can be signed or unsigned, and the
14747 destination has the same signedness. */
14748 NEON_ENCODE (INTEGER, inst);
14749 if (et.type == NT_unsigned)
14750 inst.instruction |= 0xc0;
14752 inst.instruction |= 0x80;
14753 neon_two_same (0, 1, et.size / 2);
14757 do_neon_qmovun (void)
14759 struct neon_type_el et = neon_check_type (2, NS_DQ,
14760 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14761 /* Saturating move with unsigned results. Operands must be signed. */
14762 NEON_ENCODE (INTEGER, inst);
14763 neon_two_same (0, 1, et.size / 2);
14767 do_neon_rshift_sat_narrow (void)
14769 /* FIXME: Types for narrowing. If operands are signed, results can be signed
14770 or unsigned. If operands are unsigned, results must also be unsigned. */
14771 struct neon_type_el et = neon_check_type (2, NS_DQI,
14772 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
14773 int imm = inst.operands[2].imm;
14774 /* This gets the bounds check, size encoding and immediate bits calculation
14778 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
14779 VQMOVN.I<size> <Dd>, <Qm>. */
14782 inst.operands[2].present = 0;
14783 inst.instruction = N_MNEM_vqmovn;
14788 constraint (imm < 1 || (unsigned)imm > et.size,
14789 _("immediate out of range"));
14790 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
14794 do_neon_rshift_sat_narrow_u (void)
14796 /* FIXME: Types for narrowing. If operands are signed, results can be signed
14797 or unsigned. If operands are unsigned, results must also be unsigned. */
14798 struct neon_type_el et = neon_check_type (2, NS_DQI,
14799 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
14800 int imm = inst.operands[2].imm;
14801 /* This gets the bounds check, size encoding and immediate bits calculation
14805 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
14806 VQMOVUN.I<size> <Dd>, <Qm>. */
14809 inst.operands[2].present = 0;
14810 inst.instruction = N_MNEM_vqmovun;
14815 constraint (imm < 1 || (unsigned)imm > et.size,
14816 _("immediate out of range"));
14817 /* FIXME: The manual is kind of unclear about what value U should have in
14818 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
14820 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
14824 do_neon_movn (void)
14826 struct neon_type_el et = neon_check_type (2, NS_DQ,
14827 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14828 NEON_ENCODE (INTEGER, inst);
14829 neon_two_same (0, 1, et.size / 2);
14833 do_neon_rshift_narrow (void)
14835 struct neon_type_el et = neon_check_type (2, NS_DQI,
14836 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
14837 int imm = inst.operands[2].imm;
14838 /* This gets the bounds check, size encoding and immediate bits calculation
14842 /* If immediate is zero then we are a pseudo-instruction for
14843 VMOVN.I<size> <Dd>, <Qm> */
14846 inst.operands[2].present = 0;
14847 inst.instruction = N_MNEM_vmovn;
14852 constraint (imm < 1 || (unsigned)imm > et.size,
14853 _("immediate out of range for narrowing operation"));
14854 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
14858 do_neon_shll (void)
14860 /* FIXME: Type checking when lengthening. */
14861 struct neon_type_el et = neon_check_type (2, NS_QDI,
14862 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
14863 unsigned imm = inst.operands[2].imm;
14865 if (imm == et.size)
14867 /* Maximum shift variant. */
14868 NEON_ENCODE (INTEGER, inst);
14869 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14870 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14871 inst.instruction |= LOW4 (inst.operands[1].reg);
14872 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14873 inst.instruction |= neon_logbits (et.size) << 18;
14875 neon_dp_fixup (&inst);
14879 /* A more-specific type check for non-max versions. */
14880 et = neon_check_type (2, NS_QDI,
14881 N_EQK | N_DBL, N_SU_32 | N_KEY);
14882 NEON_ENCODE (IMMED, inst);
14883 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
14887 /* Check the various types for the VCVT instruction, and return which version
14888 the current instruction is. */
14890 #define CVT_FLAVOUR_VAR \
14891 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
14892 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
14893 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
14894 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
14895 /* Half-precision conversions. */ \
14896 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
14897 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
14898 /* VFP instructions. */ \
14899 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
14900 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
14901 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
14902 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
14903 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
14904 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
14905 /* VFP instructions with bitshift. */ \
14906 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
14907 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
14908 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
14909 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
14910 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
14911 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
14912 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
14913 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
14915 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
14916 neon_cvt_flavour_##C,
14918 /* The different types of conversions we can do. */
14919 enum neon_cvt_flavour
14922 neon_cvt_flavour_invalid,
14923 neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
14928 static enum neon_cvt_flavour
14929 get_neon_cvt_flavour (enum neon_shape rs)
14931 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
14932 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
14933 if (et.type != NT_invtype) \
14935 inst.error = NULL; \
14936 return (neon_cvt_flavour_##C); \
14939 struct neon_type_el et;
14940 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
14941 || rs == NS_FF) ? N_VFP : 0;
14942 /* The instruction versions which take an immediate take one register
14943 argument, which is extended to the width of the full register. Thus the
14944 "source" and "destination" registers must have the same width. Hack that
14945 here by making the size equal to the key (wider, in this case) operand. */
14946 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
14950 return neon_cvt_flavour_invalid;
14965 /* Neon-syntax VFP conversions. */
14968 do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
14970 const char *opname = 0;
14972 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
14974 /* Conversions with immediate bitshift. */
14975 const char *enc[] =
14977 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
14983 if (flavour < (int) ARRAY_SIZE (enc))
14985 opname = enc[flavour];
14986 constraint (inst.operands[0].reg != inst.operands[1].reg,
14987 _("operands 0 and 1 must be the same register"));
14988 inst.operands[1] = inst.operands[2];
14989 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
14994 /* Conversions without bitshift. */
14995 const char *enc[] =
14997 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15003 if (flavour < (int) ARRAY_SIZE (enc))
15004 opname = enc[flavour];
15008 do_vfp_nsyn_opcode (opname);
15012 do_vfp_nsyn_cvtz (void)
15014 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
15015 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
15016 const char *enc[] =
15018 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15024 if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
15025 do_vfp_nsyn_opcode (enc[flavour]);
15029 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
15030 enum neon_cvt_mode mode)
15035 set_it_insn_type (OUTSIDE_IT_INSN);
15039 case neon_cvt_flavour_s32_f64:
15043 case neon_cvt_flavour_s32_f32:
15047 case neon_cvt_flavour_u32_f64:
15051 case neon_cvt_flavour_u32_f32:
15056 first_error (_("invalid instruction shape"));
15062 case neon_cvt_mode_a: rm = 0; break;
15063 case neon_cvt_mode_n: rm = 1; break;
15064 case neon_cvt_mode_p: rm = 2; break;
15065 case neon_cvt_mode_m: rm = 3; break;
15066 default: first_error (_("invalid rounding mode")); return;
15069 NEON_ENCODE (FPV8, inst);
15070 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
15071 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
15072 inst.instruction |= sz << 8;
15073 inst.instruction |= op << 7;
15074 inst.instruction |= rm << 16;
15075 inst.instruction |= 0xf0000000;
15076 inst.is_neon = TRUE;
15080 do_neon_cvt_1 (enum neon_cvt_mode mode)
15082 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
15083 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ, NS_NULL);
15084 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
15086 /* PR11109: Handle round-to-zero for VCVT conversions. */
15087 if (mode == neon_cvt_mode_z
15088 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
15089 && (flavour == neon_cvt_flavour_s32_f32
15090 || flavour == neon_cvt_flavour_u32_f32
15091 || flavour == neon_cvt_flavour_s32_f64
15092 || flavour == neon_cvt_flavour_u32_f64)
15093 && (rs == NS_FD || rs == NS_FF))
15095 do_vfp_nsyn_cvtz ();
15099 /* VFP rather than Neon conversions. */
15100 if (flavour >= neon_cvt_flavour_first_fp)
15102 if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15103 do_vfp_nsyn_cvt (rs, flavour);
15105 do_vfp_nsyn_cvt_fpv8 (flavour, mode);
15116 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
15118 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15121 /* Fixed-point conversion with #0 immediate is encoded as an
15122 integer conversion. */
15123 if (inst.operands[2].present && inst.operands[2].imm == 0)
15125 immbits = 32 - inst.operands[2].imm;
15126 NEON_ENCODE (IMMED, inst);
15127 if (flavour != neon_cvt_flavour_invalid)
15128 inst.instruction |= enctab[flavour];
15129 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15130 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15131 inst.instruction |= LOW4 (inst.operands[1].reg);
15132 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15133 inst.instruction |= neon_quad (rs) << 6;
15134 inst.instruction |= 1 << 21;
15135 inst.instruction |= immbits << 16;
15137 neon_dp_fixup (&inst);
15143 if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
15145 NEON_ENCODE (FLOAT, inst);
15146 set_it_insn_type (OUTSIDE_IT_INSN);
15148 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
15151 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15152 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15153 inst.instruction |= LOW4 (inst.operands[1].reg);
15154 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15155 inst.instruction |= neon_quad (rs) << 6;
15156 inst.instruction |= (flavour == neon_cvt_flavour_u32_f32) << 7;
15157 inst.instruction |= mode << 8;
15159 inst.instruction |= 0xfc000000;
15161 inst.instruction |= 0xf0000000;
15167 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
15169 NEON_ENCODE (INTEGER, inst);
15171 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15174 if (flavour != neon_cvt_flavour_invalid)
15175 inst.instruction |= enctab[flavour];
15177 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15178 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15179 inst.instruction |= LOW4 (inst.operands[1].reg);
15180 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15181 inst.instruction |= neon_quad (rs) << 6;
15182 inst.instruction |= 2 << 18;
15184 neon_dp_fixup (&inst);
15189 /* Half-precision conversions for Advanced SIMD -- neon. */
15194 && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
15196 as_bad (_("operand size must match register width"));
15201 && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
15203 as_bad (_("operand size must match register width"));
15208 inst.instruction = 0x3b60600;
15210 inst.instruction = 0x3b60700;
15212 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15213 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15214 inst.instruction |= LOW4 (inst.operands[1].reg);
15215 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15216 neon_dp_fixup (&inst);
15220 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15221 if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15222 do_vfp_nsyn_cvt (rs, flavour);
15224 do_vfp_nsyn_cvt_fpv8 (flavour, mode);
15229 do_neon_cvtr (void)
15231 do_neon_cvt_1 (neon_cvt_mode_x);
15237 do_neon_cvt_1 (neon_cvt_mode_z);
15241 do_neon_cvta (void)
15243 do_neon_cvt_1 (neon_cvt_mode_a);
15247 do_neon_cvtn (void)
15249 do_neon_cvt_1 (neon_cvt_mode_n);
15253 do_neon_cvtp (void)
15255 do_neon_cvt_1 (neon_cvt_mode_p);
15259 do_neon_cvtm (void)
15261 do_neon_cvt_1 (neon_cvt_mode_m);
15265 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15268 mark_feature_used (&fpu_vfp_ext_armv8);
15270 encode_arm_vfp_reg (inst.operands[0].reg,
15271 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15272 encode_arm_vfp_reg (inst.operands[1].reg,
15273 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15274 inst.instruction |= to ? 0x10000 : 0;
15275 inst.instruction |= t ? 0x80 : 0;
15276 inst.instruction |= is_double ? 0x100 : 0;
15277 do_vfp_cond_or_thumb ();
15281 do_neon_cvttb_1 (bfd_boolean t)
15283 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_DF, NS_NULL);
15287 else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
15290 do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
15292 else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
15295 do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
15297 else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
15300 do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
15302 else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
15305 do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
15312 do_neon_cvtb (void)
15314 do_neon_cvttb_1 (FALSE);
15319 do_neon_cvtt (void)
15321 do_neon_cvttb_1 (TRUE);
/* Encode the immediate form of VMOV/VMVN (cases 2/3 of do_neon_mov).
   Finds a cmode/op encoding for the immediate via neon_cmode_for_move_imm;
   if none exists, inverts the immediate and flips MOV<->MVN before retrying.
   Writes Rd, Q, op, cmode and the immediate bits into inst.instruction.  */
15325 neon_move_immediate (void)
15327 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
15328 struct neon_type_el et = neon_check_type (2, rs,
15329 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
15330 unsigned immlo, immhi = 0, immbits;
15331 int op, cmode, float_p;
15333 constraint (et.type == NT_invtype,
15334 _("operand size must be specified for immediate VMOV"));
15336 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
15337 op = (inst.instruction & (1 << 5)) != 0;
/* A 64-bit immediate is parsed as a register-number pair: the high word
   lives in operands[1].reg when regisimm is set.  */
15339 immlo = inst.operands[1].imm;
15340 if (inst.operands[1].regisimm)
15341 immhi = inst.operands[1].reg;
15343 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
15344 _("immediate has bits set outside the operand size"));
15346 float_p = inst.operands[1].immisfloat;
15348 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
15349 et.size, et.type)) == FAIL)
15351 /* Invert relevant bits only. */
15352 neon_invert_size (&immlo, &immhi, et.size);
15353 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
15354 with one or the other; those cases are caught by
15355 neon_cmode_for_move_imm. */
15357 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
15358 &op, et.size, et.type)) == FAIL)
15360 first_error (_("immediate out of range"));
/* Clear the stale op bit before installing the (possibly flipped) one.  */
15365 inst.instruction &= ~(1 << 5);
15366 inst.instruction |= op << 5;
15368 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15369 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15370 inst.instruction |= neon_quad (rs) << 6;
15371 inst.instruction |= cmode << 8;
15373 neon_write_immbits (immbits);
/* NOTE(review): the function header (original lines 15375-15378) is elided
   from this listing; by position this is presumably do_neon_mvn — confirm
   against the full source.  Register form encodes as a two-register integer
   op; immediate form defers to neon_move_immediate.  */
15379 if (inst.operands[1].isreg)
15381 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15383 NEON_ENCODE (INTEGER, inst);
15384 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15385 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15386 inst.instruction |= LOW4 (inst.operands[1].reg);
15387 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15388 inst.instruction |= neon_quad (rs) << 6;
15392 NEON_ENCODE (IMMED, inst);
15393 neon_move_immediate ();
15396 neon_dp_fixup (&inst);
15399 /* Encode instructions of form:
15401 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
15402 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
/* Common encoder for mixed-length (long/wide/narrow) three-register NEON
   ops: installs Rd/Rn/Rm, the U (unsigned) bit and log2(size) in the
   size field, then applies the data-processing fixup.  */
15405 neon_mixed_length (struct neon_type_el et, unsigned size)
15407 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15408 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15409 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15410 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15411 inst.instruction |= LOW4 (inst.operands[2].reg);
15412 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15413 inst.instruction |= (et.type == NT_unsigned) << 24;
15414 inst.instruction |= neon_logbits (size) << 20;
15416 neon_dp_fixup (&inst);
/* Lengthening dyadic ops (Qd = Dn op Dm): type-check as QDD and defer to
   neon_mixed_length.  */
15420 do_neon_dyadic_long (void)
15422 /* FIXME: Type checking for lengthening op. */
15423 struct neon_type_el et = neon_check_type (3, NS_QDD,
15424 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
15425 neon_mixed_length (et, et.size);
/* VABAL: absolute-difference-and-accumulate long; same QDD shape but the
   destination type must additionally be integer.  */
15429 do_neon_abal (void)
15431 struct neon_type_el et = neon_check_type (3, NS_QDD,
15432 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
15433 neon_mixed_length (et, et.size);
/* Long multiply-accumulate, selecting between the by-scalar (QDS) form and
   the plain register (QDD) form depending on operand 2.  REGTYPES constrains
   the by-scalar form, SCALARTYPES the register form (note the argument
   names read swapped relative to use; behavior matches upstream).  */
15437 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
15439 if (inst.operands[2].isscalar)
15441 struct neon_type_el et = neon_check_type (3, NS_QDS,
15442 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
15443 NEON_ENCODE (SCALAR, inst);
15444 neon_mul_mac (et, et.type == NT_unsigned);
15448 struct neon_type_el et = neon_check_type (3, NS_QDD,
15449 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
15450 NEON_ENCODE (INTEGER, inst);
15451 neon_mixed_length (et, et.size);
/* VMLAL/VMLSL and friends: 16/32-bit scalars, any signed/unsigned 32-bit
   element for the register form.  */
15456 do_neon_mac_maybe_scalar_long (void)
15458 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
/* Widening dyadic ops (Qd = Qn op Dm).  */
15462 do_neon_dyadic_wide (void)
15464 struct neon_type_el et = neon_check_type (3, NS_QQD,
15465 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
15466 neon_mixed_length (et, et.size);
/* Narrowing dyadic ops: result elements are half-width, sign is irrelevant
   because the U bit is part of the opcode.  */
15470 do_neon_dyadic_narrow (void)
15472 struct neon_type_el et = neon_check_type (3, NS_QDD,
15473 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
15474 /* Operand sign is unimportant, and the U bit is part of the opcode,
15475 so force the operand type to integer. */
15476 et.type = NT_integer;
15477 neon_mixed_length (et, et.size / 2);
/* VQDMULL-style saturating long multiply (signed 16/32 only).  */
15481 do_neon_mul_sat_scalar_long (void)
15483 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
/* VMULL: by-scalar form reuses the MLAL path; otherwise a QDD long multiply
   that also accepts polynomial types (P8, and P64 on ARMv8 crypto).  */
15487 do_neon_vmull (void)
15489 if (inst.operands[2].isscalar)
15490 do_neon_mac_maybe_scalar_long ();
15493 struct neon_type_el et = neon_check_type (3, NS_QDD,
15494 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
15496 if (et.type == NT_poly)
15497 NEON_ENCODE (POLY, inst);
15499 NEON_ENCODE (INTEGER, inst);
15501 /* For polynomial encoding the U bit must be zero, and the size must
15502 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
15503 obviously, as 0b10). */
15506 /* Check we're on the correct architecture. */
15507 if (!mark_feature_used (&fpu_crypto_ext_armv8))
/* NOTE(review): the enclosing diagnostic call (original line 15508, an
   as_bad/inst.error line per upstream) is elided; the string below is its
   argument, not a statement on its own.  */
15509 _("Instruction form not available on this architecture.");
15514 neon_mixed_length (et, et.size);
/* NOTE(review): function header elided (original lines ~15517-15520); by
   position this is presumably do_neon_ext (VEXT) — confirm against the full
   source.  The byte-granular extract position is the element immediate
   scaled by the element size.  */
15521 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
15522 struct neon_type_el et = neon_check_type (3, rs,
15523 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15524 unsigned imm = (inst.operands[3].imm * et.size) / 8;
/* Byte offset must stay inside the (64- or 128-bit) register.  */
15526 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
15527 _("shift out of range"));
15528 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15529 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15530 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15531 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15532 inst.instruction |= LOW4 (inst.operands[2].reg);
15533 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15534 inst.instruction |= neon_quad (rs) << 6;
15535 inst.instruction |= imm << 8;
15537 neon_dp_fixup (&inst);
/* NOTE(review): function header elided; by position presumably do_neon_rev
   (VREV16/32/64) — confirm against the full source.  */
15543 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15544 struct neon_type_el et = neon_check_type (2, rs,
15545 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15546 unsigned op = (inst.instruction >> 7) & 3;
15547 /* N (width of reversed regions) is encoded as part of the bitmask. We
15548 extract it here to check the elements to be reversed are smaller.
15549 Otherwise we'd get a reserved instruction. */
15550 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
15551 gas_assert (elsize != 0);
15552 constraint (et.size >= elsize,
15553 _("elements must be smaller than reversal region"));
15554 neon_two_same (neon_quad (rs), 1, et.size);
/* NOTE(review): function header elided; by position presumably do_neon_dup
   (VDUP) — confirm against the full source.  Scalar source uses the NEON
   duplicate-scalar encoding; ARM-register source uses the shared ARM/Thumb
   VDUP-from-core-register encoding.  */
15560 if (inst.operands[1].isscalar)
15562 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
15563 struct neon_type_el et = neon_check_type (2, rs,
15564 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15565 unsigned sizebits = et.size >> 3;
15566 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
15567 int logsize = neon_logbits (et.size);
15568 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
15570 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
15573 NEON_ENCODE (SCALAR, inst);
15574 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15575 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15576 inst.instruction |= LOW4 (dm);
15577 inst.instruction |= HI1 (dm) << 5;
15578 inst.instruction |= neon_quad (rs) << 6;
/* Scalar index and size together form the imm4 field.  */
15579 inst.instruction |= x << 17;
15580 inst.instruction |= sizebits << 16;
15582 neon_dp_fixup (&inst);
15586 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
15587 struct neon_type_el et = neon_check_type (2, rs,
15588 N_8 | N_16 | N_32 | N_KEY, N_EQK);
15589 /* Duplicate ARM register to lanes of vector. */
15590 NEON_ENCODE (ARMREG, inst);
/* NOTE(review): the switch head (original lines ~15591-15592) is elided.  */
15593 case 8: inst.instruction |= 0x400000; break;
15594 case 16: inst.instruction |= 0x000020; break;
15595 case 32: inst.instruction |= 0x000000; break;
15598 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
15599 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
15600 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
15601 inst.instruction |= neon_quad (rs) << 21;
15602 /* The encoding for this instruction is identical for the ARM and Thumb
15603 variants, except for the condition field. */
15604 do_vfp_cond_or_thumb ();
15608 /* VMOV has particularly many variations. It can be one of:
15609 0. VMOV<c><q> <Qd>, <Qm>
15610 1. VMOV<c><q> <Dd>, <Dm>
15611 (Register operations, which are VORR with Rm = Rn.)
15612 2. VMOV<c><q>.<dt> <Qd>, #<imm>
15613 3. VMOV<c><q>.<dt> <Dd>, #<imm>
15615 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
15616 (ARM register to scalar.)
15617 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
15618 (Two ARM registers to vector.)
15619 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
15620 (Scalar to ARM register.)
15621 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
15622 (Vector to two ARM registers.)
15623 8. VMOV.F32 <Sd>, <Sm>
15624 9. VMOV.F64 <Dd>, <Dm>
15625 (VFP register moves.)
15626 10. VMOV.F32 <Sd>, #imm
15627 11. VMOV.F64 <Dd>, #imm
15628 (VFP float immediate load.)
15629 12. VMOV <Rd>, <Sm>
15630 (VFP single to ARM reg.)
15631 13. VMOV <Sd>, <Rm>
15632 (ARM reg to VFP single.)
15633 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
15634 (Two ARM regs to two VFP singles.)
15635 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
15636 (Two VFP singles to two ARM regs.)
15638 These cases can be disambiguated using neon_select_shape, except cases 1/9
15639 and 3/11 which depend on the operand type too.
15641 All the encoded bits are hardcoded by this function.
15643 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
15644 Cases 5, 7 may be used with VFPv2 and above.
15646 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
15647 can specify a type where it doesn't make sense to, and is ignored). */
/* NOTE(review): function header elided (original lines ~15649-15651); this
   is the body of do_neon_mov, dispatching the 16 VMOV variants described in
   the comment block above on the matched operand shape.  Several structural
   lines (switch head, braces, the encode_fconstd: label) are among the
   elided listing lines.  */
15652 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
15653 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
15655 struct neon_type_el et;
15656 const char *ldconst = 0;
15660 case NS_DD: /* case 1/9. */
15661 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
15662 /* It is not an error here if no type is given. */
/* F64 type means case 9: a VFP double-precision register copy.  */
15664 if (et.type == NT_float && et.size == 64)
15666 do_vfp_nsyn_opcode ("fcpyd");
15669 /* fall through. */
15671 case NS_QQ: /* case 0/1. */
15673 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15675 /* The architecture manual I have doesn't explicitly state which
15676 value the U bit should have for register->register moves, but
15677 the equivalent VORR instruction has U = 0, so do that. */
15678 inst.instruction = 0x0200110;
15679 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15680 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15681 inst.instruction |= LOW4 (inst.operands[1].reg);
15682 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
/* VORR with Rn == Rm: the source register is encoded in both fields.  */
15683 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15684 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15685 inst.instruction |= neon_quad (rs) << 6;
15687 neon_dp_fixup (&inst);
15691 case NS_DI: /* case 3/11. */
15692 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
15694 if (et.type == NT_float && et.size == 64)
15696 /* case 11 (fconstd). */
15697 ldconst = "fconstd";
15698 goto encode_fconstd;
15700 /* fall through. */
15702 case NS_QI: /* case 2/3. */
15703 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15705 inst.instruction = 0x0800010;
15706 neon_move_immediate ();
15707 neon_dp_fixup (&inst);
15710 case NS_SR: /* case 4: ARM core register to vector lane. */
15712 unsigned bcdebits = 0;
15714 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
15715 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
15717 /* .<size> is optional here, defaulting to .32. */
15718 if (inst.vectype.elems == 0
15719 && inst.operands[0].vectype.type == NT_invtype
15720 && inst.operands[1].vectype.type == NT_invtype)
15722 inst.vectype.el[0].type = NT_untyped;
15723 inst.vectype.el[0].size = 32;
15724 inst.vectype.elems = 1;
15727 et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
15728 logsize = neon_logbits (et.size);
15730 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
/* 8/16-bit lane transfers need NEON, not just VFP.  */
15732 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
15733 && et.size != 32, _(BAD_FPU));
15734 constraint (et.type == NT_invtype, _("bad type for scalar"));
15735 constraint (x >= 64 / et.size, _("scalar index out of range"));
/* NOTE(review): switch head elided here (original lines ~15737-15738).  */
15739 case 8: bcdebits = 0x8; break;
15740 case 16: bcdebits = 0x1; break;
15741 case 32: bcdebits = 0x0; break;
15745 bcdebits |= x << logsize;
15747 inst.instruction = 0xe000b10;
15748 do_vfp_cond_or_thumb ();
15749 inst.instruction |= LOW4 (dn) << 16;
15750 inst.instruction |= HI1 (dn) << 7;
15751 inst.instruction |= inst.operands[1].reg << 12;
15752 inst.instruction |= (bcdebits & 3) << 5;
15753 inst.instruction |= (bcdebits >> 2) << 21;
15757 case NS_DRR: /* case 5 (fmdrr). */
15758 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
15761 inst.instruction = 0xc400b10;
15762 do_vfp_cond_or_thumb ();
15763 inst.instruction |= LOW4 (inst.operands[0].reg);
15764 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
15765 inst.instruction |= inst.operands[1].reg << 12;
15766 inst.instruction |= inst.operands[2].reg << 16;
15769 case NS_RS: /* case 6: vector lane to ARM core register. */
15772 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
15773 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
15774 unsigned abcdebits = 0;
15776 /* .<dt> is optional here, defaulting to .32. */
15777 if (inst.vectype.elems == 0
15778 && inst.operands[0].vectype.type == NT_invtype
15779 && inst.operands[1].vectype.type == NT_invtype)
15781 inst.vectype.el[0].type = NT_untyped;
15782 inst.vectype.el[0].size = 32;
15783 inst.vectype.elems = 1;
/* Sign matters when narrowing to a core register, hence the S/U types.  */
15786 et = neon_check_type (2, NS_NULL,
15787 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
15788 logsize = neon_logbits (et.size);
15790 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
15792 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
15793 && et.size != 32, _(BAD_FPU));
15794 constraint (et.type == NT_invtype, _("bad type for scalar"));
15795 constraint (x >= 64 / et.size, _("scalar index out of range"));
/* NOTE(review): switch head elided here (original lines ~15797-15798).  */
15799 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
15800 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
15801 case 32: abcdebits = 0x00; break;
15805 abcdebits |= x << logsize;
15806 inst.instruction = 0xe100b10;
15807 do_vfp_cond_or_thumb ();
15808 inst.instruction |= LOW4 (dn) << 16;
15809 inst.instruction |= HI1 (dn) << 7;
15810 inst.instruction |= inst.operands[0].reg << 12;
15811 inst.instruction |= (abcdebits & 3) << 5;
15812 inst.instruction |= (abcdebits >> 2) << 21;
15816 case NS_RRD: /* case 7 (fmrrd). */
15817 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
15820 inst.instruction = 0xc500b10;
15821 do_vfp_cond_or_thumb ();
15822 inst.instruction |= inst.operands[0].reg << 12;
15823 inst.instruction |= inst.operands[1].reg << 16;
15824 inst.instruction |= LOW4 (inst.operands[2].reg);
15825 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
15828 case NS_FF: /* case 8 (fcpys). */
15829 do_vfp_nsyn_opcode ("fcpys");
15832 case NS_FI: /* case 10 (fconsts). */
15833 ldconst = "fconsts";
/* NOTE(review): the encode_fconstd: label (original line ~15834) is elided;
   case 11 jumps here with ldconst = "fconstd".  */
15835 if (is_quarter_float (inst.operands[1].imm))
15837 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
15838 do_vfp_nsyn_opcode (ldconst);
15841 first_error (_("immediate out of range"));
15844 case NS_RF: /* case 12 (fmrs). */
15845 do_vfp_nsyn_opcode ("fmrs");
15848 case NS_FR: /* case 13 (fmsr). */
15849 do_vfp_nsyn_opcode ("fmsr");
15852 /* The encoders for the fmrrs and fmsrr instructions expect three operands
15853 (one of which is a list), but we have parsed four. Do some fiddling to
15854 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
15856 case NS_RRFF: /* case 14 (fmrrs). */
15857 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
15858 _("VFP registers must be adjacent"));
15859 inst.operands[2].imm = 2;
15860 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15861 do_vfp_nsyn_opcode ("fmrrs");
15864 case NS_FFRR: /* case 15 (fmsrr). */
15865 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
15866 _("VFP registers must be adjacent"));
15867 inst.operands[1] = inst.operands[2];
15868 inst.operands[2] = inst.operands[3];
15869 inst.operands[0].imm = 2;
15870 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
15871 do_vfp_nsyn_opcode ("fmsrr");
15875 /* neon_select_shape has determined that the instruction
15876 shape is wrong and has already set the error message. */
/* V{R}SHR immediate form.  A zero shift is an alias for VMOV (the operand
   is dropped and the move path takes over — elided lines here); otherwise
   the shift must be 1..element-size and is encoded via neon_imm_shift.  */
15885 do_neon_rshift_round_imm (void)
15887 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15888 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
15889 int imm = inst.operands[2].imm;
15891 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
15894 inst.operands[2].present = 0;
15899 constraint (imm < 1 || (unsigned)imm > et.size,
15900 _("immediate out of range for shift"));
15901 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
/* VMOVL: sign/zero-extend each doubleword element to a quadword register;
   the source element size goes into bits [21:19].  */
15906 do_neon_movl (void)
15908 struct neon_type_el et = neon_check_type (2, NS_QD,
15909 N_EQK | N_DBL, N_SU_32 | N_KEY);
15910 unsigned sizebits = et.size >> 3;
15911 inst.instruction |= sizebits << 19;
15912 neon_two_same (0, et.type == NT_unsigned, -1);
/* NOTE(review): next function's header elided (original lines ~15915-15917);
   by position presumably do_neon_trn (VTRN) — confirm against full source.  */
15918 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15919 struct neon_type_el et = neon_check_type (2, rs,
15920 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15921 NEON_ENCODE (INTEGER, inst);
15922 neon_two_same (neon_quad (rs), 1, et.size);
/* VZIP/VUZP.  The D-register 32-bit case has no own encoding and is
   re-dispatched as VTRN.32 (re-dispatch lines elided here).  */
15926 do_neon_zip_uzp (void)
15928 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15929 struct neon_type_el et = neon_check_type (2, rs,
15930 N_EQK, N_8 | N_16 | N_32 | N_KEY);
15931 if (rs == NS_DD && et.size == 32)
15933 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
15934 inst.instruction = N_MNEM_vtrn;
15938 neon_two_same (neon_quad (rs), 1, et.size);
/* VQABS/VQNEG: saturating, signed element types only.  */
15942 do_neon_sat_abs_neg (void)
15944 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15945 struct neon_type_el et = neon_check_type (2, rs,
15946 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15947 neon_two_same (neon_quad (rs), 1, et.size);
/* VPADDL/VPADAL: pairwise long add (accumulate).  */
15951 do_neon_pair_long (void)
15953 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15954 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
15955 /* Unsigned is encoded in OP field (bit 7) for these instructions. */
15956 inst.instruction |= (et.type == NT_unsigned) << 7;
15957 neon_two_same (neon_quad (rs), 1, et.size);
/* VRECPE/VRSQRTE: bit 8 selects the float flavour over the U32 one.  */
15961 do_neon_recip_est (void)
15963 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15964 struct neon_type_el et = neon_check_type (2, rs,
15965 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
15966 inst.instruction |= (et.type == NT_float) << 8;
15967 neon_two_same (neon_quad (rs), 1, et.size);
/* NOTE(review): four function headers in this run are elided; by position
   and accepted types these are presumably do_neon_cls (signed types),
   do_neon_clz (integer types), do_neon_cnt (8-bit only) and do_neon_swp
   (untyped) — confirm against the full source.  */
15973 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15974 struct neon_type_el et = neon_check_type (2, rs,
15975 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
15976 neon_two_same (neon_quad (rs), 1, et.size);
15982 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15983 struct neon_type_el et = neon_check_type (2, rs,
15984 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
15985 neon_two_same (neon_quad (rs), 1, et.size);
15991 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15992 struct neon_type_el et = neon_check_type (2, rs,
15993 N_EQK | N_INT, N_8 | N_KEY);
15994 neon_two_same (neon_quad (rs), 1, et.size);
16000 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16001 neon_two_same (neon_quad (rs), 1, -1);
/* VTBL/VTBX: table lookup with a 1-4 register list; the length minus one
   goes into bits [9:8].  */
16005 do_neon_tbl_tbx (void)
16007 unsigned listlenbits;
16008 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
16010 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
16012 first_error (_("bad list length for table lookup"));
16016 listlenbits = inst.operands[1].imm - 1;
16017 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16018 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16019 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16020 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16021 inst.instruction |= LOW4 (inst.operands[2].reg);
16022 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16023 inst.instruction |= listlenbits << 8;
16025 neon_dp_fixup (&inst);
/* VLDM/VSTM (double-precision register list form).  Single-precision lists
   are handed off to the VFP nsyn path.  The offset field counts words, so
   D-registers contribute two each.  */
16029 do_neon_ldm_stm (void)
16031 /* P, U and L bits are part of bitmask. */
16032 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
16033 unsigned offsetbits = inst.operands[1].imm * 2;
16035 if (inst.operands[1].issingle)
16037 do_vfp_nsyn_ldm_stm (is_dbmode);
16041 constraint (is_dbmode && !inst.operands[0].writeback,
16042 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16044 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
16045 _("register list must contain at least 1 and at most 16 "
16048 inst.instruction |= inst.operands[0].reg << 16;
16049 inst.instruction |= inst.operands[0].writeback << 21;
16050 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
16051 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
16053 inst.instruction |= offsetbits;
16055 do_vfp_cond_or_thumb ();
/* VLDR/VSTR: warn/error about PC-relative VSTR per-architecture rules, then
   dispatch to the single- or double-precision VFP nsyn opcode.  */
16059 do_neon_ldr_str (void)
16061 int is_ldr = (inst.instruction & (1 << 20)) != 0;
16063 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16064 And is UNPREDICTABLE in thumb mode. */
/* NOTE(review): the leading `if (!is_ldr ...` fragment (original line 16065)
   is elided; the conditions below are its continuation.  */
16066 && inst.operands[1].reg == REG_PC
16067 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16070 inst.error = _("Use of PC here is UNPREDICTABLE");
16071 else if (warn_on_deprecated)
16072 as_warn (_("Use of PC here is deprecated"));
16075 if (inst.operands[0].issingle)
16078 do_vfp_nsyn_opcode ("flds");
16080 do_vfp_nsyn_opcode ("fsts");
16085 do_vfp_nsyn_opcode ("fldd");
16087 do_vfp_nsyn_opcode ("fstd");
16091 /* "interleave" version also handles non-interleaving register VLD1/VST1
/* VLD<n>/VST<n> multiple-structure (interleaved) form, which also covers
   plain multi-register VLD1/VST1.  Validates alignment against the list
   length, then looks up the 4-bit "type" field from the list style and the
   <n> already stashed in bits [9:8].  */
16095 do_neon_ld_st_interleave (void)
16097 struct neon_type_el et = neon_check_type (1, NS_NULL,
16098 N_8 | N_16 | N_32 | N_64);
16099 unsigned alignbits = 0;
16101 /* The bits in this table go:
16102 0: register stride of one (0) or two (1)
16103 1,2: register list length, minus one (1, 2, 3, 4).
16104 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
16105 We use -1 for invalid entries. */
16106 const int typetable[] =
16108 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
16109 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
16110 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
16111 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
16115 if (et.type == NT_invtype)
16118 if (inst.operands[1].immisalign)
16119 switch (inst.operands[1].imm >> 8)
16121 case 64: alignbits = 1; break;
/* NOTE(review): the case labels for the 128/256 alignments (original lines
   16122 and 16126-16128) are elided; the length checks below belong to
   those cases.  */
16123 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
16124 && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
16125 goto bad_alignment;
16129 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
16130 goto bad_alignment;
16135 first_error (_("bad alignment"));
16139 inst.instruction |= alignbits << 4;
16140 inst.instruction |= neon_logbits (et.size) << 6;
16142 /* Bits [4:6] of the immediate in a list specifier encode register stride
16143 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
16144 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
16145 up the right value for "type" in a table based on this value and the given
16146 list style, then stick it back. */
16147 idx = ((inst.operands[0].imm >> 4) & 7)
16148 | (((inst.instruction >> 8) & 3) << 3);
16150 typebits = typetable[idx];
16152 constraint (typebits == -1, _("bad list type for instruction"));
16153 constraint (((inst.instruction >> 8) & 3) && et.size == 64,
16154 _("bad element type for instruction"));
16156 inst.instruction &= ~0xf00;
16157 inst.instruction |= typebits << 8;
16160 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
16161 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
16162 otherwise. The variable arguments are a list of pairs of legal (size, align)
16163 values, terminated with -1. */
/* Check a (size, align) pair against the varargs list of legal pairs
   (terminated by -1); returns SUCCESS/FAIL and sets *DO_ALIGN when the
   alignment bit should be emitted.  The documenting comment for this
   helper is at original lines 16160-16163 above.  */
16166 neon_alignment_bit (int size, int align, int *do_align, ...)
16169 int result = FAIL, thissize, thisalign;
/* No :align suffix parsed -> nothing to check, bit stays clear.  */
16171 if (!inst.operands[1].immisalign)
16177 va_start (ap, do_align);
16181 thissize = va_arg (ap, int);
16182 if (thissize == -1)
16184 thisalign = va_arg (ap, int);
16186 if (size == thissize && align == thisalign)
16189 while (result != SUCCESS);
16193 if (result == SUCCESS)
16196 first_error (_("unsupported alignment for instruction"));
/* VLD<n>/VST<n> single-structure-to-one-lane form: validates list length,
   lane index and stride, then encodes the per-<n> alignment/index bits.  */
16202 do_neon_ld_st_lane (void)
16204 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
16205 int align_good, do_align = 0;
16206 int logsize = neon_logbits (et.size);
16207 int align = inst.operands[1].imm >> 8;
16208 int n = (inst.instruction >> 8) & 3;
16209 int max_el = 64 / et.size;
16211 if (et.type == NT_invtype)
16214 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
16215 _("bad list length"));
16216 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
16217 _("scalar index out of range"));
16218 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
16220 _("stride of 2 unavailable when element size is 8"));
16224 case 0: /* VLD1 / VST1. */
16225 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
16227 if (align_good == FAIL)
16231 unsigned alignbits = 0;
/* NOTE(review): the inner switch heads in this function (e.g. original
   lines 16232-16233) are elided from the listing.  */
16234 case 16: alignbits = 0x1; break;
16235 case 32: alignbits = 0x3; break;
16238 inst.instruction |= alignbits << 4;
16242 case 1: /* VLD2 / VST2. */
16243 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
16245 if (align_good == FAIL)
16248 inst.instruction |= 1 << 4;
16251 case 2: /* VLD3 / VST3. */
16252 constraint (inst.operands[1].immisalign,
16253 _("can't use alignment with this instruction"));
16256 case 3: /* VLD4 / VST4. */
16257 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
16258 16, 64, 32, 64, 32, 128, -1);
16259 if (align_good == FAIL)
16263 unsigned alignbits = 0;
16266 case 8: alignbits = 0x1; break;
16267 case 16: alignbits = 0x1; break;
16268 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
16271 inst.instruction |= alignbits << 4;
16278 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
16279 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16280 inst.instruction |= 1 << (4 + logsize);
16282 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
16283 inst.instruction |= logsize << 10;
16286 /* Encode single n-element structure to all lanes VLD<n> instructions. */
/* VLD<n> single-structure-to-all-lanes form (see comment at original line
   16286): per-<n> list-length/stride/alignment validation, then size and
   do_align bits are folded into the encoding.  */
16289 do_neon_ld_dup (void)
16291 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
16292 int align_good, do_align = 0;
16294 if (et.type == NT_invtype)
16297 switch ((inst.instruction >> 8) & 3)
16299 case 0: /* VLD1. */
16300 gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
16301 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
16302 &do_align, 16, 16, 32, 32, -1);
16303 if (align_good == FAIL)
16305 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
16308 case 2: inst.instruction |= 1 << 5; break;
16309 default: first_error (_("bad list length")); return;
16311 inst.instruction |= neon_logbits (et.size) << 6;
16314 case 1: /* VLD2. */
16315 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
16316 &do_align, 8, 16, 16, 32, 32, 64, -1);
16317 if (align_good == FAIL)
16319 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
16320 _("bad list length"));
16321 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16322 inst.instruction |= 1 << 5;
16323 inst.instruction |= neon_logbits (et.size) << 6;
16326 case 2: /* VLD3: alignment never permitted. */
16327 constraint (inst.operands[1].immisalign,
16328 _("can't use alignment with this instruction"));
16329 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
16330 _("bad list length"));
16331 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16332 inst.instruction |= 1 << 5;
16333 inst.instruction |= neon_logbits (et.size) << 6;
16336 case 3: /* VLD4. */
16338 int align = inst.operands[1].imm >> 8;
16339 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
16340 16, 64, 32, 64, 32, 128, -1);
16341 if (align_good == FAIL)
16343 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
16344 _("bad list length"));
16345 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
16346 inst.instruction |= 1 << 5;
/* 32-bit elements with :128 alignment use the special 0b11 size code.  */
16347 if (et.size == 32 && align == 128)
16348 inst.instruction |= 0x3 << 6;
16350 inst.instruction |= neon_logbits (et.size) << 6;
16357 inst.instruction |= do_align << 4;
16360 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
16361 apart from bits [11:4]. */
/* Top-level dispatcher for VLD<n>/VST<n> (see comment at original lines
   16360-16361): picks interleave / dup / lane form from the parsed lane
   specifier, then encodes the common register, base and addressing bits.  */
16364 do_neon_ldx_stx (void)
16366 if (inst.operands[1].isreg)
16367 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16369 switch (NEON_LANE (inst.operands[0].imm))
16371 case NEON_INTERLEAVE_LANES:
16372 NEON_ENCODE (INTERLV, inst);
16373 do_neon_ld_st_interleave ();
16376 case NEON_ALL_LANES:
16377 NEON_ENCODE (DUP, inst);
/* Stores have no all-lanes form, so DUP resolves to N_INV for them.  */
16378 if (inst.instruction == N_INV)
16380 first_error ("only loads support such operands");
16387 NEON_ENCODE (LANE, inst);
16388 do_neon_ld_st_lane ();
16391 /* L bit comes from bit mask. */
16392 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16393 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16394 inst.instruction |= inst.operands[1].reg << 16;
16396 if (inst.operands[1].postind)
16398 int postreg = inst.operands[1].imm & 0xf;
16399 constraint (!inst.operands[1].immisreg,
16400 _("post-index must be a register"));
/* 0xd and 0xf (SP/PC) are reserved Rm encodings meaning writeback /
   no-writeback, hence disallowed as explicit post-index registers.  */
16401 constraint (postreg == 0xd || postreg == 0xf,
16402 _("bad register for post-index"));
16403 inst.instruction |= postreg;
16407 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
16408 constraint (inst.reloc.exp.X_op != O_constant
16409 || inst.reloc.exp.X_add_number != 0,
16412 if (inst.operands[1].writeback)
16414 inst.instruction |= 0xd;
16417 inst.instruction |= 0xf;
/* ARM vs Thumb top byte of the encoding.  */
16421 inst.instruction |= 0xf9000000;
16423 inst.instruction |= 0xf4000000;
/* FP for ARMv8 (FPv8) three-operand VFP encoder: single or double dyadic
   form, with the unconditional 0xf top nibble.  */
16428 do_vfp_nsyn_fpv8 (enum neon_shape rs)
16430 NEON_ENCODE (FPV8, inst);
16433 do_vfp_sp_dyadic ();
16435 do_vfp_dp_rd_rn_rm ();
16438 inst.instruction |= 0x100;
16440 inst.instruction |= 0xf0000000;
/* NOTE(review): the headers of the next two wrappers are elided; by
   position presumably do_vsel and do_vmaxnm — confirm against full source.
   Both are ARMv8-only and must sit outside an IT block.  */
16446 set_it_insn_type (OUTSIDE_IT_INSN);
16448 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
16449 first_error (_("invalid instruction shape"));
16455 set_it_insn_type (OUTSIDE_IT_INSN);
16457 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
16460 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16463 neon_dyadic_misc (NT_untyped, N_F32, 0);
/* Shared encoder for the VRINT family: the VFP (scalar S/D) path encodes the
   rounding mode into the opcode bits, the NEON (vector F32) path into the
   op field at bit 7.  The a/n/p/m modes are unconditional (ARMv8) and
   therefore forbidden inside IT blocks.  */
16467 do_vrint_1 (enum neon_cvt_mode mode)
16469 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_QQ, NS_NULL);
16470 struct neon_type_el et;
16475 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
16476 if (et.type != NT_invtype)
16478 /* VFP encodings. */
16479 if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
16480 || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
16481 set_it_insn_type (OUTSIDE_IT_INSN);
16483 NEON_ENCODE (FPV8, inst);
16485 do_vfp_sp_monadic ();
16487 do_vfp_dp_rd_rm ();
/* NOTE(review): the switch head on `mode` (original lines ~16489-16490)
   is elided.  */
16491 case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
16492 case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
16493 case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
16494 case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
16495 case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
16496 case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
16497 case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
16501 inst.instruction |= (rs == NS_DD) << 8;
16502 do_vfp_cond_or_thumb ();
16506 /* Neon encodings (or something broken...). */
16508 et = neon_check_type (2, rs, N_EQK, N_F32 | N_KEY);
16510 if (et.type == NT_invtype)
16513 set_it_insn_type (OUTSIDE_IT_INSN);
16514 NEON_ENCODE (FLOAT, inst);
16516 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
16519 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16520 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16521 inst.instruction |= LOW4 (inst.operands[1].reg);
16522 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16523 inst.instruction |= neon_quad (rs) << 6;
16526 case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
16527 case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
16528 case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
16529 case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
16530 case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
16531 case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
/* Mode 'r' (VRINTR) has no NEON vector form.  */
16532 case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
16537 inst.instruction |= 0xfc000000;
16539 inst.instruction |= 0xf0000000;
/* One-line encoders for VRINT{X,Z,R,A,N,P,M}: each delegates to
   do_vrint_1 () with the rounding mode that names the instruction.  */
16546 do_vrint_1 (neon_cvt_mode_x);
16552 do_vrint_1 (neon_cvt_mode_z);
16558 do_vrint_1 (neon_cvt_mode_r);
16564 do_vrint_1 (neon_cvt_mode_a);
16570 do_vrint_1 (neon_cvt_mode_n);
16576 do_vrint_1 (neon_cvt_mode_p);
16582 do_vrint_1 (neon_cvt_mode_m);
16585 /* Crypto v1 instructions. */
/* Encode a two-operand (Qd, Qm) crypto instruction.  ELTTYPE is the
   element type required of both operands; OP selects the particular
   operation and is shifted into bit 6 of the encoding.  Not permitted
   inside an IT block.  */
16587 do_crypto_2op_1 (unsigned elttype, int op)
16589 set_it_insn_type (OUTSIDE_IT_INSN);
16591 if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
16597 NEON_ENCODE (INTEGER, inst);
  /* Destination (D/Vd) and source (M/Vm) register fields.  */
16598 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16599 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16600 inst.instruction |= LOW4 (inst.operands[1].reg);
16601 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16603 inst.instruction |= op << 6;
  /* Fixed top bits of the encoding.  NOTE(review): presumably the
     Thumb (0xfc...) vs ARM (0xf0...) alternatives -- the selecting
     condition is not visible here, confirm against the full source.  */
16606 inst.instruction |= 0xfc000000;
16608 inst.instruction |= 0xf0000000;
/* Encode a three-operand (Qd, Qn, Qm) crypto instruction operating on
   32-bit elements.  U and OP are folded into the opcode via
   neon_three_same ().  Not permitted inside an IT block.  */
16612 do_crypto_3op_1 (int u, int op)
16614 set_it_insn_type (OUTSIDE_IT_INSN);
16616 if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
16617 N_32 | N_UNT | N_KEY).type == NT_invtype)
16622 NEON_ENCODE (INTEGER, inst);
  /* Size field value 8 << op; operands encoded as a standard
     three-same Neon instruction.  */
16623 neon_three_same (1, u, 8 << op);
/* One-line encoders for the AES and SHA crypto instructions: each
   forwards to do_crypto_2op_1 () or do_crypto_3op_1 () with the
   element-type / opcode pair that selects the operation.  */
16629 do_crypto_2op_1 (N_8, 0);
16635 do_crypto_2op_1 (N_8, 1);
16641 do_crypto_2op_1 (N_8, 2);
16647 do_crypto_2op_1 (N_8, 3);
16653 do_crypto_3op_1 (0, 0);
16659 do_crypto_3op_1 (0, 1);
16665 do_crypto_3op_1 (0, 2);
16671 do_crypto_3op_1 (0, 3);
16677 do_crypto_3op_1 (1, 0);
16683 do_crypto_3op_1 (1, 1);
16687 do_sha256su1 (void)
16689 do_crypto_3op_1 (1, 2);
16695 do_crypto_2op_1 (N_32, -1);
16701 do_crypto_2op_1 (N_32, 0);
16705 do_sha256su0 (void)
16707 do_crypto_2op_1 (N_32, 1);
/* Encode a CRC32 instruction.  POLY selects the polynomial variant
   (CRC32 vs CRC32C) and SZ the operand size; both fields sit at
   different bit positions in the Thumb and ARM encodings, hence the
   thumb_mode conditionals below.  */
16711 do_crc32_1 (unsigned int poly, unsigned int sz)
16713 unsigned int Rd = inst.operands[0].reg;
16714 unsigned int Rn = inst.operands[1].reg;
16715 unsigned int Rm = inst.operands[2].reg;
  /* CRC32 is not permitted inside an IT block.  */
16717 set_it_insn_type (OUTSIDE_IT_INSN);
16718 inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
16719 inst.instruction |= LOW4 (Rn) << 16;
16720 inst.instruction |= LOW4 (Rm);
16721 inst.instruction |= sz << (thumb_mode ? 4 : 21);
16722 inst.instruction |= poly << (thumb_mode ? 20 : 9);
  /* Using the PC (or, in Thumb state, the SP) is UNPREDICTABLE.  */
16724 if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
16725 as_warn (UNPRED_REG ("r15"));
16726 if (thumb_mode && (Rd == REG_SP || Rn == REG_SP || Rm == REG_SP))
16727 as_warn (UNPRED_REG ("r13"));
16767 /* Overall per-instruction processing. */
16769 /* We need to be able to fix up arbitrary expressions in some statements.
16770 This is so that we can handle symbols that are an arbitrary distance from
16771 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
16772 which returns part of an address in a form which will be valid for
16773 a data instruction. We do this by pushing the expression into a symbol
16774 in the expr_section, and creating a fix for that. */
/* Create a fixup at offset WHERE in FRAG for expression EXP (see the
   overview comment above).  Absolute expressions are first wrapped in
   a symbol so there is something to refer to in the object file.  */
16777 fix_new_arm (fragS * frag,
16791 /* Create an absolute valued symbol, so we have something to
16792 refer to in the object file. Unfortunately for us, gas's
16793 generic expression parsing will already have folded out
16794 any use of .set foo/.type foo %function that may have
16795 been used to set type information of the target location,
16796 that's being specified symbolically. We have to presume
16797 the user knows what they are doing. */
16801 sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
  /* Re-use an existing *ABS* symbol for the same value if present.  */
16803 symbol = symbol_find_or_make (name);
16804 S_SET_SEGMENT (symbol, absolute_section);
16805 symbol_set_frag (symbol, &zero_address_frag);
16806 S_SET_VALUE (symbol, exp->X_add_number);
  /* Rewrite EXP to reference the new symbol instead of the bare value.  */
16807 exp->X_op = O_symbol;
16808 exp->X_add_symbol = symbol;
16809 exp->X_add_number = 0;
16815 new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
16816 (enum bfd_reloc_code_real) reloc);
16820 new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
16821 pc_rel, (enum bfd_reloc_code_real) reloc);
16825 /* Mark whether the fix is to a THUMB instruction, or an ARM
16827 new_fix->tc_fix_data = thumb_mode;
16830 /* Create a frag for an instruction requiring relaxation.  */
16832 output_relax_insn (void)
16838 /* The size of the instruction is unknown, so tie the debug info to the
16839 start of the instruction. */
16840 dwarf2_emit_insn (0);
  /* Decompose the relocation expression into a symbol plus offset for
     frag_var ().  */
16842 switch (inst.reloc.exp.X_op)
16845 sym = inst.reloc.exp.X_add_symbol;
16846 offset = inst.reloc.exp.X_add_number;
16850 offset = inst.reloc.exp.X_add_number;
16853 sym = make_expr_symbol (&inst.reloc.exp);
  /* Variant frag: at most INSN_SIZE bytes, initially THUMB_SIZE;
     relaxation may later widen it.  */
16857 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
16858 inst.relax, sym, offset, NULL/*offset, opcode*/);
16859 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
16862 /* Write a 32-bit thumb instruction to buf. */
16864 put_thumb32_insn (char * buf, unsigned long insn)
  /* Thumb-2 instructions are stored as two 16-bit halfwords, most
     significant halfword first.  */
16866 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
16867 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
/* Emit the instruction currently held in `inst' into the output frag:
   report a pending error, defer relaxable instructions to
   output_relax_insn (), otherwise write the encoded bytes, attach any
   fixup and emit the DWARF line info.  STR is the source line, used
   only for diagnostics.  */
16871 output_inst (const char * str)
16877 as_bad ("%s -- `%s'", inst.error, str);
16882 output_relax_insn ();
16885 if (inst.size == 0)
16888 to = frag_more (inst.size);
16889 /* PR 9814: Record the thumb mode into the current frag so that we know
16890 what type of NOP padding to use, if necessary. We override any previous
16891 setting so that if the mode has changed then the NOPS that we use will
16892 match the encoding of the last instruction in the frag. */
16893 frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
  /* 32-bit Thumb instructions need halfword-swapped output.  */
16895 if (thumb_mode && (inst.size > THUMB_SIZE))
16897 gas_assert (inst.size == (2 * THUMB_SIZE));
16898 put_thumb32_insn (to, inst.instruction);
  /* An 8-byte (2 * INSN_SIZE) instruction is emitted as the same word
     written twice.  */
16900 else if (inst.size > INSN_SIZE)
16902 gas_assert (inst.size == (2 * INSN_SIZE));
16903 md_number_to_chars (to, inst.instruction, INSN_SIZE);
16904 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
16907 md_number_to_chars (to, inst.instruction, inst.size);
16909 if (inst.reloc.type != BFD_RELOC_UNUSED)
16910 fix_new_arm (frag_now, to - frag_now->fr_literal,
16911 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
16914 dwarf2_emit_insn (inst.size);
/* Emit a Thumb IT instruction: base opcode 0xbf00, condition COND in
   bits 7:4 and MASK in bits 3:0.  If TO is NULL a new slot is
   allocated with frag_more (); callers record the returned location
   (see new_automatic_it_block) so the instruction can be rewritten in
   place later by now_it_add_mask ().  */
16918 output_it_inst (int cond, int mask, char * to)
16920 unsigned long instruction = 0xbf00;
16923 instruction |= mask;
16924 instruction |= cond << 4;
16928 to = frag_more (2);
16930 dwarf2_emit_insn (2);
16934 md_number_to_chars (to, instruction, 2);
16939 /* Tag values used in struct asm_opcode's tag field. */
16942 OT_unconditional, /* Instruction cannot be conditionalized.
16943 The ARM condition field is still 0xE. */
16944 OT_unconditionalF, /* Instruction cannot be conditionalized
16945 and carries 0xF in its ARM condition field. */
16946 OT_csuffix, /* Instruction takes a conditional suffix. */
16947 OT_csuffixF, /* Some forms of the instruction take a conditional
16948 suffix, others place 0xF where the condition field
16950 OT_cinfix3, /* Instruction takes a conditional infix,
16951 beginning at character index 3. (In
16952 unified mode, it becomes a suffix.) */
16953 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
16954 tsts, cmps, cmns, and teqs. */
16955 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
16956 character index 3, even in unified mode. Used for
16957 legacy instructions where suffix and infix forms
16958 may be ambiguous. */
16959 OT_csuf_or_in3, /* Instruction takes either a conditional
16960 suffix or an infix at character index 3. */
16961 OT_odd_infix_unc, /* This is the unconditional variant of an
16962 instruction that takes a conditional infix
16963 at an unusual position. In unified mode,
16964 this variant will accept a suffix. */
16965 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
16966 are the conditional variants of instructions that
16967 take conditional infixes in unusual positions.
16968 The infix appears at character index
16969 (tag - OT_odd_infix_0). These are not accepted
16970 in unified mode. */
16973 /* Subroutine of md_assemble, responsible for looking up the primary
16974 opcode from the mnemonic the user wrote. STR points to the
16975 beginning of the mnemonic.
16977 This is not simply a hash table lookup, because of conditional
16978 variants. Most instructions have conditional variants, which are
16979 expressed with a _conditional affix_ to the mnemonic. If we were
16980 to encode each conditional variant as a literal string in the opcode
16981 table, it would have approximately 20,000 entries.
16983 Most mnemonics take this affix as a suffix, and in unified syntax,
16984 'most' is upgraded to 'all'. However, in the divided syntax, some
16985 instructions take the affix as an infix, notably the s-variants of
16986 the arithmetic instructions. Of those instructions, all but six
16987 have the infix appear after the third character of the mnemonic.
16989 Accordingly, the algorithm for looking up primary opcodes given
16992 1. Look up the identifier in the opcode table.
16993 If we find a match, go to step U.
16995 2. Look up the last two characters of the identifier in the
16996 conditions table. If we find a match, look up the first N-2
16997 characters of the identifier in the opcode table. If we
16998 find a match, go to step CE.
17000 3. Look up the fourth and fifth characters of the identifier in
17001 the conditions table. If we find a match, extract those
17002 characters from the identifier, and look up the remaining
17003 characters in the opcode table. If we find a match, go
17008 U. Examine the tag field of the opcode structure, in case this is
17009 one of the six instructions with its conditional infix in an
17010 unusual place. If it is, the tag tells us where to find the
17011 infix; look it up in the conditions table and set inst.cond
17012 accordingly. Otherwise, this is an unconditional instruction.
17013 Again set inst.cond accordingly. Return the opcode structure.
17015 CE. Examine the tag field to make sure this is an instruction that
17016 should receive a conditional suffix. If it is not, fail.
17017 Otherwise, set inst.cond from the suffix we already looked up,
17018 and return the opcode structure.
17020 CM. Examine the tag field to make sure this is an instruction that
17021 should receive a conditional infix after the third character.
17022 If it is not, fail. Otherwise, undo the edits to the current
17023 line of input and proceed as for case CE. */
/* See the large algorithm comment above for the meaning of steps 1-3
   and U/CE/CM referenced below.  */
17025 static const struct asm_opcode *
17026 opcode_lookup (char **str)
17030 const struct asm_opcode *opcode;
17031 const struct asm_cond *cond;
17034 /* Scan up to the end of the mnemonic, which must end in white space,
17035 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
17036 for (base = end = *str; *end != '\0'; end++)
17037 if (*end == ' ' || *end == '.')
17043 /* Handle a possible width suffix and/or Neon type suffix. */
17048 /* The .w and .n suffixes are only valid if the unified syntax is in
17050 if (unified_syntax && end[1] == 'w')
17052 else if (unified_syntax && end[1] == 'n')
17057 inst.vectype.elems = 0;
17059 *str = end + offset;
17061 if (end[offset] == '.')
17063 /* See if we have a Neon type suffix (possible in either unified or
17064 non-unified ARM syntax mode). */
17065 if (parse_neon_type (&inst.vectype, str) == FAIL)
17068 else if (end[offset] != '\0' && end[offset] != ' ')
17074 /* Look for unaffixed or special-case affixed mnemonic.  (Step 1.)  */
17075 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
  /* Step U: direct match -- either unconditional, or one of the six
     odd-infix mnemonics whose condition we extract from the tag.  */
17080 if (opcode->tag < OT_odd_infix_0)
17082 inst.cond = COND_ALWAYS;
17086 if (warn_on_deprecated && unified_syntax)
17087 as_warn (_("conditional infixes are deprecated in unified syntax"));
17088 affix = base + (opcode->tag - OT_odd_infix_0);
17089 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17092 inst.cond = cond->value;
17096 /* Cannot have a conditional suffix on a mnemonic of less than two
17098 if (end - base < 3)
17101 /* Look for suffixed mnemonic.  (Step 2 / CE.)  */
17103 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
17104 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17106 if (opcode && cond)
  /* Step CE: check the tag allows a conditional suffix.  */
17109 switch (opcode->tag)
17111 case OT_cinfix3_legacy:
17112 /* Ignore conditional suffixes matched on infix only mnemonics. */
17116 case OT_cinfix3_deprecated:
17117 case OT_odd_infix_unc:
17118 if (!unified_syntax)
17120 /* else fall through */
17124 case OT_csuf_or_in3:
17125 inst.cond = cond->value;
17128 case OT_unconditional:
17129 case OT_unconditionalF:
17131 inst.cond = cond->value;
17134 /* Delayed diagnostic. */
17135 inst.error = BAD_COND;
17136 inst.cond = COND_ALWAYS;
17145 /* Cannot have a usual-position infix on a mnemonic of less than
17146 six characters (five would be a suffix). */
17147 if (end - base < 6)
17150 /* Look for infixed mnemonic in the usual position.  (Step 3 / CM.)  */
17152 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
  /* Temporarily splice the two-character infix out of the mnemonic,
     look up the remainder, then restore the input buffer.  */
17156 memcpy (save, affix, 2);
17157 memmove (affix, affix + 2, (end - affix) - 2);
17158 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
17160 memmove (affix + 2, affix, (end - affix) - 2);
17161 memcpy (affix, save, 2);
17164 && (opcode->tag == OT_cinfix3
17165 || opcode->tag == OT_cinfix3_deprecated
17166 || opcode->tag == OT_csuf_or_in3
17167 || opcode->tag == OT_cinfix3_legacy))
17170 if (warn_on_deprecated && unified_syntax
17171 && (opcode->tag == OT_cinfix3
17172 || opcode->tag == OT_cinfix3_deprecated))
17173 as_warn (_("conditional infixes are deprecated in unified syntax"))
17175 inst.cond = cond->value;
17182 /* This function generates an initial IT instruction, leaving its block
17183 virtually open for the new instructions. Eventually,
17184 the mask will be updated by now_it_add_mask () each time
17185 a new instruction needs to be included in the IT block.
17186 Finally, the block is closed with close_automatic_it_block ().
17187 The block closure can be requested either from md_assemble (),
17188 a tencode (), or due to a label hook. */
/* Open a new automatic IT block for condition COND: emit the IT
   instruction and remember its location (now_it.insn) so the mask can
   be patched as further instructions join the block.  See the comment
   above.  */
17191 new_automatic_it_block (int cond)
17193 now_it.state = AUTOMATIC_IT_BLOCK;
17194 now_it.mask = 0x18;
17196 now_it.block_length = 1;
17197 mapping_state (MAP_THUMB);
17198 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
17199 now_it.warn_deprecated = FALSE;
17200 now_it.insn_cond = TRUE;
17203 /* Close an automatic IT block.
17204 See comments in new_automatic_it_block ().  Resets the mask to the
17205 "no more slots" value and the block length to zero.  */
17207 close_automatic_it_block (void)
17209 now_it.mask = 0x10;
17210 now_it.block_length = 0;
17213 /* Update the mask of the current automatically-generated IT
17214 instruction. See comments in new_automatic_it_block ().  COND is the
17215 condition of the instruction joining the block; only its low bit
17216 (then/else sense) is folded into the mask, and the IT instruction is
17217 rewritten in place via output_it_inst ().  */
17217 now_it_add_mask (int cond)
17219 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
17220 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
17221 | ((bitvalue) << (nbit)))
17222 const int resulting_bit = (cond & 1);
17224 now_it.mask &= 0xf;
  /* Place the then/else bit for this slot and the terminating bit one
     position below it.  */
17225 now_it.mask = SET_BIT_VALUE (now_it.mask,
17227 (5 - now_it.block_length));
17228 now_it.mask = SET_BIT_VALUE (now_it.mask,
17230 ((5 - now_it.block_length) - 1) );
17231 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
17234 #undef SET_BIT_VALUE
17237 /* The IT blocks handling machinery is accessed through the these functions:
17238 it_fsm_pre_encode () from md_assemble ()
17239 set_it_insn_type () optional, from the tencode functions
17240 set_it_insn_type_last () ditto
17241 in_it_block () ditto
17242 it_fsm_post_encode () from md_assemble ()
17243 force_automatic_it_block_close () from label handling functions
17246 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
17247 initializing the IT insn type with a generic initial value depending
17248 on the inst.condition.
17249 2) During the tencode function, two things may happen:
17250 a) The tencode function overrides the IT insn type by
17251 calling either set_it_insn_type (type) or set_it_insn_type_last ().
17252 b) The tencode function queries the IT block state by
17253 calling in_it_block () (i.e. to determine narrow/not narrow mode).
17255 Both set_it_insn_type and in_it_block run the internal FSM state
17256 handling function (handle_it_state), because: a) setting the IT insn
17257 type may incur in an invalid state (exiting the function),
17258 and b) querying the state requires the FSM to be updated.
17259 Specifically we want to avoid creating an IT block for conditional
17260 branches, so it_fsm_pre_encode is actually a guess and we can't
17261 determine whether an IT block is required until the tencode () routine
17262 has decided what type of instruction this actually is.
17263 Because of this, if set_it_insn_type and in_it_block have to be used,
17264 set_it_insn_type has to be called first.
17266 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
17267 determines the insn IT type depending on the inst.cond code.
17268 When a tencode () routine encodes an instruction that can be
17269 either outside an IT block, or, in the case of being inside, has to be
17270 the last one, set_it_insn_type_last () will determine the proper
17271 IT instruction type based on the inst.cond code. Otherwise,
17272 set_it_insn_type can be called for overriding that logic or
17273 for covering other cases.
17275 Calling handle_it_state () may not transition the IT block state to
17276 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
17277 still queried. Instead, if the FSM determines that the state should
17278 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
17279 after the tencode () function: that's what it_fsm_post_encode () does.
17281 Since in_it_block () calls the state handling function to get an
17282 updated state, an error may occur (due to invalid insns combination).
17283 In that case, inst.error is set.
17284 Therefore, inst.error has to be checked after the execution of
17285 the tencode () routine.
17287 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
17288 any pending state change (if any) that didn't take place in
17289 handle_it_state () as explained above. */
/* Initialize the IT insn type from inst.cond before the encoder runs
   (the encoder may override it); mark the FSM state as not yet
   handled for this instruction.  */
17292 it_fsm_pre_encode (void)
17294 if (inst.cond != COND_ALWAYS)
17295 inst.it_insn_type = INSIDE_IT_INSN;
17297 inst.it_insn_type = OUTSIDE_IT_INSN;
17299 now_it.state_handled = 0;
17302 /* IT state FSM handling function.  Run (at most once) per
17303 instruction -- see it_fsm_pre_encode () / it_fsm_post_encode ();
17304 advances now_it.state and validates inst.it_insn_type against it. */
17305 handle_it_state (void)
17307 now_it.state_handled = 1;
17308 now_it.insn_cond = FALSE;
17310 switch (now_it.state)
17312 case OUTSIDE_IT_BLOCK:
17313 switch (inst.it_insn_type)
17315 case OUTSIDE_IT_INSN:
17318 case INSIDE_IT_INSN:
17319 case INSIDE_IT_LAST_INSN:
17320 if (thumb_mode == 0)
17323 && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
17324 as_tsktsk (_("Warning: conditional outside an IT block"\
17329 if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
17330 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2))
17332 /* Automatically generate the IT instruction. */
17333 new_automatic_it_block (inst.cond);
17334 if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
17335 close_automatic_it_block ();
17339 inst.error = BAD_OUT_IT;
17345 case IF_INSIDE_IT_LAST_INSN:
17346 case NEUTRAL_IT_INSN:
  /* An explicit IT instruction opens a manual block.  */
17350 now_it.state = MANUAL_IT_BLOCK;
17351 now_it.block_length = 0;
17356 case AUTOMATIC_IT_BLOCK:
17357 /* Three things may happen now:
17358 a) We should increment current it block size;
17359 b) We should close current it block (closing insn or 4 insns);
17360 c) We should close current it block and start a new one (due
17361 to incompatible conditions or
17362 4 insns-length block reached). */
17364 switch (inst.it_insn_type)
17366 case OUTSIDE_IT_INSN:
17367 /* The closure of the block shall happen immediately,
17368 so any in_it_block () call reports the block as closed. */
17369 force_automatic_it_block_close ();
17372 case INSIDE_IT_INSN:
17373 case INSIDE_IT_LAST_INSN:
17374 case IF_INSIDE_IT_LAST_INSN:
17375 now_it.block_length++;
17377 if (now_it.block_length > 4
17378 || !now_it_compatible (inst.cond))
17380 force_automatic_it_block_close ();
17381 if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
17382 new_automatic_it_block (inst.cond);
17386 now_it.insn_cond = TRUE;
17387 now_it_add_mask (inst.cond);
17390 if (now_it.state == AUTOMATIC_IT_BLOCK
17391 && (inst.it_insn_type == INSIDE_IT_LAST_INSN
17392 || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
17393 close_automatic_it_block ();
17396 case NEUTRAL_IT_INSN:
17397 now_it.block_length++;
17398 now_it.insn_cond = TRUE;
17400 if (now_it.block_length > 4)
17401 force_automatic_it_block_close ();
17403 now_it_add_mask (now_it.cc & 1);
  /* An explicit IT instruction while a block is open: close the
     automatic block and switch to manual mode.  */
17407 close_automatic_it_block ();
17408 now_it.state = MANUAL_IT_BLOCK;
17413 case MANUAL_IT_BLOCK:
17415 /* Check conditional suffixes. */
17416 const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
17419 now_it.mask &= 0x1f;
17420 is_last = (now_it.mask == 0x10);
17421 now_it.insn_cond = TRUE;
17423 switch (inst.it_insn_type)
17425 case OUTSIDE_IT_INSN:
17426 inst.error = BAD_NOT_IT;
17429 case INSIDE_IT_INSN:
17430 if (cond != inst.cond)
17432 inst.error = BAD_IT_COND;
17437 case INSIDE_IT_LAST_INSN:
17438 case IF_INSIDE_IT_LAST_INSN:
17439 if (cond != inst.cond)
17441 inst.error = BAD_IT_COND;
17446 inst.error = BAD_BRANCH;
17451 case NEUTRAL_IT_INSN:
17452 /* The BKPT instruction is unconditional even in an IT block. */
17456 inst.error = BAD_IT_IT;
/* An instruction pattern/mask pair plus a human-readable description
   of the instruction class, used to diagnose deprecated IT block
   contents (see depr_it_insns below).  */
17466 struct depr_insn_mask
17468 unsigned long pattern;
17469 unsigned long mask;
17470 const char* description;
17473 /* List of 16-bit instruction patterns deprecated in an IT block in
17474 ARMv8.  The table is terminated by an entry with mask 0, which
17475 it_fsm_post_encode () relies on when scanning it.  */
17475 static const struct depr_insn_mask depr_it_insns[] = {
17476 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
17477 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
17478 { 0xa000, 0xb800, N_("ADR") },
17479 { 0x4800, 0xf800, N_("Literal loads") },
17480 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
17481 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
17482 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
17483 field in asm_opcode. 'tvalue' is used at the stage this check happens. */
17484 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
/* Run after the encoder: make sure the IT FSM state was handled, then
   issue the ARMv8 deprecation warnings for instructions inside an IT
   block (32-bit Thumb insns, the 16-bit classes in depr_it_insns, and
   blocks with more than one conditional instruction).  */
17489 it_fsm_post_encode (void)
17493 if (!now_it.state_handled)
17494 handle_it_state ();
  /* Warn at most once per IT block (warn_deprecated latches).  */
17496 if (now_it.insn_cond
17497 && !now_it.warn_deprecated
17498 && warn_on_deprecated
17499 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
17501 if (inst.instruction >= 0x10000)
17503 as_warn (_("IT blocks containing 32-bit Thumb instructions are "
17504 "deprecated in ARMv8"));
17505 now_it.warn_deprecated = TRUE;
17509 const struct depr_insn_mask *p = depr_it_insns;
17511 while (p->mask != 0)
17513 if ((inst.instruction & p->mask) == p->pattern)
17515 as_warn (_("IT blocks containing 16-bit Thumb instructions "
17516 "of the following class are deprecated in ARMv8: "
17517 "%s"), p->description);
17518 now_it.warn_deprecated = TRUE;
17526 if (now_it.block_length > 1)
17528 as_warn (_("IT blocks containing more than one conditional "
17529 "instruction are deprecated in ARMv8"));
17530 now_it.warn_deprecated = TRUE;
17534 is_last = (now_it.mask == 0x10);
17537 now_it.state = OUTSIDE_IT_BLOCK;
/* Close the automatic IT block immediately if one is open, returning
   the FSM to OUTSIDE_IT_BLOCK.  Called e.g. from label handling (see
   arm_frob_label) so a label never lands mid-block.  */
17543 force_automatic_it_block_close (void)
17545 if (now_it.state == AUTOMATIC_IT_BLOCK)
17547 close_automatic_it_block ();
17548 now_it.state = OUTSIDE_IT_BLOCK;
/* Return nonzero if we are (still) inside an IT block, running the
   FSM first if this instruction's state has not been handled yet.
   (Body of in_it_block () per the overview comment earlier.)  */
17556 if (!now_it.state_handled)
17557 handle_it_state ();
17559 return now_it.state != OUTSIDE_IT_BLOCK;
/* Assemble one source statement STR: resolve the mnemonic via
   opcode_lookup (), then dispatch to the Thumb or ARM encoding path,
   checking CPU/feature support and driving the IT-block FSM around
   the per-opcode encoder.  */
17563 md_assemble (char *str)
17566 const struct asm_opcode * opcode;
17568 /* Align the previous label if needed. */
17569 if (last_label_seen != NULL)
17571 symbol_set_frag (last_label_seen, frag_now);
17572 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
17573 S_SET_SEGMENT (last_label_seen, now_seg);
17576 memset (&inst, '\0', sizeof (inst));
17577 inst.reloc.type = BFD_RELOC_UNUSED;
17579 opcode = opcode_lookup (&p);
17582 /* It wasn't an instruction, but it might be a register alias of
17583 the form alias .req reg, or a Neon .dn/.qn directive. */
17584 if (! create_register_alias (str, p)
17585 && ! create_neon_reg_alias (str, p))
17586 as_bad (_("bad instruction `%s'"), str);
17591 if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
17592 as_warn (_("s suffix on comparison instruction is deprecated"));
17594 /* The value which unconditional instructions should have in place of the
17595 condition field. */
17596 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
/* --- Thumb encoding path. --- */
17600 arm_feature_set variant;
17602 variant = cpu_variant;
17603 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
17604 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
17605 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
17606 /* Check that this instruction is supported for this CPU. */
17607 if (!opcode->tvariant
17608 || (thumb_mode == 1
17609 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
17611 as_bad (_("selected processor does not support Thumb mode `%s'"), str);
17614 if (inst.cond != COND_ALWAYS && !unified_syntax
17615 && opcode->tencode != do_t_branch)
17617 as_bad (_("Thumb does not support conditional execution"));
17621 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2))
17623 if (opcode->tencode != do_t_blx && opcode->tencode != do_t_branch23
17624 && !(ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_msr)
17625 || ARM_CPU_HAS_FEATURE(*opcode->tvariant, arm_ext_barrier)))
17627 /* Two things are addressed here.
17628 1) Implicit require narrow instructions on Thumb-1.
17629 This avoids relaxation accidentally introducing Thumb-2
17631 2) Reject wide instructions in non Thumb-2 cores. */
17632 if (inst.size_req == 0)
17634 else if (inst.size_req == 4)
17636 as_bad (_("selected processor does not support Thumb-2 mode `%s'"), str);
17642 inst.instruction = opcode->tvalue;
17644 if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
17646 /* Prepare the it_insn_type for those encodings that don't set
17648 it_fsm_pre_encode ();
17650 opcode->tencode ();
17652 it_fsm_post_encode ();
  /* Determine the actual size from the encoded value unless the
     instruction is relaxable or an error was raised.  */
17655 if (!(inst.error || inst.relax))
17657 gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
17658 inst.size = (inst.instruction > 0xffff ? 4 : 2);
17659 if (inst.size_req && inst.size_req != inst.size)
17661 as_bad (_("cannot honor width suffix -- `%s'"), str);
17666 /* Something has gone badly wrong if we try to relax a fixed size
17668 gas_assert (inst.size_req == 0 || !inst.relax);
17670 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
17671 *opcode->tvariant);
17672 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
17673 set those bits when Thumb-2 32-bit instructions are seen. ie.
17674 anything other than bl/blx and v6-M instructions.
17675 This is overly pessimistic for relaxable instructions. */
17676 if (((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
17678 && !(ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
17679 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier)))
17680 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
17683 check_neon_suffixes;
17687 mapping_state (MAP_THUMB);
/* --- ARM encoding path. --- */
17690 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
17694 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
17695 is_bx = (opcode->aencode == do_bx);
17697 /* Check that this instruction is supported for this CPU. */
17698 if (!(is_bx && fix_v4bx)
17699 && !(opcode->avariant &&
17700 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
17702 as_bad (_("selected processor does not support ARM mode `%s'"), str);
17707 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
17711 inst.instruction = opcode->avalue;
17712 if (opcode->tag == OT_unconditionalF)
17713 inst.instruction |= 0xF << 28;
17715 inst.instruction |= inst.cond << 28;
17716 inst.size = INSN_SIZE;
17717 if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
17719 it_fsm_pre_encode ();
17720 opcode->aencode ();
17721 it_fsm_post_encode ();
17723 /* Arm mode bx is marked as both v4T and v5 because it's still required
17724 on a hypothetical non-thumb v5 core. */
17726 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
17728 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
17729 *opcode->avariant);
17731 check_neon_suffixes;
17735 mapping_state (MAP_ARM);
17740 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
/* Warn about manually-opened IT blocks still open at the end of
   assembly, both per-section and for the file as a whole.  */
17748 check_it_blocks_finished (void)
17753 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
17754 if (seg_info (sect)->tc_segment_info_data.current_it.state
17755 == MANUAL_IT_BLOCK)
17757 as_warn (_("section '%s' finished with an open IT block."),
17761 if (now_it.state == MANUAL_IT_BLOCK)
17762 as_warn (_("file finished with an open IT block."));
17766 /* Various frobbings of labels and their addresses. */
/* Forget the label from the previous line at the start of each new
   input line.  */
17769 arm_start_line_hook (void)
17771 last_label_seen = NULL;
/* Hook run when a label SYM is defined: record it, tag it with the
   current ARM/Thumb mode (and interworking for COFF/ELF), close any
   open automatic IT block, possibly mark it as a Thumb function, and
   emit DWARF info for it.  */
17775 arm_frob_label (symbolS * sym)
17777 last_label_seen = sym;
17779 ARM_SET_THUMB (sym, thumb_mode);
17781 #if defined OBJ_COFF || defined OBJ_ELF
17782 ARM_SET_INTERWORK (sym, support_interwork);
  /* A label must not fall in the middle of an automatic IT block.  */
17785 force_automatic_it_block_close ();
17787 /* Note - do not allow local symbols (.Lxxx) to be labelled
17788 as Thumb functions. This is because these labels, whilst
17789 they exist inside Thumb code, are not the entry points for
17790 possible ARM->Thumb calls. Also, these labels can be used
17791 as part of a computed goto or switch statement. eg gcc
17792 can generate code that looks like this:
17794 ldr r2, [pc, .Laaa]
17804 The first instruction loads the address of the jump table.
17805 The second instruction converts a table index into a byte offset.
17806 The third instruction gets the jump address out of the table.
17807 The fourth instruction performs the jump.
17809 If the address stored at .Laaa is that of a symbol which has the
17810 Thumb_Func bit set, then the linker will arrange for this address
17811 to have the bottom bit set, which in turn would mean that the
17812 address computation performed by the third instruction would end
17813 up with the bottom bit set. Since the ARM is capable of unaligned
17814 word loads, the instruction would then load the incorrect address
17815 out of the jump table, and chaos would ensue. */
17816 if (label_is_thumb_function_name
17817 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
17818 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
17820 /* When the address of a Thumb function is taken the bottom
17821 bit of that address should be set. This will allow
17822 interworking between Arm and Thumb functions to work
17825 THUMB_SET_FUNC (sym, 1);
  /* The .thumb_func marker applies to one label only.  */
17827 label_is_thumb_function_name = FALSE;
17830 dwarf2_emit_label (sym);
/* Recognise a "data:" marker after the current label in Thumb mode:
   rewrite it to the "/data" form (see arm_canonicalize_symbol_name)
   and consume it from the input line.  */
17834 arm_data_in_code (void)
17836 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
17838 *input_line_pointer = '/';
17839 input_line_pointer += 5;
17840 *input_line_pointer = 0;
/* Canonicalize NAME by stripping a trailing "/data" suffix when
   assembling in Thumb mode (the marker added by arm_data_in_code).  */
17848 arm_canonicalize_symbol_name (char * name)
17852 if (thumb_mode && (len = strlen (name)) > 5
17853 && streq (name + len - 5, "/data"))
17854 *(name + len - 5) = 0;
17859 /* Table of all register names defined by default. The user can
17860 define additional names with .req. Note that all register names
17861 should appear in both upper and lowercase variants. Some registers
17862 also have mixed-case names. */
/* REGDEF(s,n,t): one reg_entry initializer mapping name S to register
   number N of type REG_TYPE_T (builtin, no Neon shape).  */
17864 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* REGNUM(p,n,t): define register P##N with number N.  */
17865 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* REGNUM2(p,n,t): as REGNUM but the internal number is doubled.
   NOTE(review): presumably for register files whose names index at
   half the internal granularity -- confirm against reg_names usage.  */
17866 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* REGSET/REGSETH: 16 consecutive registers 0-15 / 16-31.  */
17867 #define REGSET(p,t) \
17868 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
17869 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
17870 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
17871 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
17872 #define REGSETH(p,t) \
17873 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
17874 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
17875 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
17876 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
17877 #define REGSET2(p,t) \
17878 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
17879 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
17880 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
17881 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* SPLRBANK: banked LR/SP/SPSR entries (both cases) for one mode,
   encoded as 768 | (bank base << 16), with SPSR_BIT for the SPSR.  */
17882 #define SPLRBANK(base,bank,t) \
17883 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
17884 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
17885 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
17886 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
17887 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
17888 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
/* Master table of built-in register names, grouped by register file.
   Each group deliberately lists both case variants (see the comment
   above REGDEF).  NOTE(review): the closing "};" of this initializer is
   missing from this listing (embedded line numbers jump 18002 -> 18008);
   it is present in the full source.  */
17890 static const struct reg_entry reg_names[] =
17892 /* ARM integer registers. */
17893 REGSET(r, RN), REGSET(R, RN),
17895 /* ATPCS synonyms. */
17896 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
17897 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
17898 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
17900 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
17901 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
17902 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
17904 /* Well-known aliases. */
17905 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
17906 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
17908 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
17909 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
17911 /* Coprocessor numbers. */
17912 REGSET(p, CP), REGSET(P, CP),
/* NOTE(review): the next comment is truncated by an extraction gap
   (line 17915 missing) — presumably it ends "...compatibility".  */
17914 /* Coprocessor register numbers. The "cr" variants are for backward
17916 REGSET(c, CN), REGSET(C, CN),
17917 REGSET(cr, CN), REGSET(CR, CN),
/* Banked registers use 512|(index<<16) encodings; SPSR variants add
   SPSR_BIT.  usr bank first, then fiq, then the SPLRBANK groups.  */
17919 /* ARM banked registers. */
17920 REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
17921 REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
17922 REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
17923 REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
17924 REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
17925 REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
17926 REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
17928 REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
17929 REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
17930 REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
17931 REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
17932 REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
17933 REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
17934 REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
17935 REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
17937 SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
17938 SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
17939 SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
17940 SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
17941 SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
17942 REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
17943 REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
17944 REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
17945 REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
17947 /* FPA registers. */
17948 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
17949 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
17951 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
17952 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
17954 /* VFP SP registers. */
17955 REGSET(s,VFS), REGSET(S,VFS),
17956 REGSETH(s,VFS), REGSETH(S,VFS),
17958 /* VFP DP Registers. */
17959 REGSET(d,VFD), REGSET(D,VFD),
17960 /* Extra Neon DP registers. */
17961 REGSETH(d,VFD), REGSETH(D,VFD),
/* Q registers use REGSET2: each qN maps to number 2*N (the even D
   register it starts at).  */
17963 /* Neon QP registers. */
17964 REGSET2(q,NQ), REGSET2(Q,NQ),
17966 /* VFP control registers. */
17967 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
17968 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
17969 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
17970 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
17971 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
17972 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
17974 /* Maverick DSP coprocessor registers. */
17975 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
17976 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
17978 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
17979 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
17980 REGDEF(dspsc,0,DSPSC),
17982 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
17983 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
17984 REGDEF(DSPSC,0,DSPSC),
17986 /* iWMMXt data registers - p0, c0-15. */
17987 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
17989 /* iWMMXt control registers - p1, c0-3. */
17990 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
17991 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
17992 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
17993 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
17995 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
17996 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
17997 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
17998 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
17999 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
18001 /* XScale accumulator registers. */
18002 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
18008 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
18009 within psr_required_here. */
/* Every ordered 2-, 3- and 4-letter permutation of the field letters
   f, s, x, c is listed explicitly, so any MSR field order the user
   writes is accepted.  NOTE(review): extraction gaps — the single-flag
   entries (original lines 18015-18023) are missing from this listing,
   as is the closing "};".  */
18010 static const struct asm_psr psrs[] =
18012 /* Backward compatibility notation. Note that "all" is no longer
18013 truly all possible PSR bits. */
18014 {"all", PSR_c | PSR_f},
18018 /* Individual flags. */
18024 /* Combinations of flags. */
18025 {"fs", PSR_f | PSR_s},
18026 {"fx", PSR_f | PSR_x},
18027 {"fc", PSR_f | PSR_c},
18028 {"sf", PSR_s | PSR_f},
18029 {"sx", PSR_s | PSR_x},
18030 {"sc", PSR_s | PSR_c},
18031 {"xf", PSR_x | PSR_f},
18032 {"xs", PSR_x | PSR_s},
18033 {"xc", PSR_x | PSR_c},
18034 {"cf", PSR_c | PSR_f},
18035 {"cs", PSR_c | PSR_s},
18036 {"cx", PSR_c | PSR_x},
18037 {"fsx", PSR_f | PSR_s | PSR_x},
18038 {"fsc", PSR_f | PSR_s | PSR_c},
18039 {"fxs", PSR_f | PSR_x | PSR_s},
18040 {"fxc", PSR_f | PSR_x | PSR_c},
18041 {"fcs", PSR_f | PSR_c | PSR_s},
18042 {"fcx", PSR_f | PSR_c | PSR_x},
18043 {"sfx", PSR_s | PSR_f | PSR_x},
18044 {"sfc", PSR_s | PSR_f | PSR_c},
18045 {"sxf", PSR_s | PSR_x | PSR_f},
18046 {"sxc", PSR_s | PSR_x | PSR_c},
18047 {"scf", PSR_s | PSR_c | PSR_f},
18048 {"scx", PSR_s | PSR_c | PSR_x},
18049 {"xfs", PSR_x | PSR_f | PSR_s},
18050 {"xfc", PSR_x | PSR_f | PSR_c},
18051 {"xsf", PSR_x | PSR_s | PSR_f},
18052 {"xsc", PSR_x | PSR_s | PSR_c},
18053 {"xcf", PSR_x | PSR_c | PSR_f},
18054 {"xcs", PSR_x | PSR_c | PSR_s},
18055 {"cfs", PSR_c | PSR_f | PSR_s},
18056 {"cfx", PSR_c | PSR_f | PSR_x},
18057 {"csf", PSR_c | PSR_s | PSR_f},
18058 {"csx", PSR_c | PSR_s | PSR_x},
18059 {"cxf", PSR_c | PSR_x | PSR_f},
18060 {"cxs", PSR_c | PSR_x | PSR_s},
18061 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
18062 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
18063 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
18064 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
18065 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
18066 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
18067 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
18068 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
18069 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
18070 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
18071 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
18072 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
18073 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
18074 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
18075 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
18076 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
18077 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
18078 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
18079 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
18080 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
18081 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
18082 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
18083 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
18084 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
18087 /* Table of V7M psr names. */
/* Maps each special-register name (both cases, plus the mixed-case
   "xPSR") to its numeric encoding.  NOTE(review): the numbers are
   presumably the MRS/MSR SYSm field values of ARMv7-M — confirm against
   the ARM ARM.  The closing "};" is missing from this listing.  */
18088 static const struct asm_psr v7m_psrs[] =
18090 {"apsr", 0 }, {"APSR", 0 },
18091 {"iapsr", 1 }, {"IAPSR", 1 },
18092 {"eapsr", 2 }, {"EAPSR", 2 },
18093 {"psr", 3 }, {"PSR", 3 },
18094 {"xpsr", 3 }, {"XPSR", 3 }, {"xPSR", 3 },
18095 {"ipsr", 5 }, {"IPSR", 5 },
18096 {"epsr", 6 }, {"EPSR", 6 },
18097 {"iepsr", 7 }, {"IEPSR", 7 },
18098 {"msp", 8 }, {"MSP", 8 },
18099 {"psp", 9 }, {"PSP", 9 },
18100 {"primask", 16}, {"PRIMASK", 16},
18101 {"basepri", 17}, {"BASEPRI", 17},
18102 {"basepri_max", 18}, {"BASEPRI_MAX", 18},
/* Deliberate: lowercase name repeats "basepri_max" while the uppercase
   alias is the historical misspelling "BASEPRI_MASK" — kept so old
   sources still assemble, as the original comment says.  */
18103 {"basepri_max", 18}, {"BASEPRI_MASK", 18}, /* Typo, preserved for backwards compatibility. */
18104 {"faultmask", 19}, {"FAULTMASK", 19},
18105 {"control", 20}, {"CONTROL", 20}
18108 /* Table of all shift-in-operand names. */
/* "asl" is accepted as a synonym for LSL; each name appears in both
   cases.  NOTE(review): the closing "};" is missing from this
   listing.  */
18109 static const struct asm_shift_name shift_names [] =
18111 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
18112 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
18113 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
18114 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
18115 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
18116 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
18119 /* Table of all explicit relocation names. */
/* Maps the operand prefixes usable in source (e.g. ldr r0, =sym(GOT))
   to BFD relocation codes; each name in both cases.  Not const —
   NOTE(review): presumably mutated elsewhere (e.g. .arch-dependent
   adjustments); confirm before const-qualifying.  The closing "};" is
   missing from this listing.  */
18121 static struct reloc_entry reloc_names[] =
18123 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
18124 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
18125 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
18126 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
18127 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
18128 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
18129 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
18130 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
18131 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
18132 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
18133 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
18134 { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
18135 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
18136 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
18137 { "tlscall", BFD_RELOC_ARM_TLS_CALL},
18138 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
18139 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
18140 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ}
18144 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
/* NOTE(review): extraction gaps — only the cs/hs (0x2) and cc/ul/lo
   (0x3) entries survive in this listing; the remaining condition codes
   and the closing "};" are missing (embedded line numbers jump
   18150 -> 18164).  "hs"/"lo" ("ul") are the unsigned higher-or-same /
   lower synonyms for cs/cc.  */
18145 static const struct asm_cond conds[] =
18149 {"cs", 0x2}, {"hs", 0x2},
18150 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
/* UL_BARRIER defines a lower- and an uppercase entry for one barrier
   option, each carrying the 4-bit option CODE and the architecture
   feature that makes it available (the *LD load-only options require
   ARMv8; the rest only the base barrier extension).  */
18164 #define UL_BARRIER(L,U,CODE,FEAT) \
18165 { L, CODE, ARM_FEATURE (FEAT, 0) }, \
18166 { U, CODE, ARM_FEATURE (FEAT, 0) }
/* Options for DSB/DMB/ISB, most specific first.  NOTE(review): the
   closing "};" and the #undef of UL_BARRIER are missing from this
   listing (embedded numbers jump 18185 -> 18190).  */
18168 static struct asm_barrier_opt barrier_opt_names[] =
18170 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER),
18171 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER),
18172 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8),
18173 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER),
18174 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER),
18175 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER),
18176 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER),
18177 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8),
18178 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER),
18179 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER),
18180 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER),
18181 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER),
18182 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8),
18183 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER),
18184 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER),
18185 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8)
18190 /* Table of ARM-format instructions. */
18192 /* Macros for gluing together operand strings. N.B. In all cases
18193 other than OPS0, the trailing OP_stop comes from default
18194 zero-initialization of the unspecified elements of the array. */
/* OPSn builds an operand-type array of n entries, prefixing each
   argument with OP_.  */
18195 #define OPS0() { OP_stop, }
18196 #define OPS1(a) { OP_##a, }
18197 #define OPS2(a,b) { OP_##a,OP_##b, }
18198 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
18199 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
18200 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
18201 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
18203 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
18204 This is useful when mixing operands for ARM and THUMB, i.e. using the
18205 MIX_ARM_THUMB_OPERANDS macro.
18206 In order to use these macros, prefix the number of operands with _
/* NOTE(review): the line completing the sentence above (original 18207,
   presumably "e.g. _3.") is missing from this listing.  */
18208 #define OPS_1(a) { a, }
18209 #define OPS_2(a,b) { a,b, }
18210 #define OPS_3(a,b,c) { a,b,c, }
18211 #define OPS_4(a,b,c,d) { a,b,c,d, }
18212 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
18213 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
18215 /* These macros abstract out the exact format of the mnemonic table and
18216 save some repeated characters. */
18218 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
/* TxCE: base form — op is the ARM opcode (0x pasted on), top the Thumb
   opcode passed through verbatim; ae/te name the do_* encoder
   functions for the ARM and Thumb encodings respectively.  */
18219 #define TxCE(mnem, op, top, nops, ops, ae, te) \
18220 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
18221 THUMB_VARIANT, do_##ae, do_##te }
18223 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
18224 a T_MNEM_xyz enumerator. */
18225 #define TCE(mnem, aop, top, nops, ops, ae, te) \
18226 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
18227 #define tCE(mnem, aop, top, nops, ops, ae, te) \
18228 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18230 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
18231 infix after the third character. */
18232 #define TxC3(mnem, op, top, nops, ops, ae, te) \
18233 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
18234 THUMB_VARIANT, do_##ae, do_##te }
/* The "w" forms tag the infix spelling as deprecated
   (OT_cinfix3_deprecated) so a warning can be issued.  */
18235 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
18236 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
18237 THUMB_VARIANT, do_##ae, do_##te }
18238 #define TC3(mnem, aop, top, nops, ops, ae, te) \
18239 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
18240 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
18241 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
18242 #define tC3(mnem, aop, top, nops, ops, ae, te) \
18243 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18244 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
18245 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
18247 /* Mnemonic that cannot be conditionalized. The ARM condition-code
18248 field is still 0xE. Many of the Thumb variants can be executed
18249 conditionally, so this is checked separately. */
18250 #define TUE(mnem, op, top, nops, ops, ae, te) \
18251 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
18252 THUMB_VARIANT, do_##ae, do_##te }
18254 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
18255 Used by mnemonics that have very minimal differences in the encoding for
18256 ARM and Thumb variants and can be handled in a common function. */
18257 #define TUEc(mnem, op, top, nops, ops, en) \
18258 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
18259 THUMB_VARIANT, do_##en, do_##en }
18261 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
18262 condition code field. */
18263 #define TUF(mnem, op, top, nops, ops, ae, te) \
18264 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
18265 THUMB_VARIANT, do_##ae, do_##te }
18267 /* ARM-only variants of all the above. */
/* ARM-only entries: Thumb opcode 0, Thumb variant 0, Thumb encoder
   NULL.  Note C3/UE/UF stringize their mnemonic argument (#mnem) while
   CE/CL take it pre-quoted — callers below differ accordingly.  */
18268 #define CE(mnem, op, nops, ops, ae) \
18269 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18271 #define C3(mnem, op, nops, ops, ae) \
18272 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18274 /* Legacy mnemonics that always have conditional infix after the third
/* NOTE(review): the line completing this comment (original 18275,
   presumably "character.  */") is missing from this listing.  */
18276 #define CL(mnem, op, nops, ops, ae) \
18277 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
18278 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
18280 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
/* The Thumb-2 opcode is the ARM opcode with 0xe (the AL condition)
   pasted in front, and the same encoder serves both modes.  */
18281 #define cCE(mnem, op, nops, ops, ae) \
18282 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18284 /* Legacy coprocessor instructions where conditional infix and conditional
18285 suffix are ambiguous. For consistency this includes all FPA instructions,
18286 not just the potentially ambiguous ones. */
18287 #define cCL(mnem, op, nops, ops, ae) \
18288 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
18289 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
18291 /* Coprocessor, takes either a suffix or a position-3 infix
18292 (for an FPA corner case). */
18293 #define C3E(mnem, op, nops, ops, ae) \
18294 { mnem, OPS##nops ops, OT_csuf_or_in3, \
18295 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
/* xCM_ builds one table entry for a mnemonic of the shape
   <m1><cond><m3>, i.e. with the condition infixed between two mnemonic
   fragments.  When m2 is empty (sizeof (#m2) == 1) the entry is the
   unconditional form; otherwise the infix position is encoded as
   OT_odd_infix_0 plus the length of the leading fragment m1.  */
18297 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
18298 { m1 #m2 m3, OPS##nops ops, \
18299 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
18300 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
/* CM expands to the full set: the bare mnemonic plus one entry per
   condition code (including the hs/lo/ul synonyms and al).  */
18302 #define CM(m1, m2, op, nops, ops, ae) \
18303 xCM_ (m1, , m2, op, nops, ops, ae), \
18304 xCM_ (m1, eq, m2, op, nops, ops, ae), \
18305 xCM_ (m1, ne, m2, op, nops, ops, ae), \
18306 xCM_ (m1, cs, m2, op, nops, ops, ae), \
18307 xCM_ (m1, hs, m2, op, nops, ops, ae), \
18308 xCM_ (m1, cc, m2, op, nops, ops, ae), \
18309 xCM_ (m1, ul, m2, op, nops, ops, ae), \
18310 xCM_ (m1, lo, m2, op, nops, ops, ae), \
18311 xCM_ (m1, mi, m2, op, nops, ops, ae), \
18312 xCM_ (m1, pl, m2, op, nops, ops, ae), \
18313 xCM_ (m1, vs, m2, op, nops, ops, ae), \
18314 xCM_ (m1, vc, m2, op, nops, ops, ae), \
18315 xCM_ (m1, hi, m2, op, nops, ops, ae), \
18316 xCM_ (m1, ls, m2, op, nops, ops, ae), \
18317 xCM_ (m1, ge, m2, op, nops, ops, ae), \
18318 xCM_ (m1, lt, m2, op, nops, ops, ae), \
18319 xCM_ (m1, gt, m2, op, nops, ops, ae), \
18320 xCM_ (m1, le, m2, op, nops, ops, ae), \
18321 xCM_ (m1, al, m2, op, nops, ops, ae)
/* ARM-only unconditional forms (Thumb fields zero/NULL), with and
   without the 0xF condition field tag.  */
18323 #define UE(mnem, op, nops, ops, ae) \
18324 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
18326 #define UF(mnem, op, nops, ops, ae) \
18327 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
18329 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
18330 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
18331 use the same encoding function for each. */
18332 #define NUF(mnem, op, nops, ops, enc) \
18333 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
18334 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
18336 /* Neon data processing, version which indirects through neon_enc_tab for
18337 the various overloaded versions of opcodes. */
/* Lowercase-n forms store an N_MNEM enumerator in the opcode fields
   instead of a literal encoding.  */
18338 #define nUF(mnem, op, nops, ops, enc) \
18339 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
18340 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
18342 /* Neon insn with conditional suffix for the ARM version, non-overloaded
/* NOTE(review): the line completing this comment (original 18343) is
   missing from this listing.  */
18344 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
18345 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
18346 THUMB_VARIANT, do_##enc, do_##enc }
18348 #define NCE(mnem, op, nops, ops, enc) \
18349 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
18351 #define NCEF(mnem, op, nops, ops, enc) \
18352 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
18354 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
18355 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
18356 { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
18357 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
18359 #define nCE(mnem, op, nops, ops, enc) \
18360 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
18362 #define nCEF(mnem, op, nops, ops, enc) \
18363 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
18367 static const struct asm_opcode insns[] =
18369 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
18370 #define THUMB_VARIANT & arm_ext_v4t
18371 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
18372 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
18373 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
18374 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
18375 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
18376 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
18377 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
18378 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
18379 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
18380 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
18381 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
18382 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
18383 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
18384 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
18385 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
18386 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
18388 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
18389 for setting PSR flag bits. They are obsolete in V6 and do not
18390 have Thumb equivalents. */
18391 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18392 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
18393 CL("tstp", 110f000, 2, (RR, SH), cmp),
18394 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18395 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
18396 CL("cmpp", 150f000, 2, (RR, SH), cmp),
18397 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18398 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
18399 CL("cmnp", 170f000, 2, (RR, SH), cmp),
18401 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
18402 tC3("movs", 1b00000, _movs, 2, (RR, SH), mov, t_mov_cmp),
18403 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
18404 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
18406 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
18407 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18408 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
18410 OP_ADDRGLDR),ldst, t_ldst),
18411 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
18413 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18414 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18415 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18416 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18417 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18418 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18420 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
18421 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
18422 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
18423 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
18426 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
18427 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
18428 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
18429 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
18431 /* Thumb-compatibility pseudo ops. */
18432 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
18433 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
18434 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
18435 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
18436 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
18437 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
18438 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
18439 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
18440 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
18441 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
18442 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
18443 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
18445 /* These may simplify to neg. */
18446 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
18447 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
18449 #undef THUMB_VARIANT
18450 #define THUMB_VARIANT & arm_ext_v6
18452 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
18454 /* V1 instructions with no Thumb analogue prior to V6T2. */
18455 #undef THUMB_VARIANT
18456 #define THUMB_VARIANT & arm_ext_v6t2
18458 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18459 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
18460 CL("teqp", 130f000, 2, (RR, SH), cmp),
18462 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18463 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18464 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
18465 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
18467 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18468 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18470 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18471 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
18473 /* V1 instructions with no Thumb analogue at all. */
18474 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
18475 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
18477 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
18478 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
18479 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
18480 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
18481 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
18482 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
18483 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
18484 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
18487 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
18488 #undef THUMB_VARIANT
18489 #define THUMB_VARIANT & arm_ext_v4t
18491 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18492 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
18494 #undef THUMB_VARIANT
18495 #define THUMB_VARIANT & arm_ext_v6t2
18497 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18498 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
18500 /* Generic coprocessor instructions. */
18501 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18502 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18503 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18504 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18505 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18506 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18507 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
18510 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
18512 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18513 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
18516 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
18517 #undef THUMB_VARIANT
18518 #define THUMB_VARIANT & arm_ext_msr
18520 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
18521 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
18524 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
18525 #undef THUMB_VARIANT
18526 #define THUMB_VARIANT & arm_ext_v6t2
18528 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18529 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18530 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18531 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18532 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18533 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18534 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
18535 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
18538 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
18539 #undef THUMB_VARIANT
18540 #define THUMB_VARIANT & arm_ext_v4t
18542 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18543 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18544 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18545 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18546 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18547 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
18550 #define ARM_VARIANT & arm_ext_v4t_5
18552 /* ARM Architecture 4T. */
18553 /* Note: bx (and blx) are required on V5, even if the processor does
18554 not support Thumb. */
18555 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
18558 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
18559 #undef THUMB_VARIANT
18560 #define THUMB_VARIANT & arm_ext_v5t
18562 /* Note: blx has 2 variants; the .value coded here is for
18563 BLX(2). Only this variant has conditional execution. */
18564 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
18565 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
18567 #undef THUMB_VARIANT
18568 #define THUMB_VARIANT & arm_ext_v6t2
18570 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
18571 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18572 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18573 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18574 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
18575 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
18576 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18577 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
18580 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
18581 #undef THUMB_VARIANT
18582 #define THUMB_VARIANT & arm_ext_v5exp
18584 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18585 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18586 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18587 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18589 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18590 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
18592 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18593 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18594 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18595 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
18597 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18598 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18599 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18600 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18602 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18603 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18605 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18606 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18607 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18608 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
18611 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
18612 #undef THUMB_VARIANT
18613 #define THUMB_VARIANT & arm_ext_v6t2
18615 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
18616 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
18618 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
18619 ADDRGLDRS), ldrd, t_ldstd),
18621 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18622 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18625 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
18627 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
18630 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
18631 #undef THUMB_VARIANT
18632 #define THUMB_VARIANT & arm_ext_v6
18634 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
18635 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
18636 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18637 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18638 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
18639 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18640 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18641 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18642 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18643 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
18645 #undef THUMB_VARIANT
18646 #define THUMB_VARIANT & arm_ext_v6t2
18648 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
18649 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18651 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18652 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
18654 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
18655 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
18657 /* ARM V6 not included in V7M. */
18658 #undef THUMB_VARIANT
18659 #define THUMB_VARIANT & arm_ext_v6_notm
18660 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18661 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18662 UF(rfeib, 9900a00, 1, (RRw), rfe),
18663 UF(rfeda, 8100a00, 1, (RRw), rfe),
18664 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18665 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
18666 UF(rfefa, 8100a00, 1, (RRw), rfe),
18667 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
18668 UF(rfeed, 9900a00, 1, (RRw), rfe),
18669 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18670 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18671 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
18672 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
18673 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
18674 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
18675 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
18676 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18677 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
18679 /* ARM V6 not included in V7M (eg. integer SIMD). */
18680 #undef THUMB_VARIANT
18681 #define THUMB_VARIANT & arm_ext_v6_dsp
18682 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
18683 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
18684 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
18685 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18686 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18687 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18688 /* Old name for QASX. */
18689 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18690 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18691 /* Old name for QSAX. */
18692 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18693 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18694 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18695 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18696 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18697 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18698 /* Old name for SASX. */
18699 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18700 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18701 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18702 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18703 /* Old name for SHASX. */
18704 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18705 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18706 /* Old name for SHSAX. */
18707 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18708 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18709 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18710 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18711 /* Old name for SSAX. */
18712 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18713 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18714 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18715 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18716 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18717 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18718 /* Old name for UASX. */
18719 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18720 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18721 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18722 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18723 /* Old name for UHASX. */
18724 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18725 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18726 /* Old name for UHSAX. */
18727 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18728 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18729 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18730 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18731 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18732 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18733 /* Old name for UQASX. */
18734 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18735 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18736 /* Old name for UQSAX. */
18737 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18738 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18739 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18740 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18741 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18742 /* Old name for USAX. */
18743 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18744 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18745 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18746 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18747 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18748 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18749 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18750 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18751 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
18752 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
18753 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
18754 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18755 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18756 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18757 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18758 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18759 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18760 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18761 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
18762 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18763 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18764 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18765 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18766 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18767 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18768 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18769 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18770 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18771 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18772 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
18773 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
18774 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
18775 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
18776 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
18779 #define ARM_VARIANT & arm_ext_v6k
18780 #undef THUMB_VARIANT
18781 #define THUMB_VARIANT & arm_ext_v6k
18783 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
18784 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
18785 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
18786 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
18788 #undef THUMB_VARIANT
18789 #define THUMB_VARIANT & arm_ext_v6_notm
18790 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
18792 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
18793 RRnpcb), strexd, t_strexd),
18795 #undef THUMB_VARIANT
18796 #define THUMB_VARIANT & arm_ext_v6t2
18797 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
18799 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
18801 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18803 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
18805 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
18808 #define ARM_VARIANT & arm_ext_sec
18809 #undef THUMB_VARIANT
18810 #define THUMB_VARIANT & arm_ext_sec
18812 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
18815 #define ARM_VARIANT & arm_ext_virt
18816 #undef THUMB_VARIANT
18817 #define THUMB_VARIANT & arm_ext_virt
18819 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
18820 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
18823 #define ARM_VARIANT & arm_ext_v6t2
18824 #undef THUMB_VARIANT
18825 #define THUMB_VARIANT & arm_ext_v6t2
18827 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
18828 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
18829 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18830 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
18832 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
18833 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
18834 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
18835 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
18837 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18838 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18839 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18840 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
18842 /* Thumb-only instructions. */
18844 #define ARM_VARIANT NULL
18845 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
18846 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
18848 /* ARM does not really have an IT instruction, so always allow it.
18849 The opcode is copied from Thumb in order to allow warnings in
18850 -mimplicit-it=[never | arm] modes. */
18852 #define ARM_VARIANT & arm_ext_v1
18854 TUE("it", bf08, bf08, 1, (COND), it, t_it),
18855 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
18856 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
18857 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
18858 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
18859 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
18860 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
18861 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
18862 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
18863 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
18864 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
18865 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
18866 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
18867 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
18868 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
18869 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
18870 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
18871 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
18873 /* Thumb2 only instructions. */
18875 #define ARM_VARIANT NULL
18877 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18878 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
18879 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
18880 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
18881 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
18882 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
18884 /* Hardware division instructions. */
18886 #define ARM_VARIANT & arm_ext_adiv
18887 #undef THUMB_VARIANT
18888 #define THUMB_VARIANT & arm_ext_div
18890 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
18891 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
18893 /* ARM V6M/V7 instructions. */
18895 #define ARM_VARIANT & arm_ext_barrier
18896 #undef THUMB_VARIANT
18897 #define THUMB_VARIANT & arm_ext_barrier
18899 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
18900 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
18901 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
18903 /* ARM V7 instructions. */
18905 #define ARM_VARIANT & arm_ext_v7
18906 #undef THUMB_VARIANT
18907 #define THUMB_VARIANT & arm_ext_v7
18909 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
18910 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
18913 #define ARM_VARIANT & arm_ext_mp
18914 #undef THUMB_VARIANT
18915 #define THUMB_VARIANT & arm_ext_mp
18917 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
18919 /* ARMv8 instructions. */
18921 #define ARM_VARIANT & arm_ext_v8
18922 #undef THUMB_VARIANT
18923 #define THUMB_VARIANT & arm_ext_v8
18925 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
18926 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
18927 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18928 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
18930 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
18931 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18932 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
18934 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
18936 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
18938 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
18940 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18941 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18942 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
18943 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18944 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18945 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
18947 /* ARMv8 T32 only. */
18949 #define ARM_VARIANT NULL
18950 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
18951 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
18952 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
18954 /* FP for ARMv8. */
18956 #define ARM_VARIANT & fpu_vfp_ext_armv8
18957 #undef THUMB_VARIANT
18958 #define THUMB_VARIANT & fpu_vfp_ext_armv8
18960 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
18961 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
18962 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
18963 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
18964 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
18965 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
18966 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
18967 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
18968 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
18969 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
18970 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
18971 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
18972 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
18973 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
18974 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
18975 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
18976 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
18978 /* Crypto v1 extensions. */
18980 #define ARM_VARIANT & fpu_crypto_ext_armv8
18981 #undef THUMB_VARIANT
18982 #define THUMB_VARIANT & fpu_crypto_ext_armv8
18984 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
18985 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
18986 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
18987 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
18988 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
18989 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
18990 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
18991 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
18992 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
18993 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
18994 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
18995 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
18996 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
18997 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
19000 #define ARM_VARIANT & crc_ext_armv8
19001 #undef THUMB_VARIANT
19002 #define THUMB_VARIANT & crc_ext_armv8
19003 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
19004 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
19005 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
19006 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
19007 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
19008 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
19011 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
19012 #undef THUMB_VARIANT
19013 #define THUMB_VARIANT NULL
19015 cCE("wfs", e200110, 1, (RR), rd),
19016 cCE("rfs", e300110, 1, (RR), rd),
19017 cCE("wfc", e400110, 1, (RR), rd),
19018 cCE("rfc", e500110, 1, (RR), rd),
19020 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
19021 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
19022 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
19023 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
19025 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
19026 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
19027 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
19028 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
19030 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
19031 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
19032 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
19033 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
19034 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
19035 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
19036 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
19037 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
19038 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
19039 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
19040 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
19041 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
19043 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
19044 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
19045 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
19046 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
19047 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
19048 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
19049 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
19050 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
19051 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
19052 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
19053 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
19054 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
19056 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
19057 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
19058 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
19059 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
19060 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
19061 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
19062 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
19063 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
19064 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
19065 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
19066 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
19067 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
19069 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
19070 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
19071 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
19072 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
19073 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
19074 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
19075 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
19076 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
19077 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
19078 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
19079 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
19080 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
19082 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
19083 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
19084 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
19085 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
19086 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
19087 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
19088 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
19089 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
19090 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
19091 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
19092 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
19093 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
19095 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
19096 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
19097 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
19098 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
19099 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
19100 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
19101 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
19102 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
19103 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
19104 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
19105 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
19106 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
19108 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
19109 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
19110 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
19111 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
19112 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
19113 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
19114 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
19115 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
19116 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
19117 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
19118 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
19119 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
/* FPA monadic "exp" group: s/d/e precision with p/m/z rounding suffixes,
   matching every other monadic group in this table (log, lgn, sin, ...).
   The final row previously repeated "expdz"; opcode e788160 is the
   extended-precision zero-rounding variant, i.e. "expez" (cf. logez
   e588160, lgnez e688160), so the duplicate mnemonic is corrected.  */
19121 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
19122 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
19123 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
19124 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
19125 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
19126 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
19127 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
19128 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
19129 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
19130 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
19131 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
19132 cCL("expez", e788160, 2, (RF, RF_IF), rd_rm),
19134 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
19135 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
19136 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
19137 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
19138 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
19139 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
19140 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
19141 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
19142 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
19143 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
19144 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
19145 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
19147 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
19148 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
19149 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
19150 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
19151 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
19152 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
19153 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
19154 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
19155 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
19156 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
19157 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
19158 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
19160 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
19161 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
19162 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
19163 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
19164 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
19165 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
19166 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
19167 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
19168 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
19169 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
19170 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
19171 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
19173 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
19174 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
19175 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
19176 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
19177 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
19178 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
19179 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
19180 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
19181 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
19182 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
19183 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
19184 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
19186 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
19187 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
19188 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
19189 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
19190 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
19191 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
19192 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
19193 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
19194 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
19195 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
19196 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
19197 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
19199 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
19200 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
19201 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
19202 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
19203 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
19204 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
19205 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
19206 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
19207 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
19208 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
19209 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
19210 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
19212 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
19213 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
19214 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
19215 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
19216 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
19217 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
19218 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
19219 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
19220 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
19221 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
19222 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
19223 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
19225 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
19226 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
19227 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
19228 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
19229 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
19230 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
19231 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
19232 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
19233 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
19234 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
19235 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
19236 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
19238 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
19239 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
19240 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
19241 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
19242 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
19243 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19244 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19245 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19246 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
19247 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
19248 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
19249 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
19251 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
19252 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
19253 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
19254 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
19255 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
19256 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19257 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19258 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19259 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
19260 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
19261 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
19262 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
19264 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
19265 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
19266 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
19267 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
19268 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
19269 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19270 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19271 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19272 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
19273 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
19274 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
19275 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
19277 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
19278 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
19279 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
19280 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
19281 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
19282 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19283 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19284 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19285 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
19286 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
19287 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
19288 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
19290 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
19291 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
19292 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
19293 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
19294 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
19295 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19296 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19297 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19298 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
19299 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
19300 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
19301 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
19303 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
19304 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
19305 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
19306 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
19307 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
19308 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19309 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19310 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19311 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
19312 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
19313 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
19314 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
19316 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
19317 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
19318 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
19319 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
19320 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
19321 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19322 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19323 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19324 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
19325 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
19326 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
19327 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
19329 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
19330 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
19331 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
19332 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
19333 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
19334 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19335 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19336 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19337 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
19338 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
19339 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
19340 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
19342 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
19343 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
19344 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
19345 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
19346 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
19347 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19348 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19349 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19350 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
19351 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
19352 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
19353 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
19355 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
19356 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
19357 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
19358 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
19359 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
19360 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19361 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19362 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19363 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
19364 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
19365 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
19366 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
19368 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19369 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19370 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19371 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19372 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19373 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19374 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19375 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19376 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19377 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19378 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19379 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19381 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19382 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19383 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19384 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19385 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19386 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19387 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19388 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19389 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19390 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19391 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19392 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19394 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
19395 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
19396 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
19397 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
19398 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
19399 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
19400 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
19401 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
19402 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
19403 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
19404 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
19405 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
19407 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
19408 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
19409 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
19410 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
19412 cCL("flts", e000110, 2, (RF, RR), rn_rd),
19413 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
19414 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
19415 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
19416 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
19417 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
19418 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
19419 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
19420 cCL("flte", e080110, 2, (RF, RR), rn_rd),
19421 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
19422 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
19423 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
19425 /* The implementation of the FIX instruction is broken on some
19426 assemblers, in that it accepts a precision specifier as well as a
19427 rounding specifier, despite the fact that this is meaningless.
19428 To be more compatible, we accept it as well, though of course it
19429 does not set any bits. */
19430 cCE("fix", e100110, 2, (RR, RF), rd_rm),
19431 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
19432 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
19433 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
19434 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
19435 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
19436 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
19437 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
19438 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
19439 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
19440 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
19441 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
19442 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
19444 /* Instructions that were new with the real FPA, call them V2. */
19446 #define ARM_VARIANT & fpu_fpa_ext_v2
19448 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19449 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19450 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19451 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19452 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19453 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
19456 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
19458 /* Moves and type conversions. */
19459 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
19460 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
19461 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
19462 cCE("fmstat", ef1fa10, 0, (), noargs),
19463 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
19464 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
19465 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
19466 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
19467 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
19468 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19469 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
19470 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
19471 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
19472 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
19474 /* Memory operations. */
19475 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19476 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
19477 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19478 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19479 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19480 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19481 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19482 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19483 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19484 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19485 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19486 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
19487 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19488 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
19489 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19490 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
19491 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19492 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
19494 /* Monadic operations. */
19495 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
19496 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
19497 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
19499 /* Dyadic operations. */
19500 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19501 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19502 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19503 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19504 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19505 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19506 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19507 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19508 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19511 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
19512 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
19513 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
19514 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
19516 /* Double precision load/store are still present on single precision
19517 implementations. */
19518 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19519 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
19520 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19521 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19522 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19523 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19524 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19525 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
19526 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19527 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
19530 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
19532 /* Moves and type conversions. */
19533 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19534 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19535 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19536 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
19537 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
19538 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
19539 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
19540 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
19541 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
19542 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19543 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19544 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
19545 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
19547 /* Monadic operations. */
19548 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19549 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19550 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19552 /* Dyadic operations. */
19553 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19554 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19555 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19556 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19557 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19558 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19559 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19560 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19561 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19564 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
19565 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
19566 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
19567 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
19570 #define ARM_VARIANT & fpu_vfp_ext_v2
19572 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
19573 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
19574 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
19575 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
19577 /* Instructions which may belong to either the Neon or VFP instruction sets.
19578 Individual encoder functions perform additional architecture checks. */
19580 #define ARM_VARIANT & fpu_vfp_ext_v1xd
19581 #undef THUMB_VARIANT
19582 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
19584 /* These mnemonics are unique to VFP. */
19585 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
19586 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
19587 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19588 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19589 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19590 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
19591 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
19592 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
19593 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
19594 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
19596 /* Mnemonics shared by Neon and VFP. */
19597 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
19598 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19599 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
19601 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19602 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
19604 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19605 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
19607 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19608 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19609 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19610 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19611 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19612 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
19613 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19614 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
19616 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
19617 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
19618 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
19619 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
19622 /* NOTE: All VMOV encoding is special-cased! */
19623 NCE(vmov, 0, 1, (VMOV), neon_mov),
19624 NCE(vmovq, 0, 1, (VMOV), neon_mov),
19626 #undef THUMB_VARIANT
19627 #define THUMB_VARIANT & fpu_neon_ext_v1
19629 #define ARM_VARIANT & fpu_neon_ext_v1
19631 /* Data processing with three registers of the same length. */
19632 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
19633 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
19634 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
19635 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19636 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19637 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19638 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19639 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
19640 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
19641 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
19642 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19643 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19644 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
19645 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
19646 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19647 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19648 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
19649 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
19650 /* If not immediate, fall back to neon_dyadic_i64_su.
19651 shl_imm should accept I8 I16 I32 I64,
19652 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
19653 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
19654 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
19655 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
19656 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
19657 /* Logic ops, types optional & ignored. */
19658 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19659 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19660 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19661 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19662 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19663 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19664 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
19665 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
19666 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
19667 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
19668 /* Bitfield ops, untyped. */
19669 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19670 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19671 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19672 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19673 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
19674 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
19675 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
19676 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19677 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19678 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19679 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19680 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
19681 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
19682 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
19683 back to neon_dyadic_if_su. */
19684 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19685 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19686 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
19687 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
19688 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19689 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19690 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
19691 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
19692 /* Comparison. Type I8 I16 I32 F32. */
19693 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
19694 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
19695 /* As above, D registers only. */
19696 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19697 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
19698 /* Int and float variants, signedness unimportant. */
19699 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19700 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
19701 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
19702 /* Add/sub take types I8 I16 I32 I64 F32. */
19703 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19704 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
19705 /* vtst takes sizes 8, 16, 32. */
19706 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
19707 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
19708 /* VMUL takes I8 I16 I32 F32 P8. */
19709 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
19710 /* VQD{R}MULH takes S16 S32. */
19711 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19712 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19713 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
19714 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
19715 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19716 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19717 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
19718 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
19719 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19720 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19721 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
19722 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
19723 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19724 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19725 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
19726 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
19728 /* Two address, int/float. Types S8 S16 S32 F32. */
19729 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
19730 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
19732 /* Data processing with two registers and a shift amount. */
19733 /* Right shifts, and variants with rounding.
19734 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
19735 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19736 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19737 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
19738 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
19739 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19740 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19741 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
19742 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
19743 /* Shift and insert. Sizes accepted 8 16 32 64. */
19744 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
19745 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
19746 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
19747 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
19748 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
19749 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
19750 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
19751 /* Right shift immediate, saturating & narrowing, with rounding variants.
19752 Types accepted S16 S32 S64 U16 U32 U64. */
19753 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19754 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
19755 /* As above, unsigned. Types accepted S16 S32 S64. */
19756 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19757 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
19758 /* Right shift narrowing. Types accepted I16 I32 I64. */
19759 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19760 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
19761 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
19762 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
19763 /* CVT with optional immediate for fixed-point variant. */
19764 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
19766 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
19767 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
19769 /* Data processing, three registers of different lengths. */
19770 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
19771 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
19772 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
19773 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
19774 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
19775 /* If not scalar, fall back to neon_dyadic_long.
19776 Vector types as above, scalar types S16 S32 U16 U32. */
19777 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19778 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
19779 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
19780 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19781 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
19782 /* Dyadic, narrowing insns. Types I16 I32 I64. */
19783 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19784 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19785 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19786 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
19787 /* Saturating doubling multiplies. Types S16 S32. */
19788 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19789 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19790 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
19791 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
19792 S16 S32 U16 U32. */
19793 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
19795 /* Extract. Size 8. */
19796 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
19797 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
19799 /* Two registers, miscellaneous. */
19800 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
19801 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
19802 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
19803 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
19804 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
19805 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
19806 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
19807 /* Vector replicate. Sizes 8 16 32. */
19808 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
19809 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
19810 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
19811 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
19812 /* VMOVN. Types I16 I32 I64. */
19813 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
19814 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
19815 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
19816 /* VQMOVUN. Types S16 S32 S64. */
19817 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
19818 /* VZIP / VUZP. Sizes 8 16 32. */
19819 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
19820 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
19821 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
19822 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
19823 /* VQABS / VQNEG. Types S8 S16 S32. */
19824 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19825 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
19826 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
19827 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
19828 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
19829 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
19830 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
19831 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
19832 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
19833 /* Reciprocal estimates. Types U32 F32. */
19834 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
19835 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
19836 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
19837 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
19838 /* VCLS. Types S8 S16 S32. */
19839 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
19840 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
19841 /* VCLZ. Types I8 I16 I32. */
19842 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
19843 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
19844 /* VCNT. Size 8. */
19845 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
19846 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
19847 /* Two address, untyped. */
19848 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
19849 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
19850 /* VTRN. Sizes 8 16 32. */
19851 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
19852 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
19854 /* Table lookup. Size 8. */
19855 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19856 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
19858 #undef THUMB_VARIANT
19859 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
19861 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
19863 /* Neon element/structure load/store. */
19864 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19865 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
19866 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19867 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
19868 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19869 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
19870 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19871 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
19873 #undef THUMB_VARIANT
19874 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
19876 #define ARM_VARIANT & fpu_vfp_ext_v3xd
19877 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
19878 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19879 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19880 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19881 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19882 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19883 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19884 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
19885 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
19887 #undef THUMB_VARIANT
19888 #define THUMB_VARIANT & fpu_vfp_ext_v3
19890 #define ARM_VARIANT & fpu_vfp_ext_v3
19892 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
19893 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19894 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19895 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19896 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19897 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19898 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19899 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
19900 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
19903 #define ARM_VARIANT & fpu_vfp_ext_fma
19904 #undef THUMB_VARIANT
19905 #define THUMB_VARIANT & fpu_vfp_ext_fma
19906 /* Mnemonics shared by Neon and VFP. These are included in the
19907 VFP FMA variant; NEON and VFP FMA always includes the NEON
19908 FMA instructions. */
19909 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19910 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
19911 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
19912 the v form should always be used. */
19913 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19914 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
19915 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19916 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
19917 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19918 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
19920 #undef THUMB_VARIANT
19922 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
19924 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19925 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19926 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19927 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19928 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19929 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
19930 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
19931 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
19934 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
19936 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
19937 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
19938 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
19939 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
19940 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
19941 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
19942 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
19943 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
19944 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
19945 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19946 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19947 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
19948 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19949 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19950 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
19951 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19952 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19953 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
19954 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
19955 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
19956 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19957 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19958 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19959 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19960 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19961 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
19962 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
19963 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
19964 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
19965 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
19966 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
19967 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
19968 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
19969 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
19970 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
19971 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
19972 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
19973 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19974 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19975 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19976 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19977 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19978 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19979 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19980 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19981 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19982 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
19983 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19984 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19985 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19986 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19987 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19988 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19989 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19990 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19991 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19992 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19993 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19994 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19995 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19996 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19997 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19998 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
19999 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20000 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20001 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20002 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20003 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20004 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20005 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20006 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20007 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20008 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20009 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20010 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20011 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20012 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20013 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20014 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20015 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20016 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20017 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20018 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20019 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20020 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20021 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20022 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20023 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20024 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
20025 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20026 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20027 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20028 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20029 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20030 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20031 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20032 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20033 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20034 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20035 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20036 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20037 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20038 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20039 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20040 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20041 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20042 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20043 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20044 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20045 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20046 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
20047 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20048 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20049 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20050 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20051 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20052 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20053 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20054 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20055 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20056 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20057 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20058 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20059 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20060 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20061 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20062 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20063 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
20064 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
20065 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20066 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
20067 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
20068 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
20069 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20070 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20071 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20072 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20073 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20074 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20075 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20076 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20077 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20078 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
20079 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
20080 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
20081 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
20082 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
20083 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
20084 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20085 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20086 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20087 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
20088 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
20089 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
20090 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
20091 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
20092 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
20093 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20094 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20095 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20096 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20097 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
20100 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
20102 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
20103 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
20104 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
20105 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
20106 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
20107 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
20108 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20109 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20110 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20111 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20112 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20113 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20114 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20115 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20116 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20117 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20118 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20119 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20120 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20121 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20122 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
20123 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20124 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20125 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20126 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20127 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20128 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20129 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20130 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20131 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20132 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20133 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20134 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20135 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20136 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20137 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20138 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20139 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20140 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20141 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20142 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20143 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20144 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20145 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20146 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20147 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20148 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20149 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20150 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20151 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20152 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20153 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20154 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20155 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20156 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20157 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20158 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
20161 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
20163 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20164 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20165 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20166 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20167 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
20168 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
20169 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
20170 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
20171 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
20172 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
20173 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
20174 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
20175 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
20176 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
20177 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
20178 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
20179 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
20180 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
20181 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
20182 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
20183 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
20184 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
20185 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
20186 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
20187 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
20188 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
20189 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
20190 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
20191 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
20192 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
20193 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
20194 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
20195 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
20196 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
20197 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
20198 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
20199 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
20200 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
20201 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
20202 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
20203 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
20204 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
20205 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
20206 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
20207 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
20208 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
20209 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
20210 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
20211 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
20212 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
20213 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
20214 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
20215 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
20216 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
20217 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
20218 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
20219 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
20220 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
20221 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
20222 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
20223 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
20224 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
20225 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
20226 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
20227 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20228 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20229 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20230 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20231 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20232 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
20233 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20234 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
20235 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20236 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
20237 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20238 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
20241 #undef THUMB_VARIANT
20267 /* MD interface: bits in the object file. */
20269 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
20270 for use in the a.out file, and stores them in the array pointed to by buf.
20271 This knows about the endian-ness of the target machine and does
20272 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
20273 2 (short) and 4 (long) Floating numbers are put out as a series of
20274 LITTLENUMS (shorts, here at least). */
/* Emit VAL into BUF as an N-byte number using the byte order selected
   by target_big_endian (N is 1, 2 or 4 per the comment in this file).
   NOTE(review): this excerpt is elided -- the return type, braces and
   the `else` keyword are not visible here.  */
20277 md_number_to_chars (char * buf, valueT val, int n)
20279 if (target_big_endian)
20280 number_to_chars_bigendian (buf, val, n);
/* Presumably the elided `else` branch: little-endian output.  */
20282 number_to_chars_littleendian (buf, val, n);
/* Read an N-byte number back out of BUF, honouring target_big_endian;
   the inverse of md_number_to_chars.
   NOTE(review): the accumulation loops, shifts and the final return
   are elided from this excerpt.  */
20286 md_chars_to_number (char * buf, int n)
/* Use unsigned char so the `& 255` masking below is well defined
   regardless of the platform's char signedness.  */
20289 unsigned char * where = (unsigned char *) buf;
20291 if (target_big_endian)
/* Big-endian: consume bytes front-to-back (enclosing loop elided).  */
20296 result |= (*where++ & 255);
/* Little-endian path: index from the high end (enclosing loop elided).  */
20304 result |= (where[n] & 255);
20311 /* MD interface: Sections. */
20313 /* Calculate the maximum variable size (i.e., excluding fr_fix)
20314 that an rs_machine_dependent frag may reach. */
/* Return the maximum variable size (excluding fr_fix) that an
   rs_machine_dependent frag may reach.
   NOTE(review): the return statement is elided in this excerpt; per
   the comment below the answer is the wide-instruction size.  */
20317 arm_frag_max_var (fragS *fragp)
20319 /* We only use rs_machine_dependent for variable-size Thumb instructions,
20320 which are either THUMB_SIZE (2) or INSN_SIZE (4).
20322 Note that we generate relaxable instructions even for cases that don't
20323 really need it, like an immediate that's a trivial constant. So we're
20324 overestimating the instruction size for some of those cases. Rather
20325 than putting more intelligence here, it would probably be better to
20326 avoid generating a relaxation frag in the first place when it can be
20327 determined up front that a short instruction will suffice. */
/* Only relaxable frags are expected here.  */
20329 gas_assert (fragp->fr_type == rs_machine_dependent);
20333 /* Estimate the size of a frag before relaxing. Assume everything fits in
/* Standard gas hook: give a first-pass size estimate for a relaxable
   frag before relaxation runs.
   NOTE(review): the body is elided in this excerpt; per the comment
   above it assumes everything fits in the narrow (2-byte) form.  */
20337 md_estimate_size_before_relax (fragS * fragp,
20338 segT segtype ATTRIBUTE_UNUSED)
20344 /* Convert a machine dependent frag. */
/* Finalise a relaxed Thumb rs_machine_dependent frag: rewrite the
   16-bit instruction at the frag's variable part as either its narrow
   or wide (fr_var == 4) encoding, then attach a fix-up with the
   matching BFD relocation type.
   NOTE(review): this excerpt is heavily elided -- several case labels,
   braces, `break`s and local declarations are not visible, so the
   per-case comments below are hedged where the pairing is unclear.  */
20347 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
20349 unsigned long insn;
20350 unsigned long old_op;
/* The instruction to rewrite lives at the start of the variable part.  */
20358 buf = fragp->fr_literal + fragp->fr_fix;
/* Original narrow opcode; field values are extracted from it below.  */
20360 old_op = bfd_get_16(abfd, buf);
20361 if (fragp->fr_symbol)
20363 exp.X_op = O_symbol;
20364 exp.X_add_symbol = fragp->fr_symbol;
/* Presumably the elided `else`: no symbol, a plain constant.  */
20368 exp.X_op = O_constant;
20370 exp.X_add_number = fragp->fr_offset;
/* fr_subtype carries the Thumb mnemonic this frag was built from.  */
20371 opcode = fragp->fr_subtype;
20374 case T_MNEM_ldr_pc:
20375 case T_MNEM_ldr_pc2:
20376 case T_MNEM_ldr_sp:
20377 case T_MNEM_str_sp:
/* Load/store group: widen to the 32-bit form when relaxation chose it.  */
20384 if (fragp->fr_var == 4)
20386 insn = THUMB_OP32 (opcode);
/* Top nibble 4/9 distinguishes the SP/PC-relative narrow encodings.  */
20387 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
20389 insn |= (old_op & 0x700) << 4;
/* Re-position the narrow Rd (bits 0-2) and Rn (bits 3-5) fields.  */
20393 insn |= (old_op & 7) << 12;
20394 insn |= (old_op & 0x38) << 13;
20396 insn |= 0x00000c00;
20397 put_thumb32_insn (buf, insn);
20398 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
/* Narrow form keeps the 16-bit Thumb offset reloc.  */
20402 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
/* Only the second ldr-pc variant is pc-relative.  */
20404 pc_rel = (opcode == T_MNEM_ldr_pc2);
/* NOTE(review): the case label for this next group (adr?) is elided.  */
20407 if (fragp->fr_var == 4)
20409 insn = THUMB_OP32 (opcode);
20410 insn |= (old_op & 0xf0) << 4;
20411 put_thumb32_insn (buf, insn);
20412 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
20416 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
/* Narrow adr is relative to the aligned PC + 4.  */
20417 exp.X_add_number -= 4;
/* Immediate-operand ALU group (mov/movs/others -- labels elided).  */
20425 if (fragp->fr_var == 4)
/* mov/movs place Rd at bit 8 of the narrow form, others at bit 0.  */
20427 int r0off = (opcode == T_MNEM_mov
20428 || opcode == T_MNEM_movs) ? 0 : 8;
20429 insn = THUMB_OP32 (opcode);
20430 insn = (insn & 0xe1ffffff) | 0x10000000;
20431 insn |= (old_op & 0x700) << r0off;
20432 put_thumb32_insn (buf, insn);
20433 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
20437 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
/* Unconditional branch (label elided): B.W vs 12-bit narrow branch.  */
20442 if (fragp->fr_var == 4)
20444 insn = THUMB_OP32(opcode);
20445 put_thumb32_insn (buf, insn);
20446 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
20449 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
/* Conditional branch (label elided): condition moves from bits 8-11.  */
20453 if (fragp->fr_var == 4)
20455 insn = THUMB_OP32(opcode);
20456 insn |= (old_op & 0xf00) << 14;
20457 put_thumb32_insn (buf, insn);
20458 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
20461 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
20464 case T_MNEM_add_sp:
20465 case T_MNEM_add_pc:
20466 case T_MNEM_inc_sp:
20467 case T_MNEM_dec_sp:
20468 if (fragp->fr_var == 4)
20470 /* ??? Choose between add and addw. */
20471 insn = THUMB_OP32 (opcode);
20472 insn |= (old_op & 0xf0) << 4;
20473 put_thumb32_insn (buf, insn);
20474 if (opcode == T_MNEM_add_pc)
20475 reloc_type = BFD_RELOC_ARM_T32_IMM12;
20477 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
20480 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
/* Three-operand add/sub immediate (labels elided).  */
20488 if (fragp->fr_var == 4)
20490 insn = THUMB_OP32 (opcode);
20491 insn |= (old_op & 0xf0) << 4;
20492 insn |= (old_op & 0xf) << 16;
20493 put_thumb32_insn (buf, insn);
/* Bit 20 set means the flag-setting (S) form; pick the reloc to match.  */
20494 if (insn & (1 << 20))
20495 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
20497 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
20500 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
/* Attach the fix-up covering the variable part and account for it.  */
20506 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
20507 (enum bfd_reloc_code_real) reloc_type);
20508 fixp->fx_file = fragp->fr_file;
20509 fixp->fx_line = fragp->fr_line;
20510 fragp->fr_fix += fragp->fr_var;
20513 /* Return the size of a relaxable immediate operand instruction.
20514 SHIFT and SIZE specify the form of the allowable immediate. */
/* Return the size of a relaxable immediate-operand instruction, where
   the narrow form takes a SIZE-bit immediate shifted left by SHIFT.
   NOTE(review): the return statements are elided in this excerpt;
   presumably 2 for in-range immediates and 4 otherwise.  */
20516 relax_immediate (fragS *fragp, int size, int shift)
20522 /* ??? Should be able to do better than this. */
/* A symbolic operand can't be range-checked yet; assume the wide form.  */
20523 if (fragp->fr_symbol)
/* Bits below the shift must be zero for the narrow encoding.  */
20526 low = (1 << shift) - 1;
/* Bits representable by the SIZE-bit field at position SHIFT.  */
20527 mask = (1 << (shift + size)) - (1 << shift);
20528 offset = fragp->fr_offset;
20529 /* Force misaligned offsets to 32-bit variant. */
/* Out-of-range offset: the wide form is required.  */
20532 if (offset & ~mask)
20537 /* Get the address of a symbol during relaxation. */
/* Return the address of the frag's symbol (plus fr_offset) as seen
   during relaxation, compensating for frags that have not yet been
   processed on this pass.  STRETCH is the net size change so far.
   NOTE(review): several lines (declarations, the stretch adjustment
   itself and the final return) are elided from this excerpt.  */
20539 relaxed_symbol_addr (fragS *fragp, long stretch)
20545 sym = fragp->fr_symbol;
20546 sym_frag = symbol_get_frag (sym);
20547 know (S_GET_SEGMENT (sym) != absolute_section
20548 || sym_frag == &zero_address_frag);
20549 addr = S_GET_VALUE (sym) + fragp->fr_offset;
20551 /* If frag has yet to be reached on this pass, assume it will
20552 move by STRETCH just as we did. If this is not so, it will
20553 be because some frag between grows, and that will force
/* relax_marker differing means the symbol's frag is still unprocessed.  */
20557 && sym_frag->relax_marker != fragp->relax_marker)
20561 /* Adjust stretch for any alignment frag. Note that if have
20562 been expanding the earlier code, the symbol may be
20563 defined in what appears to be an earlier frag. FIXME:
20564 This doesn't handle the fr_subtype field, which specifies
20565 a maximum number of bytes to skip when doing an
/* Walk forward to the symbol's frag, clipping STRETCH at each
   alignment frag since alignment absorbs part of the movement.  */
20567 for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
20569 if (f->fr_type == rs_align || f->fr_type == rs_align_code)
/* Negative stretch: round towards zero on the alignment boundary.  */
20572 stretch = - ((- stretch)
20573 & ~ ((1 << (int) f->fr_offset) - 1));
20575 stretch &= ~ ((1 << (int) f->fr_offset) - 1);
20587 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
/* Return the size of a relaxable adr pseudo-instruction or PC-relative
   load (narrow form if the target is within 0..1020 of the aligned PC).
   NOTE(review): the return statements are elided in this excerpt.  */
20590 relax_adr (fragS *fragp, asection *sec, long stretch)
20595 /* Assume worst case for symbols not known to be in the same section. */
20596 if (fragp->fr_symbol == NULL
20597 || !S_IS_DEFINED (fragp->fr_symbol)
20598 || sec != S_GET_SEGMENT (fragp->fr_symbol)
20599 || S_IS_WEAK (fragp->fr_symbol))
20602 val = relaxed_symbol_addr (fragp, stretch);
20603 addr = fragp->fr_address + fragp->fr_fix;
/* adr is relative to the 4-byte-aligned PC + 4.  */
20604 addr = (addr + 4) & ~3;
20605 /* Force misaligned targets to 32-bit variant. */
/* Narrow form only reaches word offsets 0..1020.  */
20609 if (val < 0 || val > 1020)
20614 /* Return the size of a relaxable add/sub immediate instruction. */
/* Return the size of a relaxable add/sub immediate instruction.  */
20616 relax_addsub (fragS *fragp, asection *sec)
20621 buf = fragp->fr_literal + fragp->fr_fix;
20622 op = bfd_get_16(sec->owner, buf);
/* If Rd equals Rn the 2-operand form with an 8-bit immediate applies;
   otherwise only a 3-bit immediate fits in the narrow encoding.  */
20623 if ((op & 0xf) == ((op >> 4) & 0xf))
20624 return relax_immediate (fragp, 8, 0);
20626 return relax_immediate (fragp, 3, 0);
20629 /* Return TRUE iff the definition of symbol S could be pre-empted
20630 (overridden) at link or load time. */
/* Return TRUE iff the definition of symbol S could be pre-empted
   (overridden) at link or load time.
   NOTE(review): the return statements and one early `if` body are
   elided from this excerpt.  */
20632 symbol_preemptible (symbolS *s)
20634 /* Weak symbols can always be pre-empted. */
20638 /* Non-global symbols cannot be pre-empted. */
20639 if (! S_IS_EXTERNAL (s))
20643 /* In ELF, a global symbol can be marked protected, or private. In that
20644 case it can't be pre-empted (other definitions in the same link unit
20645 would violate the ODR). */
20646 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
20650 /* Other global symbols might be pre-empted. */
20654 /* Return the size of a relaxable branch instruction. BITS is the
20655 size of the offset field in the narrow instruction. */
/* Return the size of a relaxable branch instruction.  BITS is the
   size of the offset field in the narrow instruction.
   NOTE(review): return statements and the limit computation line are
   elided from this excerpt.  */
20658 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
20664 /* Assume worst case for symbols not known to be in the same section. */
20665 if (!S_IS_DEFINED (fragp->fr_symbol)
20666 || sec != S_GET_SEGMENT (fragp->fr_symbol)
20667 || S_IS_WEAK (fragp->fr_symbol))
20671 /* A branch to a function in ARM state will require interworking. */
20672 if (S_IS_DEFINED (fragp->fr_symbol)
20673 && ARM_IS_FUNC (fragp->fr_symbol))
/* A preemptible target may be redirected at link time; be pessimistic.  */
20677 if (symbol_preemptible (fragp->fr_symbol))
20680 val = relaxed_symbol_addr (fragp, stretch);
/* Branches are relative to PC + 4.  */
20681 addr = fragp->fr_address + fragp->fr_fix + 4;
20684 /* Offset is a signed value *2 */
20686 if (val >= limit || val < -limit)
20692 /* Relax a machine dependent frag. This returns the amount by which
20693 the current size of the frag should change. */
/* Relax a machine dependent frag: recompute its variable size from the
   mnemonic recorded in fr_subtype and return the change in size.
   NOTE(review): several case labels and `break`s are elided in this
   excerpt; comments on unlabeled groups below are hedged.  */
20696 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
20701 oldsize = fragp->fr_var;
20702 switch (fragp->fr_subtype)
20704 case T_MNEM_ldr_pc2:
20705 newsize = relax_adr (fragp, sec, stretch);
20707 case T_MNEM_ldr_pc:
20708 case T_MNEM_ldr_sp:
20709 case T_MNEM_str_sp:
/* SP/PC-relative word access: 8-bit immediate, scaled by 4.  */
20710 newsize = relax_immediate (fragp, 8, 2);
/* Word / halfword / byte load-store groups (labels elided): 5-bit
   immediate scaled by the access size.  */
20714 newsize = relax_immediate (fragp, 5, 2);
20718 newsize = relax_immediate (fragp, 5, 1);
20722 newsize = relax_immediate (fragp, 5, 0);
/* Presumably the adr case (label elided).  */
20725 newsize = relax_adr (fragp, sec, stretch);
/* ALU immediate group (labels elided): plain 8-bit immediate.  */
20731 newsize = relax_immediate (fragp, 8, 0);
/* Unconditional branch: 11-bit offset field (label elided).  */
20734 newsize = relax_branch (fragp, sec, 11, stretch);
/* Conditional branch: 8-bit offset field (label elided).  */
20737 newsize = relax_branch (fragp, sec, 8, stretch);
20739 case T_MNEM_add_sp:
20740 case T_MNEM_add_pc:
20741 newsize = relax_immediate (fragp, 8, 2);
20743 case T_MNEM_inc_sp:
20744 case T_MNEM_dec_sp:
20745 newsize = relax_immediate (fragp, 7, 2);
/* Generic add/sub (labels elided).  */
20751 newsize = relax_addsub (fragp, sec);
20757 fragp->fr_var = newsize;
20758 /* Freeze wide instructions that are at or before the same location as
20759 in the previous pass. This avoids infinite loops.
20760 Don't freeze them unconditionally because targets may be artificially
20761 misaligned by the expansion of preceding frags. */
20762 if (stretch <= 0 && newsize > 2)
/* Convert now so the frag's size can no longer oscillate.  */
20764 md_convert_frag (sec->owner, sec, fragp);
20768 return newsize - oldsize;
20771 /* Round up a section size to the appropriate boundary. */
20774 md_section_align (segT segment ATTRIBUTE_UNUSED,
20777 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
20778 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
20780 /* For a.out, force the section size to be aligned. If we don't do
20781 this, BFD will align it for us, but it will not write out the
20782 final bytes of the section. This may be a bug in BFD, but it is
20783 easier to fix it here since that is how the other a.out targets
20787 align = bfd_get_section_alignment (stdoutput, segment);
20788 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
20795 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
20796 of an rs_align_code fragment. */
/* NOTE(review): several original lines of this function are missing from
   this extract (variable declarations, some braces and else-arms); the
   comments below only describe what the visible lines show.  */
20799 arm_handle_align (fragS * fragP)
/* ARM-state noops, indexed [has-v6K?][big-endian?][byte]: row 0 is the
   pre-v6K "mov r0, r0" encoding (0xe1a00000), row 1 the architected NOP
   (0xe320f000) -- see the arm_noop[...] selection further down.  */
20801 static char const arm_noop[2][2][4] =
20804 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
20805 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
20808 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
20809 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
/* Thumb noops, indexed [architected?][big-endian?][byte]: row 0 is
   "mov r8, r8" (0x46c0), row 1 the architected NOP (0xbf00), selected by
   the v6t2 feature test below.  */
20812 static char const thumb_noop[2][2][2] =
20815 {0xc0, 0x46}, /* LE */
20816 {0x46, 0xc0}, /* BE */
20819 {0x00, 0xbf}, /* LE */
20820 {0xbf, 0x00} /* BE */
20823 static char const wide_thumb_noop[2][4] =
20824 { /* Wide Thumb-2 */
20825 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
20826 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
20829 unsigned bytes, fix, noop_size;
20832 const char *narrow_noop = NULL;
/* Only code-alignment frags are filled in here.  */
20837 if (fragP->fr_type != rs_align_code)
/* Number of padding bytes this alignment must produce, and where they go.  */
20840 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
20841 p = fragP->fr_literal + fragP->fr_fix;
20844 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
20845 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
/* arm_init_frag must already have recorded the ARM/Thumb mode.  */
20847 gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
20849 if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
20851 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
/* Thumb-2 capable: pad with wide noops, using one narrow noop when the
   byte count is not a multiple of the wide noop size.  */
20853 narrow_noop = thumb_noop[1][target_big_endian];
20854 noop = wide_thumb_noop[target_big_endian];
20857 noop = thumb_noop[0][target_big_endian];
20865 noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k) != 0]
20866 [target_big_endian];
20873 fragP->fr_var = noop_size;
/* Leading bytes that cannot hold a whole noop are zero-filled and marked
   as data via a mapping symbol.  */
20875 if (bytes & (noop_size - 1))
20877 fix = bytes & (noop_size - 1);
20879 insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
20881 memset (p, 0, fix);
20888 if (bytes & noop_size)
20890 /* Insert a narrow noop. */
20891 memcpy (p, narrow_noop, noop_size);
20893 bytes -= noop_size;
20897 /* Use wide noops for the remainder */
20901 while (bytes >= noop_size)
20903 memcpy (p, noop, noop_size);
20905 bytes -= noop_size;
20909 fragP->fr_fix += fix;
20912 /* Called from md_do_align. Used to create an alignment
20913 frag in a code section. */
20916 arm_frag_align_code (int n, int max)
20920 /* We assume that there will never be a requirement
20921 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
20922 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
20927 _("alignments greater than %d bytes not supported in .text sections."),
20928 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
20929 as_fatal ("%s", err_msg);
20932 p = frag_var (rs_align_code,
20933 MAX_MEM_FOR_RS_ALIGN_CODE,
20935 (relax_substateT) max,
20942 /* Perform target specific initialisation of a frag.
20943 Note - despite the name this initialisation is not done when the frag
20944 is created, but only when its type is assigned. A frag can be created
20945 and used a long time before its type is set, so beware of assuming that
20946 this initialisationis performed first. */
20950 arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
20952 /* Record whether this frag is in an ARM or a THUMB area. */
20953 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
20956 #else /* OBJ_ELF is defined. */
/* ELF variant of arm_init_frag: besides recording the ARM/Thumb mode it
   emits mapping symbols for alignment frags.  NOTE(review): some lines
   of this function (braces, additional case labels, break statements)
   are missing from this extract.  */
20958 arm_init_frag (fragS * fragP, int max_chars)
20960 /* If the current ARM vs THUMB mode has not already
20961 been recorded into this frag then do so now. */
20962 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
20964 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
20966 /* Record a mapping symbol for alignment frags. We will delete this
20967 later if the alignment ends up empty. */
20968 switch (fragP->fr_type)
/* Data-like alignment padding is marked MAP_DATA.  */
20971 case rs_align_test:
20973 mapping_state_2 (MAP_DATA, max_chars);
/* Code alignment is marked with the current ARM/Thumb state.  */
20975 case rs_align_code:
20976 mapping_state_2 (thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
20984 /* When we change sections we need to issue a new mapping symbol. */
20987 arm_elf_change_section (void)
20989 /* Link an unlinked unwind index table section to the .text section. */
20990 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
20991 && elf_linked_to_section (now_seg) == NULL)
20992 elf_linked_to_section (now_seg) = text_section;
20996 arm_elf_section_type (const char * str, size_t len)
20998 if (len == 5 && strncmp (str, "exidx", 5) == 0)
20999 return SHT_ARM_EXIDX;
21004 /* Code to deal with unwinding tables. */
21006 static void add_unwind_adjustsp (offsetT);
21008 /* Generate any deferred unwind frame offset. */
21011 flush_pending_unwind (void)
21015 offset = unwind.pending_offset;
21016 unwind.pending_offset = 0;
21018 add_unwind_adjustsp (offset);
21021 /* Add an opcode to this list for this function. Two-byte opcodes should
21022 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
21026 add_unwind_opcode (valueT op, int length)
21028 /* Add any deferred stack adjustment. */
21029 if (unwind.pending_offset)
21030 flush_pending_unwind ();
21032 unwind.sp_restored = 0;
21034 if (unwind.opcode_count + length > unwind.opcode_alloc)
21036 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
21037 if (unwind.opcodes)
21038 unwind.opcodes = (unsigned char *) xrealloc (unwind.opcodes,
21039 unwind.opcode_alloc);
21041 unwind.opcodes = (unsigned char *) xmalloc (unwind.opcode_alloc);
21046 unwind.opcodes[unwind.opcode_count] = op & 0xff;
21048 unwind.opcode_count++;
21052 /* Add unwind opcodes to adjust the stack pointer. */
/* NOTE(review): gaps in this extract hide parts of the control flow
   (braces, the uleb128-emission loop and the negative-offset loop).
   The byte values used (0x00..0x3f, 0x40..0x7f, 0xb2) match the ARM
   EHABI stack-adjustment unwind opcodes -- see the EHABI specification
   for the exact encodings.  */
21055 add_unwind_adjustsp (offsetT offset)
/* Large increment: long form 0xb2 followed by uleb128((offset-0x204)>>2).  */
21059 if (offset > 0x200)
21061 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
21066 /* Long form: 0xb2, uleb128. */
21067 /* This might not fit in a word so add the individual bytes,
21068 remembering the list is built in reverse order. */
21069 o = (valueT) ((offset - 0x204) >> 2);
21071 add_unwind_opcode (0, 1);
21073 /* Calculate the uleb128 encoding of the offset. */
21077 bytes[n] = o & 0x7f;
21083 /* Add the insn. */
21085 add_unwind_opcode (bytes[n - 1], 1);
21086 add_unwind_opcode (0xb2, 1);
/* Medium increment: two short opcodes (0x3f then the remainder).  */
21088 else if (offset > 0x100)
21090 /* Two short opcodes. */
21091 add_unwind_opcode (0x3f, 1);
21092 op = (offset - 0x104) >> 2;
21093 add_unwind_opcode (op, 1);
/* Small positive increment: one short opcode encoding (offset-4)/4.  */
21095 else if (offset > 0)
21097 /* Short opcode. */
21098 op = (offset - 4) >> 2;
21099 add_unwind_opcode (op, 1);
/* Negative adjustment: 0x7f chunks, then a final 0x40-based opcode.  */
21101 else if (offset < 0)
21104 while (offset > 0x100)
21106 add_unwind_opcode (0x7f, 1);
21109 op = ((offset - 4) >> 2) | 0x40;
21110 add_unwind_opcode (op, 1);
21114 /* Finish the list of unwind opcodes for this function. */
21116 finish_unwind_opcodes (void)
21120 if (unwind.fp_used)
21122 /* Adjust sp as necessary. */
21123 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
21124 flush_pending_unwind ();
21126 /* After restoring sp from the frame pointer. */
21127 op = 0x90 | unwind.fp_reg;
21128 add_unwind_opcode (op, 1);
21131 flush_pending_unwind ();
21135 /* Start an exception table entry. If idx is nonzero this is an index table
/* NOTE(review): this extract is missing lines (the continuation of the
   comment above, several declarations, and the if/else skeleton that
   selects between the two prefix assignments below).  Comments stick to
   what the visible code establishes.  */
21139 start_unwind_section (const segT text_seg, int idx)
21141 const char * text_name;
21142 const char * prefix;
21143 const char * prefix_once;
21144 const char * group_name;
21148 size_t sec_name_len;
/* Index tables use the ARM unwind-index section prefix and the
   SHT_ARM_EXIDX section type...  */
21155 prefix = ELF_STRING_ARM_unwind;
21156 prefix_once = ELF_STRING_ARM_unwind_once;
21157 type = SHT_ARM_EXIDX;
/* ...while unwind descriptor data goes into ordinary PROGBITS sections.  */
21161 prefix = ELF_STRING_ARM_unwind_info;
21162 prefix_once = ELF_STRING_ARM_unwind_info_once;
21163 type = SHT_PROGBITS;
21166 text_name = segment_name (text_seg);
21167 if (streq (text_name, ".text"))
/* Link-once text sections use the "once" prefix and drop their own
   ".gnu.linkonce.t." prefix from the generated name.  */
21170 if (strncmp (text_name, ".gnu.linkonce.t.",
21171 strlen (".gnu.linkonce.t.")) == 0)
21173 prefix = prefix_once;
21174 text_name += strlen (".gnu.linkonce.t.");
/* Build the section name "<prefix><text-section-name>".  */
21177 prefix_len = strlen (prefix);
21178 text_len = strlen (text_name);
21179 sec_name_len = prefix_len + text_len;
21180 sec_name = (char *) xmalloc (sec_name_len + 1);
21181 memcpy (sec_name, prefix, prefix_len);
21182 memcpy (sec_name + prefix_len, text_name, text_len);
21183 sec_name[prefix_len + text_len] = '\0';
21189 /* Handle COMDAT group. */
21190 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
21192 group_name = elf_group_name (text_seg);
21193 if (group_name == NULL)
21195 as_bad (_("Group section `%s' has no group signature"),
21196 segment_name (text_seg));
21197 ignore_rest_of_line ();
21200 flags |= SHF_GROUP;
21204 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
21206 /* Set the section link for index tables. */
21208 elf_linked_to_section (now_seg) = text_seg;
21212 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
21213 personality routine data. Returns zero, or the index table value for
21214 an inline entry. */
/* NOTE(review): many lines of this function are missing from this
   extract (declarations, braces, case labels, loop bodies); the
   comments below only annotate the visible statements.  */
21217 create_unwind_entry (int have_data)
21222 /* The current word of data. */
21224 /* The number of bytes left in this word. */
21227 finish_unwind_opcodes ();
21229 /* Remember the current text section. */
21230 unwind.saved_seg = now_seg;
21231 unwind.saved_subseg = now_subseg;
21233 start_unwind_section (now_seg, 0);
21235 if (unwind.personality_routine == NULL)
/* personality_index == -2 marks a .cantunwind frame.  */
21237 if (unwind.personality_index == -2)
21240 as_bad (_("handlerdata in cantunwind frame"));
21241 return 1; /* EXIDX_CANTUNWIND. */
21244 /* Use a default personality routine if none is specified. */
21245 if (unwind.personality_index == -1)
/* Routine 0 can inline at most three opcode bytes; fall back to 1.  */
21247 if (unwind.opcode_count > 3)
21248 unwind.personality_index = 1;
21250 unwind.personality_index = 0;
21253 /* Space for the personality routine entry. */
21254 if (unwind.personality_index == 0)
21256 if (unwind.opcode_count > 3)
21257 as_bad (_("too many unwind opcodes for personality routine 0"));
21261 /* All the data is inline in the index table. */
/* Pack the (reversed) opcode list into a single word, MSB first.  */
21264 while (unwind.opcode_count > 0)
21266 unwind.opcode_count--;
21267 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
21271 /* Pad with "finish" opcodes. */
21273 data = (data << 8) | 0xb0;
21280 /* We get two opcodes "free" in the first word. */
21281 size = unwind.opcode_count - 2;
21285 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
21286 if (unwind.personality_index != -1)
21288 as_bad (_("attempt to recreate an unwind entry"));
21292 /* An extra byte is required for the opcode count. */
21293 size = unwind.opcode_count + 1;
/* Round the byte count up to whole words.  */
21296 size = (size + 3) >> 2;
21298 as_bad (_("too many unwind opcodes"));
/* Align to a word boundary and remember where this entry starts.  */
21300 frag_align (2, 0, 0);
21301 record_alignment (now_seg, 2);
21302 unwind.table_entry = expr_build_dot ();
21304 /* Allocate the table entry. */
21305 ptr = frag_more ((size << 2) + 4);
21306 /* PR 13449: Zero the table entries in case some of them are not used. */
21307 memset (ptr, 0, (size << 2) + 4);
21308 where = frag_now_fix () - ((size << 2) + 4);
21310 switch (unwind.personality_index)
21313 /* ??? Should this be a PLT generating relocation? */
21314 /* Custom personality routine. */
21315 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
21316 BFD_RELOC_ARM_PREL31);
21321 /* Set the first byte to the number of additional words. */
21322 data = size > 0 ? size - 1 : 0;
21326 /* ABI defined personality routines. */
21328 /* Three opcodes bytes are packed into the first word. */
21335 /* The size and first two opcode bytes go in the first word. */
21336 data = ((0x80 + unwind.personality_index) << 8) | size;
21341 /* Should never happen. */
21345 /* Pack the opcodes into words (MSB first), reversing the list at the same
21347 while (unwind.opcode_count > 0)
21351 md_number_to_chars (ptr, data, 4);
21356 unwind.opcode_count--;
21358 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
21361 /* Finish off the last word. */
21364 /* Pad with "finish" opcodes. */
21366 data = (data << 8) | 0xb0;
21368 md_number_to_chars (ptr, data, 4);
21373 /* Add an empty descriptor if there is no user-specified data. */
21374 ptr = frag_more (4);
21375 md_number_to_chars (ptr, 0, 4);
21382 /* Initialize the DWARF-2 unwind information for this procedure. */
21385 tc_arm_frame_initial_instructions (void)
21387 cfi_add_CFA_def_cfa (REG_SP, 0);
21389 #endif /* OBJ_ELF */
21391 /* Convert REGNAME to a DWARF-2 register number. */
21394 tc_arm_regname_to_dw2regnum (char *regname)
21396 int reg = arm_reg_parse (®name, REG_TYPE_RN);
21400 /* PR 16694: Allow VFP registers as well. */
21401 reg = arm_reg_parse (®name, REG_TYPE_VFS);
21405 reg = arm_reg_parse (®name, REG_TYPE_VFD);
21414 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
21418 exp.X_op = O_secrel;
21419 exp.X_add_symbol = symbol;
21420 exp.X_add_number = 0;
21421 emit_expr (&exp, size);
21425 /* MD interface: Symbol and relocation handling. */
21427 /* Return the address within the segment that a PC-relative fixup is
21428 relative to. For ARM, PC-relative fixups applied to instructions
21429 are generally relative to the location of the fixup plus 8 bytes.
21430 Thumb branches are offset by 4, and Thumb loads relative to PC
21431 require special handling. */
/* NOTE(review): this extract has gaps -- several conditions, case labels
   and return statements are missing.  Comments below annotate only the
   visible lines.  */
21434 md_pcrel_from_section (fixS * fixP, segT seg)
/* Raw address of the fixup itself; the per-reloc cases add the pipeline
   bias on top of this.  */
21436 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
21438 /* If this is pc-relative and we are going to emit a relocation
21439 then we just want to put out any pipeline compensation that the linker
21440 will need. Otherwise we want to use the calculated base.
21441 For WinCE we skip the bias for externals as well, since this
21442 is how the MS ARM-CE assembler behaves and we want to be compatible. */
21444 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
21445 || (arm_force_relocation (fixP)
21447 && !S_IS_EXTERNAL (fixP->fx_addsy)
21453 switch (fixP->fx_r_type)
21455 /* PC relative addressing on the Thumb is slightly odd as the
21456 bottom two bits of the PC are forced to zero for the
21457 calculation. This happens *after* application of the
21458 pipeline offset. However, Thumb adrl already adjusts for
21459 this, so we need not do it again. */
21460 case BFD_RELOC_ARM_THUMB_ADD:
21463 case BFD_RELOC_ARM_THUMB_OFFSET:
21464 case BFD_RELOC_ARM_T32_OFFSET_IMM:
21465 case BFD_RELOC_ARM_T32_ADD_PC12:
21466 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
/* Thumb PC-relative loads: bias of +4 with the low two bits cleared.  */
21467 return (base + 4) & ~3;
21469 /* Thumb branches are simply offset by +4. */
21470 case BFD_RELOC_THUMB_PCREL_BRANCH7:
21471 case BFD_RELOC_THUMB_PCREL_BRANCH9:
21472 case BFD_RELOC_THUMB_PCREL_BRANCH12:
21473 case BFD_RELOC_THUMB_PCREL_BRANCH20:
21474 case BFD_RELOC_THUMB_PCREL_BRANCH25:
21477 case BFD_RELOC_THUMB_PCREL_BRANCH23:
/* BL to a locally-defined ARM function on v5T+: keep the raw base so
   the interworking fixup sees the true location.  */
21479 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21480 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21481 && ARM_IS_FUNC (fixP->fx_addsy)
21482 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21483 base = fixP->fx_where + fixP->fx_frag->fr_address;
21486 /* BLX is like branches above, but forces the low two bits of PC to
21488 case BFD_RELOC_THUMB_PCREL_BLX:
21490 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21491 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21492 && THUMB_IS_FUNC (fixP->fx_addsy)
21493 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21494 base = fixP->fx_where + fixP->fx_frag->fr_address;
21495 return (base + 4) & ~3;
21497 /* ARM mode branches are offset by +8. However, the Windows CE
21498 loader expects the relocation not to take this into account. */
21499 case BFD_RELOC_ARM_PCREL_BLX:
21501 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21502 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21503 && ARM_IS_FUNC (fixP->fx_addsy)
21504 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21505 base = fixP->fx_where + fixP->fx_frag->fr_address;
21508 case BFD_RELOC_ARM_PCREL_CALL:
21510 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21511 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
21512 && THUMB_IS_FUNC (fixP->fx_addsy)
21513 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
21514 base = fixP->fx_where + fixP->fx_frag->fr_address;
21517 case BFD_RELOC_ARM_PCREL_BRANCH:
21518 case BFD_RELOC_ARM_PCREL_JUMP:
21519 case BFD_RELOC_ARM_PLT32:
21521 /* When handling fixups immediately, because we have already
21522 discovered the value of a symbol, or the address of the frag involved
21523 we must account for the offset by +8, as the OS loader will never see the reloc.
21524 see fixup_segment() in write.c
21525 The S_IS_EXTERNAL test handles the case of global symbols.
21526 Those need the calculated base, not just the pipe compensation the linker will need. */
21528 && fixP->fx_addsy != NULL
21529 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
21530 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
21538 /* ARM mode loads relative to PC are also offset by +8. Unlike
21539 branches, the Windows CE loader *does* expect the relocation
21540 to take this into account. */
21541 case BFD_RELOC_ARM_OFFSET_IMM:
21542 case BFD_RELOC_ARM_OFFSET_IMM8:
21543 case BFD_RELOC_ARM_HWLITERAL:
21544 case BFD_RELOC_ARM_LITERAL:
21545 case BFD_RELOC_ARM_CP_OFF_IMM:
21549 /* Other PC-relative relocations are un-offset. */
21555 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
21556 Otherwise we have no need to default values of symbols. */
21559 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
21562 if (name[0] == '_' && name[1] == 'G'
21563 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
21567 if (symbol_find (name))
21568 as_bad (_("GOT already in the symbol table"));
21570 GOT_symbol = symbol_new (name, undefined_section,
21571 (valueT) 0, & zero_address_frag);
21581 /* Subroutine of md_apply_fix. Check to see if an immediate can be
21582 computed as two separate immediate values, added together. We
21583 already know that this value cannot be computed by just one ARM
21586 static unsigned int
21587 validate_immediate_twopart (unsigned int val,
21588 unsigned int * highpart)
21593 for (i = 0; i < 32; i += 2)
21594 if (((a = rotate_left (val, i)) & 0xff) != 0)
21600 * highpart = (a >> 8) | ((i + 24) << 7);
21602 else if (a & 0xff0000)
21604 if (a & 0xff000000)
21606 * highpart = (a >> 16) | ((i + 16) << 7);
21610 gas_assert (a & 0xff000000);
21611 * highpart = (a >> 24) | ((i + 8) << 7);
21614 return (a & 0xff) | (i << 7);
21621 validate_offset_imm (unsigned int val, int hwse)
21623 if ((hwse && val > 255) || val > 4095)
21628 /* Subroutine of md_apply_fix. Do those data_ops which can take a
21629 negative immediate constant by altering the instruction. A bit of
21634 by inverting the second operand, and
21637 by negating the second operand. */
21640 negate_data_op (unsigned long * instruction,
21641 unsigned long value)
21644 unsigned long negated, inverted;
21646 negated = encode_arm_immediate (-value);
21647 inverted = encode_arm_immediate (~value);
21649 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
21652 /* First negates. */
21653 case OPCODE_SUB: /* ADD <-> SUB */
21654 new_inst = OPCODE_ADD;
21659 new_inst = OPCODE_SUB;
21663 case OPCODE_CMP: /* CMP <-> CMN */
21664 new_inst = OPCODE_CMN;
21669 new_inst = OPCODE_CMP;
21673 /* Now Inverted ops. */
21674 case OPCODE_MOV: /* MOV <-> MVN */
21675 new_inst = OPCODE_MVN;
21680 new_inst = OPCODE_MOV;
21684 case OPCODE_AND: /* AND <-> BIC */
21685 new_inst = OPCODE_BIC;
21690 new_inst = OPCODE_AND;
21694 case OPCODE_ADC: /* ADC <-> SBC */
21695 new_inst = OPCODE_SBC;
21700 new_inst = OPCODE_ADC;
21704 /* We cannot do anything. */
21709 if (value == (unsigned) FAIL)
21712 *instruction &= OPCODE_MASK;
21713 *instruction |= new_inst << DATA_OP_SHIFT;
21717 /* Like negate_data_op, but for Thumb-2. */
21719 static unsigned int
21720 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
21724 unsigned int negated, inverted;
21726 negated = encode_thumb32_immediate (-value);
21727 inverted = encode_thumb32_immediate (~value);
21729 rd = (*instruction >> 8) & 0xf;
21730 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
21733 /* ADD <-> SUB. Includes CMP <-> CMN. */
21734 case T2_OPCODE_SUB:
21735 new_inst = T2_OPCODE_ADD;
21739 case T2_OPCODE_ADD:
21740 new_inst = T2_OPCODE_SUB;
21744 /* ORR <-> ORN. Includes MOV <-> MVN. */
21745 case T2_OPCODE_ORR:
21746 new_inst = T2_OPCODE_ORN;
21750 case T2_OPCODE_ORN:
21751 new_inst = T2_OPCODE_ORR;
21755 /* AND <-> BIC. TST has no inverted equivalent. */
21756 case T2_OPCODE_AND:
21757 new_inst = T2_OPCODE_BIC;
21764 case T2_OPCODE_BIC:
21765 new_inst = T2_OPCODE_AND;
21770 case T2_OPCODE_ADC:
21771 new_inst = T2_OPCODE_SBC;
21775 case T2_OPCODE_SBC:
21776 new_inst = T2_OPCODE_ADC;
21780 /* We cannot do anything. */
21785 if (value == (unsigned int)FAIL)
21788 *instruction &= T2_OPCODE_MASK;
21789 *instruction |= new_inst << T2_DATA_OP_SHIFT;
21793 /* Read a 32-bit thumb instruction from buf. */
21794 static unsigned long
21795 get_thumb32_insn (char * buf)
21797 unsigned long insn;
21798 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
21799 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21805 /* We usually want to set the low bit on the address of thumb function
21806 symbols. In particular .word foo - . should have the low bit set.
21807 Generic code tries to fold the difference of two symbols to
21808 a constant. Prevent this and force a relocation when the first symbols
21809 is a thumb function. */
21812 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
21814 if (op == O_subtract
21815 && l->X_op == O_symbol
21816 && r->X_op == O_symbol
21817 && THUMB_IS_FUNC (l->X_add_symbol))
21819 l->X_op = O_subtract;
21820 l->X_op_symbol = r->X_add_symbol;
21821 l->X_add_number -= r->X_add_number;
21825 /* Process as normal. */
21829 /* Encode Thumb2 unconditional branches and calls. The encoding
21830 for the 2 are identical for the immediate values. */
21833 encode_thumb2_b_bl_offset (char * buf, offsetT value)
21835 #define T2I1I2MASK ((1 << 13) | (1 << 11))
21838 addressT S, I1, I2, lo, hi;
21840 S = (value >> 24) & 0x01;
21841 I1 = (value >> 23) & 0x01;
21842 I2 = (value >> 22) & 0x01;
21843 hi = (value >> 12) & 0x3ff;
21844 lo = (value >> 1) & 0x7ff;
21845 newval = md_chars_to_number (buf, THUMB_SIZE);
21846 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
21847 newval |= (S << 10) | hi;
21848 newval2 &= ~T2I1I2MASK;
21849 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
21850 md_number_to_chars (buf, newval, THUMB_SIZE);
21851 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
21855 md_apply_fix (fixS * fixP,
21859 offsetT value = * valP;
21861 unsigned int newimm;
21862 unsigned long temp;
21864 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
21866 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
21868 /* Note whether this will delete the relocation. */
21870 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
21873 /* On a 64-bit host, silently truncate 'value' to 32 bits for
21874 consistency with the behaviour on 32-bit hosts. Remember value
21876 value &= 0xffffffff;
21877 value ^= 0x80000000;
21878 value -= 0x80000000;
21881 fixP->fx_addnumber = value;
21883 /* Same treatment for fixP->fx_offset. */
21884 fixP->fx_offset &= 0xffffffff;
21885 fixP->fx_offset ^= 0x80000000;
21886 fixP->fx_offset -= 0x80000000;
21888 switch (fixP->fx_r_type)
21890 case BFD_RELOC_NONE:
21891 /* This will need to go in the object file. */
21895 case BFD_RELOC_ARM_IMMEDIATE:
21896 /* We claim that this fixup has been processed here,
21897 even if in fact we generate an error because we do
21898 not have a reloc for it, so tc_gen_reloc will reject it. */
21901 if (fixP->fx_addsy)
21903 const char *msg = 0;
21905 if (! S_IS_DEFINED (fixP->fx_addsy))
21906 msg = _("undefined symbol %s used as an immediate value");
21907 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21908 msg = _("symbol %s is in a different section");
21909 else if (S_IS_WEAK (fixP->fx_addsy))
21910 msg = _("symbol %s is weak and may be overridden later");
21914 as_bad_where (fixP->fx_file, fixP->fx_line,
21915 msg, S_GET_NAME (fixP->fx_addsy));
21920 temp = md_chars_to_number (buf, INSN_SIZE);
21922 /* If the offset is negative, we should use encoding A2 for ADR. */
21923 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
21924 newimm = negate_data_op (&temp, value);
21927 newimm = encode_arm_immediate (value);
21929 /* If the instruction will fail, see if we can fix things up by
21930 changing the opcode. */
21931 if (newimm == (unsigned int) FAIL)
21932 newimm = negate_data_op (&temp, value);
21935 if (newimm == (unsigned int) FAIL)
21937 as_bad_where (fixP->fx_file, fixP->fx_line,
21938 _("invalid constant (%lx) after fixup"),
21939 (unsigned long) value);
21943 newimm |= (temp & 0xfffff000);
21944 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
21947 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
21949 unsigned int highpart = 0;
21950 unsigned int newinsn = 0xe1a00000; /* nop. */
21952 if (fixP->fx_addsy)
21954 const char *msg = 0;
21956 if (! S_IS_DEFINED (fixP->fx_addsy))
21957 msg = _("undefined symbol %s used as an immediate value");
21958 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
21959 msg = _("symbol %s is in a different section");
21960 else if (S_IS_WEAK (fixP->fx_addsy))
21961 msg = _("symbol %s is weak and may be overridden later");
21965 as_bad_where (fixP->fx_file, fixP->fx_line,
21966 msg, S_GET_NAME (fixP->fx_addsy));
21971 newimm = encode_arm_immediate (value);
21972 temp = md_chars_to_number (buf, INSN_SIZE);
21974 /* If the instruction will fail, see if we can fix things up by
21975 changing the opcode. */
21976 if (newimm == (unsigned int) FAIL
21977 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
21979 /* No ? OK - try using two ADD instructions to generate
21981 newimm = validate_immediate_twopart (value, & highpart);
21983 /* Yes - then make sure that the second instruction is
21985 if (newimm != (unsigned int) FAIL)
21987 /* Still No ? Try using a negated value. */
21988 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
21989 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
21990 /* Otherwise - give up. */
21993 as_bad_where (fixP->fx_file, fixP->fx_line,
21994 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
21999 /* Replace the first operand in the 2nd instruction (which
22000 is the PC) with the destination register. We have
22001 already added in the PC in the first instruction and we
22002 do not want to do it again. */
22003 newinsn &= ~ 0xf0000;
22004 newinsn |= ((newinsn & 0x0f000) << 4);
22007 newimm |= (temp & 0xfffff000);
22008 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
22010 highpart |= (newinsn & 0xfffff000);
22011 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
22015 case BFD_RELOC_ARM_OFFSET_IMM:
22016 if (!fixP->fx_done && seg->use_rela_p)
22019 case BFD_RELOC_ARM_LITERAL:
22025 if (validate_offset_imm (value, 0) == FAIL)
22027 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
22028 as_bad_where (fixP->fx_file, fixP->fx_line,
22029 _("invalid literal constant: pool needs to be closer"));
22031 as_bad_where (fixP->fx_file, fixP->fx_line,
22032 _("bad immediate value for offset (%ld)"),
22037 newval = md_chars_to_number (buf, INSN_SIZE);
22039 newval &= 0xfffff000;
22042 newval &= 0xff7ff000;
22043 newval |= value | (sign ? INDEX_UP : 0);
22045 md_number_to_chars (buf, newval, INSN_SIZE);
22048 case BFD_RELOC_ARM_OFFSET_IMM8:
22049 case BFD_RELOC_ARM_HWLITERAL:
22055 if (validate_offset_imm (value, 1) == FAIL)
22057 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
22058 as_bad_where (fixP->fx_file, fixP->fx_line,
22059 _("invalid literal constant: pool needs to be closer"));
22061 as_bad_where (fixP->fx_file, fixP->fx_line,
22062 _("bad immediate value for 8-bit offset (%ld)"),
22067 newval = md_chars_to_number (buf, INSN_SIZE);
22069 newval &= 0xfffff0f0;
22072 newval &= 0xff7ff0f0;
22073 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
22075 md_number_to_chars (buf, newval, INSN_SIZE);
22078 case BFD_RELOC_ARM_T32_OFFSET_U8:
22079 if (value < 0 || value > 1020 || value % 4 != 0)
22080 as_bad_where (fixP->fx_file, fixP->fx_line,
22081 _("bad immediate value for offset (%ld)"), (long) value);
22084 newval = md_chars_to_number (buf+2, THUMB_SIZE);
22086 md_number_to_chars (buf+2, newval, THUMB_SIZE);
22089 case BFD_RELOC_ARM_T32_OFFSET_IMM:
22090 /* This is a complicated relocation used for all varieties of Thumb32
22091 load/store instruction with immediate offset:
22093 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
22094 *4, optional writeback(W)
22095 (doubleword load/store)
22097 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
22098 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
22099 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
22100 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
22101 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
22103 Uppercase letters indicate bits that are already encoded at
22104 this point. Lowercase letters are our problem. For the
22105 second block of instructions, the secondary opcode nybble
22106 (bits 8..11) is present, and bit 23 is zero, even if this is
22107 a PC-relative operation. */
22108 newval = md_chars_to_number (buf, THUMB_SIZE);
22110 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
22112 if ((newval & 0xf0000000) == 0xe0000000)
22114 /* Doubleword load/store: 8-bit offset, scaled by 4. */
22116 newval |= (1 << 23);
22119 if (value % 4 != 0)
22121 as_bad_where (fixP->fx_file, fixP->fx_line,
22122 _("offset not a multiple of 4"));
22128 as_bad_where (fixP->fx_file, fixP->fx_line,
22129 _("offset out of range"));
22134 else if ((newval & 0x000f0000) == 0x000f0000)
22136 /* PC-relative, 12-bit offset. */
22138 newval |= (1 << 23);
22143 as_bad_where (fixP->fx_file, fixP->fx_line,
22144 _("offset out of range"));
22149 else if ((newval & 0x00000100) == 0x00000100)
22151 /* Writeback: 8-bit, +/- offset. */
22153 newval |= (1 << 9);
22158 as_bad_where (fixP->fx_file, fixP->fx_line,
22159 _("offset out of range"));
22164 else if ((newval & 0x00000f00) == 0x00000e00)
22166 /* T-instruction: positive 8-bit offset. */
22167 if (value < 0 || value > 0xff)
22169 as_bad_where (fixP->fx_file, fixP->fx_line,
22170 _("offset out of range"));
22178 /* Positive 12-bit or negative 8-bit offset. */
22182 newval |= (1 << 23);
22192 as_bad_where (fixP->fx_file, fixP->fx_line,
22193 _("offset out of range"));
22200 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
22201 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
22204 case BFD_RELOC_ARM_SHIFT_IMM:
22205 newval = md_chars_to_number (buf, INSN_SIZE);
22206 if (((unsigned long) value) > 32
22208 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
22210 as_bad_where (fixP->fx_file, fixP->fx_line,
22211 _("shift expression is too large"));
22216 /* Shifts of zero must be done as lsl. */
22218 else if (value == 32)
22220 newval &= 0xfffff07f;
22221 newval |= (value & 0x1f) << 7;
22222 md_number_to_chars (buf, newval, INSN_SIZE);
22225 case BFD_RELOC_ARM_T32_IMMEDIATE:
22226 case BFD_RELOC_ARM_T32_ADD_IMM:
22227 case BFD_RELOC_ARM_T32_IMM12:
22228 case BFD_RELOC_ARM_T32_ADD_PC12:
22229 /* We claim that this fixup has been processed here,
22230 even if in fact we generate an error because we do
22231 not have a reloc for it, so tc_gen_reloc will reject it. */
22235 && ! S_IS_DEFINED (fixP->fx_addsy))
22237 as_bad_where (fixP->fx_file, fixP->fx_line,
22238 _("undefined symbol %s used as an immediate value"),
22239 S_GET_NAME (fixP->fx_addsy));
22243 newval = md_chars_to_number (buf, THUMB_SIZE);
22245 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
22248 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
22249 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22251 newimm = encode_thumb32_immediate (value);
22252 if (newimm == (unsigned int) FAIL)
22253 newimm = thumb32_negate_data_op (&newval, value);
22255 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
22256 && newimm == (unsigned int) FAIL)
22258 /* Turn add/sum into addw/subw. */
22259 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
22260 newval = (newval & 0xfeffffff) | 0x02000000;
22261 /* No flat 12-bit imm encoding for addsw/subsw. */
22262 if ((newval & 0x00100000) == 0)
22264 /* 12 bit immediate for addw/subw. */
22268 newval ^= 0x00a00000;
22271 newimm = (unsigned int) FAIL;
22277 if (newimm == (unsigned int)FAIL)
22279 as_bad_where (fixP->fx_file, fixP->fx_line,
22280 _("invalid constant (%lx) after fixup"),
22281 (unsigned long) value);
22285 newval |= (newimm & 0x800) << 15;
22286 newval |= (newimm & 0x700) << 4;
22287 newval |= (newimm & 0x0ff);
22289 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
22290 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
22293 case BFD_RELOC_ARM_SMC:
22294 if (((unsigned long) value) > 0xffff)
22295 as_bad_where (fixP->fx_file, fixP->fx_line,
22296 _("invalid smc expression"));
22297 newval = md_chars_to_number (buf, INSN_SIZE);
22298 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22299 md_number_to_chars (buf, newval, INSN_SIZE);
22302 case BFD_RELOC_ARM_HVC:
22303 if (((unsigned long) value) > 0xffff)
22304 as_bad_where (fixP->fx_file, fixP->fx_line,
22305 _("invalid hvc expression"));
22306 newval = md_chars_to_number (buf, INSN_SIZE);
22307 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
22308 md_number_to_chars (buf, newval, INSN_SIZE);
22311 case BFD_RELOC_ARM_SWI:
22312 if (fixP->tc_fix_data != 0)
22314 if (((unsigned long) value) > 0xff)
22315 as_bad_where (fixP->fx_file, fixP->fx_line,
22316 _("invalid swi expression"));
22317 newval = md_chars_to_number (buf, THUMB_SIZE);
22319 md_number_to_chars (buf, newval, THUMB_SIZE);
22323 if (((unsigned long) value) > 0x00ffffff)
22324 as_bad_where (fixP->fx_file, fixP->fx_line,
22325 _("invalid swi expression"));
22326 newval = md_chars_to_number (buf, INSN_SIZE);
22328 md_number_to_chars (buf, newval, INSN_SIZE);
22332 case BFD_RELOC_ARM_MULTI:
22333 if (((unsigned long) value) > 0xffff)
22334 as_bad_where (fixP->fx_file, fixP->fx_line,
22335 _("invalid expression in load/store multiple"));
22336 newval = value | md_chars_to_number (buf, INSN_SIZE);
22337 md_number_to_chars (buf, newval, INSN_SIZE);
22341 case BFD_RELOC_ARM_PCREL_CALL:
22343 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22345 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22346 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22347 && THUMB_IS_FUNC (fixP->fx_addsy))
22348 /* Flip the bl to blx. This is a simple flip
22349 bit here because we generate PCREL_CALL for
22350 unconditional bls. */
22352 newval = md_chars_to_number (buf, INSN_SIZE);
22353 newval = newval | 0x10000000;
22354 md_number_to_chars (buf, newval, INSN_SIZE);
22360 goto arm_branch_common;
22362 case BFD_RELOC_ARM_PCREL_JUMP:
22363 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22365 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22366 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22367 && THUMB_IS_FUNC (fixP->fx_addsy))
22369 /* This would map to a bl<cond>, b<cond>,
22370 b<always> to a Thumb function. We
22371 need to force a relocation for this particular
22373 newval = md_chars_to_number (buf, INSN_SIZE);
22377 case BFD_RELOC_ARM_PLT32:
22379 case BFD_RELOC_ARM_PCREL_BRANCH:
22381 goto arm_branch_common;
22383 case BFD_RELOC_ARM_PCREL_BLX:
22386 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
22388 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22389 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22390 && ARM_IS_FUNC (fixP->fx_addsy))
22392 /* Flip the blx to a bl and warn. */
22393 const char *name = S_GET_NAME (fixP->fx_addsy);
22394 newval = 0xeb000000;
22395 as_warn_where (fixP->fx_file, fixP->fx_line,
22396 _("blx to '%s' an ARM ISA state function changed to bl"),
22398 md_number_to_chars (buf, newval, INSN_SIZE);
22404 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
22405 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
22409 /* We are going to store value (shifted right by two) in the
22410 instruction, in a 24 bit, signed field. Bits 26 through 32 either
22411 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
22412 also be be clear. */
22414 as_bad_where (fixP->fx_file, fixP->fx_line,
22415 _("misaligned branch destination"));
22416 if ((value & (offsetT)0xfe000000) != (offsetT)0
22417 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
22418 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22420 if (fixP->fx_done || !seg->use_rela_p)
22422 newval = md_chars_to_number (buf, INSN_SIZE);
22423 newval |= (value >> 2) & 0x00ffffff;
22424 /* Set the H bit on BLX instructions. */
22428 newval |= 0x01000000;
22430 newval &= ~0x01000000;
22432 md_number_to_chars (buf, newval, INSN_SIZE);
22436 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
22437 /* CBZ can only branch forward. */
22439 /* Attempts to use CBZ to branch to the next instruction
22440 (which, strictly speaking, are prohibited) will be turned into
22443 FIXME: It may be better to remove the instruction completely and
22444 perform relaxation. */
22447 newval = md_chars_to_number (buf, THUMB_SIZE);
22448 newval = 0xbf00; /* NOP encoding T1 */
22449 md_number_to_chars (buf, newval, THUMB_SIZE);
22454 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22456 if (fixP->fx_done || !seg->use_rela_p)
22458 newval = md_chars_to_number (buf, THUMB_SIZE);
22459 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
22460 md_number_to_chars (buf, newval, THUMB_SIZE);
22465 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
22466 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
22467 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22469 if (fixP->fx_done || !seg->use_rela_p)
22471 newval = md_chars_to_number (buf, THUMB_SIZE);
22472 newval |= (value & 0x1ff) >> 1;
22473 md_number_to_chars (buf, newval, THUMB_SIZE);
22477 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
22478 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
22479 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22481 if (fixP->fx_done || !seg->use_rela_p)
22483 newval = md_chars_to_number (buf, THUMB_SIZE);
22484 newval |= (value & 0xfff) >> 1;
22485 md_number_to_chars (buf, newval, THUMB_SIZE);
22489 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22491 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22492 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22493 && ARM_IS_FUNC (fixP->fx_addsy)
22494 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22496 /* Force a relocation for a branch 20 bits wide. */
22499 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
22500 as_bad_where (fixP->fx_file, fixP->fx_line,
22501 _("conditional branch out of range"));
22503 if (fixP->fx_done || !seg->use_rela_p)
22506 addressT S, J1, J2, lo, hi;
22508 S = (value & 0x00100000) >> 20;
22509 J2 = (value & 0x00080000) >> 19;
22510 J1 = (value & 0x00040000) >> 18;
22511 hi = (value & 0x0003f000) >> 12;
22512 lo = (value & 0x00000ffe) >> 1;
22514 newval = md_chars_to_number (buf, THUMB_SIZE);
22515 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22516 newval |= (S << 10) | hi;
22517 newval2 |= (J1 << 13) | (J2 << 11) | lo;
22518 md_number_to_chars (buf, newval, THUMB_SIZE);
22519 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
22523 case BFD_RELOC_THUMB_PCREL_BLX:
22524 /* If there is a blx from a thumb state function to
22525 another thumb function flip this to a bl and warn
22529 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22530 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22531 && THUMB_IS_FUNC (fixP->fx_addsy))
22533 const char *name = S_GET_NAME (fixP->fx_addsy);
22534 as_warn_where (fixP->fx_file, fixP->fx_line,
22535 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
22537 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22538 newval = newval | 0x1000;
22539 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22540 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22545 goto thumb_bl_common;
22547 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22548 /* A bl from Thumb state ISA to an internal ARM state function
22549 is converted to a blx. */
22551 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22552 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22553 && ARM_IS_FUNC (fixP->fx_addsy)
22554 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22556 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
22557 newval = newval & ~0x1000;
22558 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
22559 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
22565 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22566 /* For a BLX instruction, make sure that the relocation is rounded up
22567 to a word boundary. This follows the semantics of the instruction
22568 which specifies that bit 1 of the target address will come from bit
22569 1 of the base address. */
22570 value = (value + 3) & ~ 3;
22573 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
22574 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
22575 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
22578 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
22580 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_arch_t2)))
22581 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22582 else if ((value & ~0x1ffffff)
22583 && ((value & ~0x1ffffff) != ~0x1ffffff))
22584 as_bad_where (fixP->fx_file, fixP->fx_line,
22585 _("Thumb2 branch out of range"));
22588 if (fixP->fx_done || !seg->use_rela_p)
22589 encode_thumb2_b_bl_offset (buf, value);
22593 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22594 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
22595 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
22597 if (fixP->fx_done || !seg->use_rela_p)
22598 encode_thumb2_b_bl_offset (buf, value);
22603 if (fixP->fx_done || !seg->use_rela_p)
22608 if (fixP->fx_done || !seg->use_rela_p)
22609 md_number_to_chars (buf, value, 2);
22613 case BFD_RELOC_ARM_TLS_CALL:
22614 case BFD_RELOC_ARM_THM_TLS_CALL:
22615 case BFD_RELOC_ARM_TLS_DESCSEQ:
22616 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
22617 case BFD_RELOC_ARM_TLS_GOTDESC:
22618 case BFD_RELOC_ARM_TLS_GD32:
22619 case BFD_RELOC_ARM_TLS_LE32:
22620 case BFD_RELOC_ARM_TLS_IE32:
22621 case BFD_RELOC_ARM_TLS_LDM32:
22622 case BFD_RELOC_ARM_TLS_LDO32:
22623 S_SET_THREAD_LOCAL (fixP->fx_addsy);
22626 case BFD_RELOC_ARM_GOT32:
22627 case BFD_RELOC_ARM_GOTOFF:
22630 case BFD_RELOC_ARM_GOT_PREL:
22631 if (fixP->fx_done || !seg->use_rela_p)
22632 md_number_to_chars (buf, value, 4);
22635 case BFD_RELOC_ARM_TARGET2:
22636 /* TARGET2 is not partial-inplace, so we need to write the
22637 addend here for REL targets, because it won't be written out
22638 during reloc processing later. */
22639 if (fixP->fx_done || !seg->use_rela_p)
22640 md_number_to_chars (buf, fixP->fx_offset, 4);
22644 case BFD_RELOC_RVA:
22646 case BFD_RELOC_ARM_TARGET1:
22647 case BFD_RELOC_ARM_ROSEGREL32:
22648 case BFD_RELOC_ARM_SBREL32:
22649 case BFD_RELOC_32_PCREL:
22651 case BFD_RELOC_32_SECREL:
22653 if (fixP->fx_done || !seg->use_rela_p)
22655 /* For WinCE we only do this for pcrel fixups. */
22656 if (fixP->fx_done || fixP->fx_pcrel)
22658 md_number_to_chars (buf, value, 4);
22662 case BFD_RELOC_ARM_PREL31:
22663 if (fixP->fx_done || !seg->use_rela_p)
22665 newval = md_chars_to_number (buf, 4) & 0x80000000;
22666 if ((value ^ (value >> 1)) & 0x40000000)
22668 as_bad_where (fixP->fx_file, fixP->fx_line,
22669 _("rel31 relocation overflow"));
22671 newval |= value & 0x7fffffff;
22672 md_number_to_chars (buf, newval, 4);
22677 case BFD_RELOC_ARM_CP_OFF_IMM:
22678 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
22679 if (value < -1023 || value > 1023 || (value & 3))
22680 as_bad_where (fixP->fx_file, fixP->fx_line,
22681 _("co-processor offset out of range"));
22686 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22687 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22688 newval = md_chars_to_number (buf, INSN_SIZE);
22690 newval = get_thumb32_insn (buf);
22692 newval &= 0xffffff00;
22695 newval &= 0xff7fff00;
22696 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
22698 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
22699 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
22700 md_number_to_chars (buf, newval, INSN_SIZE);
22702 put_thumb32_insn (buf, newval);
22705 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
22706 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
22707 if (value < -255 || value > 255)
22708 as_bad_where (fixP->fx_file, fixP->fx_line,
22709 _("co-processor offset out of range"));
22711 goto cp_off_common;
22713 case BFD_RELOC_ARM_THUMB_OFFSET:
22714 newval = md_chars_to_number (buf, THUMB_SIZE);
22715 /* Exactly what ranges, and where the offset is inserted depends
22716 on the type of instruction, we can establish this from the
22718 switch (newval >> 12)
22720 case 4: /* PC load. */
22721 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
22722 forced to zero for these loads; md_pcrel_from has already
22723 compensated for this. */
22725 as_bad_where (fixP->fx_file, fixP->fx_line,
22726 _("invalid offset, target not word aligned (0x%08lX)"),
22727 (((unsigned long) fixP->fx_frag->fr_address
22728 + (unsigned long) fixP->fx_where) & ~3)
22729 + (unsigned long) value);
22731 if (value & ~0x3fc)
22732 as_bad_where (fixP->fx_file, fixP->fx_line,
22733 _("invalid offset, value too big (0x%08lX)"),
22736 newval |= value >> 2;
22739 case 9: /* SP load/store. */
22740 if (value & ~0x3fc)
22741 as_bad_where (fixP->fx_file, fixP->fx_line,
22742 _("invalid offset, value too big (0x%08lX)"),
22744 newval |= value >> 2;
22747 case 6: /* Word load/store. */
22749 as_bad_where (fixP->fx_file, fixP->fx_line,
22750 _("invalid offset, value too big (0x%08lX)"),
22752 newval |= value << 4; /* 6 - 2. */
22755 case 7: /* Byte load/store. */
22757 as_bad_where (fixP->fx_file, fixP->fx_line,
22758 _("invalid offset, value too big (0x%08lX)"),
22760 newval |= value << 6;
22763 case 8: /* Halfword load/store. */
22765 as_bad_where (fixP->fx_file, fixP->fx_line,
22766 _("invalid offset, value too big (0x%08lX)"),
22768 newval |= value << 5; /* 6 - 1. */
22772 as_bad_where (fixP->fx_file, fixP->fx_line,
22773 "Unable to process relocation for thumb opcode: %lx",
22774 (unsigned long) newval);
22777 md_number_to_chars (buf, newval, THUMB_SIZE);
22780 case BFD_RELOC_ARM_THUMB_ADD:
22781 /* This is a complicated relocation, since we use it for all of
22782 the following immediate relocations:
22786 9bit ADD/SUB SP word-aligned
22787 10bit ADD PC/SP word-aligned
22789 The type of instruction being processed is encoded in the
22796 newval = md_chars_to_number (buf, THUMB_SIZE);
22798 int rd = (newval >> 4) & 0xf;
22799 int rs = newval & 0xf;
22800 int subtract = !!(newval & 0x8000);
22802 /* Check for HI regs, only very restricted cases allowed:
22803 Adjusting SP, and using PC or SP to get an address. */
22804 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
22805 || (rs > 7 && rs != REG_SP && rs != REG_PC))
22806 as_bad_where (fixP->fx_file, fixP->fx_line,
22807 _("invalid Hi register with immediate"));
22809 /* If value is negative, choose the opposite instruction. */
22813 subtract = !subtract;
22815 as_bad_where (fixP->fx_file, fixP->fx_line,
22816 _("immediate value out of range"));
22821 if (value & ~0x1fc)
22822 as_bad_where (fixP->fx_file, fixP->fx_line,
22823 _("invalid immediate for stack address calculation"));
22824 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
22825 newval |= value >> 2;
22827 else if (rs == REG_PC || rs == REG_SP)
22829 if (subtract || value & ~0x3fc)
22830 as_bad_where (fixP->fx_file, fixP->fx_line,
22831 _("invalid immediate for address calculation (value = 0x%08lX)"),
22832 (unsigned long) value);
22833 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
22835 newval |= value >> 2;
22840 as_bad_where (fixP->fx_file, fixP->fx_line,
22841 _("immediate value out of range"));
22842 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
22843 newval |= (rd << 8) | value;
22848 as_bad_where (fixP->fx_file, fixP->fx_line,
22849 _("immediate value out of range"));
22850 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
22851 newval |= rd | (rs << 3) | (value << 6);
22854 md_number_to_chars (buf, newval, THUMB_SIZE);
22857 case BFD_RELOC_ARM_THUMB_IMM:
22858 newval = md_chars_to_number (buf, THUMB_SIZE);
22859 if (value < 0 || value > 255)
22860 as_bad_where (fixP->fx_file, fixP->fx_line,
22861 _("invalid immediate: %ld is out of range"),
22864 md_number_to_chars (buf, newval, THUMB_SIZE);
22867 case BFD_RELOC_ARM_THUMB_SHIFT:
22868 /* 5bit shift value (0..32). LSL cannot take 32. */
22869 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
22870 temp = newval & 0xf800;
22871 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
22872 as_bad_where (fixP->fx_file, fixP->fx_line,
22873 _("invalid shift value: %ld"), (long) value);
22874 /* Shifts of zero must be encoded as LSL. */
22876 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
22877 /* Shifts of 32 are encoded as zero. */
22878 else if (value == 32)
22880 newval |= value << 6;
22881 md_number_to_chars (buf, newval, THUMB_SIZE);
22884 case BFD_RELOC_VTABLE_INHERIT:
22885 case BFD_RELOC_VTABLE_ENTRY:
22889 case BFD_RELOC_ARM_MOVW:
22890 case BFD_RELOC_ARM_MOVT:
22891 case BFD_RELOC_ARM_THUMB_MOVW:
22892 case BFD_RELOC_ARM_THUMB_MOVT:
22893 if (fixP->fx_done || !seg->use_rela_p)
22895 /* REL format relocations are limited to a 16-bit addend. */
22896 if (!fixP->fx_done)
22898 if (value < -0x8000 || value > 0x7fff)
22899 as_bad_where (fixP->fx_file, fixP->fx_line,
22900 _("offset out of range"));
22902 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
22903 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22908 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
22909 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
22911 newval = get_thumb32_insn (buf);
22912 newval &= 0xfbf08f00;
22913 newval |= (value & 0xf000) << 4;
22914 newval |= (value & 0x0800) << 15;
22915 newval |= (value & 0x0700) << 4;
22916 newval |= (value & 0x00ff);
22917 put_thumb32_insn (buf, newval);
22921 newval = md_chars_to_number (buf, 4);
22922 newval &= 0xfff0f000;
22923 newval |= value & 0x0fff;
22924 newval |= (value & 0xf000) << 4;
22925 md_number_to_chars (buf, newval, 4);
22930 case BFD_RELOC_ARM_ALU_PC_G0_NC:
22931 case BFD_RELOC_ARM_ALU_PC_G0:
22932 case BFD_RELOC_ARM_ALU_PC_G1_NC:
22933 case BFD_RELOC_ARM_ALU_PC_G1:
22934 case BFD_RELOC_ARM_ALU_PC_G2:
22935 case BFD_RELOC_ARM_ALU_SB_G0_NC:
22936 case BFD_RELOC_ARM_ALU_SB_G0:
22937 case BFD_RELOC_ARM_ALU_SB_G1_NC:
22938 case BFD_RELOC_ARM_ALU_SB_G1:
22939 case BFD_RELOC_ARM_ALU_SB_G2:
22940 gas_assert (!fixP->fx_done);
22941 if (!seg->use_rela_p)
22944 bfd_vma encoded_addend;
22945 bfd_vma addend_abs = abs (value);
22947 /* Check that the absolute value of the addend can be
22948 expressed as an 8-bit constant plus a rotation. */
22949 encoded_addend = encode_arm_immediate (addend_abs);
22950 if (encoded_addend == (unsigned int) FAIL)
22951 as_bad_where (fixP->fx_file, fixP->fx_line,
22952 _("the offset 0x%08lX is not representable"),
22953 (unsigned long) addend_abs);
22955 /* Extract the instruction. */
22956 insn = md_chars_to_number (buf, INSN_SIZE);
22958 /* If the addend is positive, use an ADD instruction.
22959 Otherwise use a SUB. Take care not to destroy the S bit. */
22960 insn &= 0xff1fffff;
22966 /* Place the encoded addend into the first 12 bits of the
22968 insn &= 0xfffff000;
22969 insn |= encoded_addend;
22971 /* Update the instruction. */
22972 md_number_to_chars (buf, insn, INSN_SIZE);
22976 case BFD_RELOC_ARM_LDR_PC_G0:
22977 case BFD_RELOC_ARM_LDR_PC_G1:
22978 case BFD_RELOC_ARM_LDR_PC_G2:
22979 case BFD_RELOC_ARM_LDR_SB_G0:
22980 case BFD_RELOC_ARM_LDR_SB_G1:
22981 case BFD_RELOC_ARM_LDR_SB_G2:
22982 gas_assert (!fixP->fx_done);
22983 if (!seg->use_rela_p)
22986 bfd_vma addend_abs = abs (value);
22988 /* Check that the absolute value of the addend can be
22989 encoded in 12 bits. */
22990 if (addend_abs >= 0x1000)
22991 as_bad_where (fixP->fx_file, fixP->fx_line,
22992 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
22993 (unsigned long) addend_abs);
22995 /* Extract the instruction. */
22996 insn = md_chars_to_number (buf, INSN_SIZE);
22998 /* If the addend is negative, clear bit 23 of the instruction.
22999 Otherwise set it. */
23001 insn &= ~(1 << 23);
23005 /* Place the absolute value of the addend into the first 12 bits
23006 of the instruction. */
23007 insn &= 0xfffff000;
23008 insn |= addend_abs;
23010 /* Update the instruction. */
23011 md_number_to_chars (buf, insn, INSN_SIZE);
23015 case BFD_RELOC_ARM_LDRS_PC_G0:
23016 case BFD_RELOC_ARM_LDRS_PC_G1:
23017 case BFD_RELOC_ARM_LDRS_PC_G2:
23018 case BFD_RELOC_ARM_LDRS_SB_G0:
23019 case BFD_RELOC_ARM_LDRS_SB_G1:
23020 case BFD_RELOC_ARM_LDRS_SB_G2:
23021 gas_assert (!fixP->fx_done);
23022 if (!seg->use_rela_p)
23025 bfd_vma addend_abs = abs (value);
23027 /* Check that the absolute value of the addend can be
23028 encoded in 8 bits. */
23029 if (addend_abs >= 0x100)
23030 as_bad_where (fixP->fx_file, fixP->fx_line,
23031 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
23032 (unsigned long) addend_abs);
23034 /* Extract the instruction. */
23035 insn = md_chars_to_number (buf, INSN_SIZE);
23037 /* If the addend is negative, clear bit 23 of the instruction.
23038 Otherwise set it. */
23040 insn &= ~(1 << 23);
23044 /* Place the first four bits of the absolute value of the addend
23045 into the first 4 bits of the instruction, and the remaining
23046 four into bits 8 .. 11. */
23047 insn &= 0xfffff0f0;
23048 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
23050 /* Update the instruction. */
23051 md_number_to_chars (buf, insn, INSN_SIZE);
23055 case BFD_RELOC_ARM_LDC_PC_G0:
23056 case BFD_RELOC_ARM_LDC_PC_G1:
23057 case BFD_RELOC_ARM_LDC_PC_G2:
23058 case BFD_RELOC_ARM_LDC_SB_G0:
23059 case BFD_RELOC_ARM_LDC_SB_G1:
23060 case BFD_RELOC_ARM_LDC_SB_G2:
23061 gas_assert (!fixP->fx_done);
23062 if (!seg->use_rela_p)
23065 bfd_vma addend_abs = abs (value);
23067 /* Check that the absolute value of the addend is a multiple of
23068 four and, when divided by four, fits in 8 bits. */
23069 if (addend_abs & 0x3)
23070 as_bad_where (fixP->fx_file, fixP->fx_line,
23071 _("bad offset 0x%08lX (must be word-aligned)"),
23072 (unsigned long) addend_abs);
23074 if ((addend_abs >> 2) > 0xff)
23075 as_bad_where (fixP->fx_file, fixP->fx_line,
23076 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
23077 (unsigned long) addend_abs);
23079 /* Extract the instruction. */
23080 insn = md_chars_to_number (buf, INSN_SIZE);
23082 /* If the addend is negative, clear bit 23 of the instruction.
23083 Otherwise set it. */
23085 insn &= ~(1 << 23);
23089 /* Place the addend (divided by four) into the first eight
23090 bits of the instruction. */
23091 insn &= 0xfffffff0;
23092 insn |= addend_abs >> 2;
23094 /* Update the instruction. */
23095 md_number_to_chars (buf, insn, INSN_SIZE);
23099 case BFD_RELOC_ARM_V4BX:
23100 /* This will need to go in the object file. */
23104 case BFD_RELOC_UNUSED:
23106 as_bad_where (fixP->fx_file, fixP->fx_line,
23107 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
23111 /* Translate internal representation of relocation info to BFD target
23115 tc_gen_reloc (asection *section, fixS *fixp)
23118 bfd_reloc_code_real_type code;
/* Allocate the BFD arelent and the slot for its symbol pointer (both
   via xmalloc), point it at the fixup's symbol, and record the reloc's
   address within the output section.  */
23120 reloc = (arelent *) xmalloc (sizeof (arelent));
23122 reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
23123 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
23124 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
/* PC-relative fixups: on RELA targets fold the PC bias out of the
   offset; otherwise the reloc's own address becomes the offset.  */
23126 if (fixp->fx_pcrel)
23128 if (section->use_rela_p)
23129 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
23131 fixp->fx_offset = reloc->address;
23133 reloc->addend = fixp->fx_offset;
/* Map the internal fixup type onto a BFD relocation code.  Data
   relocs (8/16/32 bit) switch to their *_PCREL variants when the
   fixup is PC-relative.  */
23135 switch (fixp->fx_r_type)
23138 if (fixp->fx_pcrel)
23140 code = BFD_RELOC_8_PCREL;
23145 if (fixp->fx_pcrel)
23147 code = BFD_RELOC_16_PCREL;
23152 if (fixp->fx_pcrel)
23154 code = BFD_RELOC_32_PCREL;
/* MOVW/MOVT (ARM and Thumb) likewise get dedicated PCREL codes.  */
23158 case BFD_RELOC_ARM_MOVW:
23159 if (fixp->fx_pcrel)
23161 code = BFD_RELOC_ARM_MOVW_PCREL;
23165 case BFD_RELOC_ARM_MOVT:
23166 if (fixp->fx_pcrel)
23168 code = BFD_RELOC_ARM_MOVT_PCREL;
23172 case BFD_RELOC_ARM_THUMB_MOVW:
23173 if (fixp->fx_pcrel)
23175 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
23179 case BFD_RELOC_ARM_THUMB_MOVT:
23180 if (fixp->fx_pcrel)
23182 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
/* These fixup types pass through to BFD unchanged.  */
23186 case BFD_RELOC_NONE:
23187 case BFD_RELOC_ARM_PCREL_BRANCH:
23188 case BFD_RELOC_ARM_PCREL_BLX:
23189 case BFD_RELOC_RVA:
23190 case BFD_RELOC_THUMB_PCREL_BRANCH7:
23191 case BFD_RELOC_THUMB_PCREL_BRANCH9:
23192 case BFD_RELOC_THUMB_PCREL_BRANCH12:
23193 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23194 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23195 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23196 case BFD_RELOC_VTABLE_ENTRY:
23197 case BFD_RELOC_VTABLE_INHERIT:
23199 case BFD_RELOC_32_SECREL:
23201 code = fixp->fx_r_type;
/* Thumb BLX: for EABI v4 and later, emit it as BRANCH23 instead.  */
23204 case BFD_RELOC_THUMB_PCREL_BLX:
23206 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23207 code = BFD_RELOC_THUMB_PCREL_BRANCH23;
23210 code = BFD_RELOC_THUMB_PCREL_BLX;
23213 case BFD_RELOC_ARM_LITERAL:
23214 case BFD_RELOC_ARM_HWLITERAL:
23215 /* If this is called then the a literal has
23216 been referenced across a section boundary. */
23217 as_bad_where (fixp->fx_file, fixp->fx_line,
23218 _("literal referenced across section boundary"));
/* This long run of types also maps through unchanged.  */
23222 case BFD_RELOC_ARM_TLS_CALL:
23223 case BFD_RELOC_ARM_THM_TLS_CALL:
23224 case BFD_RELOC_ARM_TLS_DESCSEQ:
23225 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
23226 case BFD_RELOC_ARM_GOT32:
23227 case BFD_RELOC_ARM_GOTOFF:
23228 case BFD_RELOC_ARM_GOT_PREL:
23229 case BFD_RELOC_ARM_PLT32:
23230 case BFD_RELOC_ARM_TARGET1:
23231 case BFD_RELOC_ARM_ROSEGREL32:
23232 case BFD_RELOC_ARM_SBREL32:
23233 case BFD_RELOC_ARM_PREL31:
23234 case BFD_RELOC_ARM_TARGET2:
23235 case BFD_RELOC_ARM_TLS_LE32:
23236 case BFD_RELOC_ARM_TLS_LDO32:
23237 case BFD_RELOC_ARM_PCREL_CALL:
23238 case BFD_RELOC_ARM_PCREL_JUMP:
23239 case BFD_RELOC_ARM_ALU_PC_G0_NC:
23240 case BFD_RELOC_ARM_ALU_PC_G0:
23241 case BFD_RELOC_ARM_ALU_PC_G1_NC:
23242 case BFD_RELOC_ARM_ALU_PC_G1:
23243 case BFD_RELOC_ARM_ALU_PC_G2:
23244 case BFD_RELOC_ARM_LDR_PC_G0:
23245 case BFD_RELOC_ARM_LDR_PC_G1:
23246 case BFD_RELOC_ARM_LDR_PC_G2:
23247 case BFD_RELOC_ARM_LDRS_PC_G0:
23248 case BFD_RELOC_ARM_LDRS_PC_G1:
23249 case BFD_RELOC_ARM_LDRS_PC_G2:
23250 case BFD_RELOC_ARM_LDC_PC_G0:
23251 case BFD_RELOC_ARM_LDC_PC_G1:
23252 case BFD_RELOC_ARM_LDC_PC_G2:
23253 case BFD_RELOC_ARM_ALU_SB_G0_NC:
23254 case BFD_RELOC_ARM_ALU_SB_G0:
23255 case BFD_RELOC_ARM_ALU_SB_G1_NC:
23256 case BFD_RELOC_ARM_ALU_SB_G1:
23257 case BFD_RELOC_ARM_ALU_SB_G2:
23258 case BFD_RELOC_ARM_LDR_SB_G0:
23259 case BFD_RELOC_ARM_LDR_SB_G1:
23260 case BFD_RELOC_ARM_LDR_SB_G2:
23261 case BFD_RELOC_ARM_LDRS_SB_G0:
23262 case BFD_RELOC_ARM_LDRS_SB_G1:
23263 case BFD_RELOC_ARM_LDRS_SB_G2:
23264 case BFD_RELOC_ARM_LDC_SB_G0:
23265 case BFD_RELOC_ARM_LDC_SB_G1:
23266 case BFD_RELOC_ARM_LDC_SB_G2:
23267 case BFD_RELOC_ARM_V4BX:
23268 code = fixp->fx_r_type;
23271 case BFD_RELOC_ARM_TLS_GOTDESC:
23272 case BFD_RELOC_ARM_TLS_GD32:
23273 case BFD_RELOC_ARM_TLS_IE32:
23274 case BFD_RELOC_ARM_TLS_LDM32:
23275 /* BFD will include the symbol's address in the addend.
23276 But we don't want that, so subtract it out again here. */
23277 if (!S_IS_COMMON (fixp->fx_addsy))
23278 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
23279 code = fixp->fx_r_type;
/* Internal-only fixups should never reach reloc generation; if one
   does, the earlier fixup pass failed to resolve it.  */
23283 case BFD_RELOC_ARM_IMMEDIATE:
23284 as_bad_where (fixp->fx_file, fixp->fx_line,
23285 _("internal relocation (type: IMMEDIATE) not fixed up"));
23288 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
23289 as_bad_where (fixp->fx_file, fixp->fx_line,
23290 _("ADRL used for a symbol not defined in the same file"));
23293 case BFD_RELOC_ARM_OFFSET_IMM:
23294 if (section->use_rela_p)
23296 code = fixp->fx_r_type;
23300 if (fixp->fx_addsy != NULL
23301 && !S_IS_DEFINED (fixp->fx_addsy)
23302 && S_IS_LOCAL (fixp->fx_addsy))
23304 as_bad_where (fixp->fx_file, fixp->fx_line,
23305 _("undefined local label `%s'"),
23306 S_GET_NAME (fixp->fx_addsy));
23310 as_bad_where (fixp->fx_file, fixp->fx_line,
23311 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
/* Anything else cannot be represented: build a human-readable name
   for the offending type and report it.  */
23318 switch (fixp->fx_r_type)
23320 case BFD_RELOC_NONE: type = "NONE"; break;
23321 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
23322 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
23323 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
23324 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
23325 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
23326 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
23327 case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
23328 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
23329 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
23330 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
23331 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
23332 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
23333 default: type = _("<unknown>"); break;
23335 as_bad_where (fixp->fx_file, fixp->fx_line,
23336 _("cannot represent %s relocation in this object file format"),
/* A 32-bit (pcrel) reference to the GOT symbol itself becomes GOTPC,
   with the addend reset to the reloc's own address.  */
23343 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
23345 && fixp->fx_addsy == GOT_symbol)
23347 code = BFD_RELOC_ARM_GOTPC;
23348 reloc->addend = fixp->fx_offset = reloc->address;
/* Look up the howto for CODE; fail loudly if BFD cannot encode it.  */
23352 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
23354 if (reloc->howto == NULL)
23356 as_bad_where (fixp->fx_file, fixp->fx_line,
23357 _("cannot represent %s relocation in this object file format"),
23358 bfd_get_reloc_code_name (code));
23362 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
23363 vtable entry to be used in the relocation's section offset. */
23364 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
23365 reloc->address = fixp->fx_offset;
23370 /* This fix_new is called by cons via TC_CONS_FIX_NEW.  */
23373 cons_fix_new_arm (fragS * frag,
23377 bfd_reloc_code_real_type reloc)
23382 FIXME: @@ Should look at CPU word size.  */
/* Pick a generic data reloc sized to match the emitted constant.  */
23386 reloc = BFD_RELOC_8;
23389 reloc = BFD_RELOC_16;
23393 reloc = BFD_RELOC_32;
23396 reloc = BFD_RELOC_64;
/* A section-relative expression is rewritten as a plain symbol
   reference carrying a 32-bit SECREL relocation.  */
23401 if (exp->X_op == O_secrel)
23403 exp->X_op = O_symbol;
23404 reloc = BFD_RELOC_32_SECREL;
/* Hand the expression off to the generic fixup machinery.  */
23408 fix_new_exp (frag, where, size, exp, pcrel, reloc);
23411 #if defined (OBJ_COFF)
/* COFF hook: validate a fixup before it is applied.  Redirects a Thumb
   BL aimed at a non-Thumb function to that function's Thumb entry
   point (the (interfacearm) stub) -- see the comment below.  */
23413 arm_validate_fix (fixS * fixP)
23415 /* If the destination of the branch is a defined symbol which does not have
23416 the THUMB_FUNC attribute, then we must be calling a function which has
23417 the (interfacearm) attribute. We look for the Thumb entry point to that
23418 function and change the branch to refer to that function instead.  */
23419 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
23420 && fixP->fx_addsy != NULL
23421 && S_IS_DEFINED (fixP->fx_addsy)
23422 && ! THUMB_IS_FUNC (fixP->fx_addsy))
/* Retarget the fixup's symbol at the real (Thumb) entry point.  */
23424 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
/* Decide whether a fixup must be kept as a relocation for the linker
   rather than resolved by the assembler.  Forces relocations for
   ARM<->Thumb interworking branches, resolves short-range immediate
   relocations locally, and always defers group relocations and
   relocations against function symbols.
   NOTE(review): the return type, several `return` statements and brace
   lines are elided in this view.  */
23431 arm_force_relocation (struct fix * fixp)
23433 #if defined (OBJ_COFF) && defined (TE_PE)
23434 if (fixp->fx_r_type == BFD_RELOC_RVA)
23438 /* In case we have a call or a branch to a function in ARM ISA mode from
23439 a thumb function or vice-versa force the relocation. These relocations
23440 are cleared off for some cores that might have blx and simple transformations
/* ARM-mode branch relocs targeting a Thumb function, or Thumb-mode
   branch relocs targeting an ARM function, need linker fix-up.  */
23444 switch (fixp->fx_r_type)
23446 case BFD_RELOC_ARM_PCREL_JUMP:
23447 case BFD_RELOC_ARM_PCREL_CALL:
23448 case BFD_RELOC_THUMB_PCREL_BLX:
23449 if (THUMB_IS_FUNC (fixp->fx_addsy))
23453 case BFD_RELOC_ARM_PCREL_BLX:
23454 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23455 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23456 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23457 if (ARM_IS_FUNC (fixp->fx_addsy))
23466 /* Resolve these relocations even if the symbol is extern or weak.
23467 Technically this is probably wrong due to symbol preemption.
23468 In practice these relocations do not have enough range to be useful
23469 at dynamic link time, and some code (e.g. in the Linux kernel)
23470 expects these references to be resolved. */
23471 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
23472 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
23473 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
23474 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
23475 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
23476 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
23477 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
23478 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
23479 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23480 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
23481 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
23482 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
23483 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
23484 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
23487 /* Always leave these relocations for the linker. */
23488 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23489 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23490 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23493 /* Always generate relocations against function symbols. */
23494 if (fixp->fx_r_type == BFD_RELOC_32
23496 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
23499 return generic_force_reloc (fixp);
23502 #if defined (OBJ_ELF) || defined (OBJ_COFF)
23503 /* Relocations against function names must be left unadjusted,
23504 so that the linker can use this information to generate interworking
23505 stubs. The MIPS version of this function
23506 also prevents relocations that are mips-16 specific, but I do not
23507 know why it does this.
23510 There is one other problem that ought to be addressed here, but
23511 which currently is not: Taking the address of a label (rather
23512 than a function) and then later jumping to that address. Such
23513 addresses also ought to have their bottom bit set (assuming that
23514 they reside in Thumb code), but at the moment they will not. */
/* Return whether a fixup's symbol may be replaced by a section symbol
   plus offset.  Function symbols, Thumb functions, VTABLE, GOT/TLS,
   group, and MOVW/MOVT relocations must keep their original symbol.
   NOTE(review): the return type and the individual `return 0/1` lines
   are elided in this view.  */
23517 arm_fix_adjustable (fixS * fixP)
23519 if (fixP->fx_addsy == NULL)
23522 /* Preserve relocations against symbols with function type. */
23523 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
23526 if (THUMB_IS_FUNC (fixP->fx_addsy)
23527 && fixP->fx_subsy == NULL)
23530 /* We need the symbol name for the VTABLE entries. */
23531 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
23532 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
23535 /* Don't allow symbols to be discarded on GOT related relocs. */
23536 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
23537 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
23538 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
23539 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
23540 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
23541 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
23542 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
23543 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
23544 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
23545 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
23546 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
23547 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
23548 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
23549 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
23552 /* Similarly for group relocations. */
23553 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
23554 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
23555 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
23558 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
23559 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
23560 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
23561 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
23562 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
23563 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
23564 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
23565 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
23566 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
23571 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
/* Return the BFD target name for the output object, chosen by target
   environment (Symbian / VxWorks / NaCl / generic) and endianness.
   NOTE(review): the return type and some #if/#endif lines are elided.  */
23576 elf32_arm_target_format (void)
23579 return (target_big_endian
23580 ? "elf32-bigarm-symbian"
23581 : "elf32-littlearm-symbian");
23582 #elif defined (TE_VXWORKS)
23583 return (target_big_endian
23584 ? "elf32-bigarm-vxworks"
23585 : "elf32-littlearm-vxworks");
23586 #elif defined (TE_NACL)
23587 return (target_big_endian
23588 ? "elf32-bigarm-nacl"
23589 : "elf32-littlearm-nacl");
23591 if (target_big_endian)
23592 return "elf32-bigarm";
23594 return "elf32-littlearm";
/* ELF symbol-frobbing hook: delegates to the generic ELF routine.
   NOTE(review): the return type and the second parameter line (puntp)
   are elided in this view.  */
23599 armelf_frob_symbol (symbolS * symp,
23602 elf_frob_symbol (symp, puntp);
23606 /* MD interface: Finalization. */
/* End-of-assembly cleanup: check that all IT blocks were properly
   terminated, then dump each pending literal pool at the end of the
   (sub)section it was recorded for.
   NOTE(review): the function signature line is elided — presumably
   this is arm_cleanup (void); confirm against the full source.  */
23611 literal_pool * pool;
23613 /* Ensure that all the IT blocks are properly closed. */
23614 check_it_blocks_finished ();
23616 for (pool = list_of_pools; pool; pool = pool->next)
23618 /* Put it at the end of the relevant section. */
23619 subseg_set (pool->section, pool->sub_section);
23621 arm_elf_change_section ();
23628 /* Remove any excess mapping symbols generated for alignment frags in
23629 SEC. We may have created a mapping symbol before a zero byte
23630 alignment; remove it if there's a mapping symbol after the
/* Walk SEC's frag chain and delete redundant mapping symbols ($a/$t/$d
   style markers) created around zero-length alignment frags.
   NOTE(review): the return type, loop condition line and several brace
   lines are elided in this view.  */
23633 check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
23634 void *dummy ATTRIBUTE_UNUSED)
23636 segment_info_type *seginfo = seg_info (sec);
23639 if (seginfo == NULL || seginfo->frchainP == NULL)
23642 for (fragp = seginfo->frchainP->frch_root;
23644 fragp = fragp->fr_next)
23646 symbolS *sym = fragp->tc_frag_data.last_map;
23647 fragS *next = fragp->fr_next;
23649 /* Variable-sized frags have been converted to fixed size by
23650 this point. But if this was variable-sized to start with,
23651 there will be a fixed-size frag after it. So don't handle
23653 if (sym == NULL || next == NULL)
23656 if (S_GET_VALUE (sym) < next->fr_address)
23657 /* Not at the end of this frag. */
23659 know (S_GET_VALUE (sym) == next->fr_address);
23663 if (next->tc_frag_data.first_map != NULL)
23665 /* Next frag starts with a mapping symbol. Discard this
23667 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
23671 if (next->fr_next == NULL)
23673 /* This mapping symbol is at the end of the section. Discard
23675 know (next->fr_fix == 0 && next->fr_var == 0);
23676 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
23680 /* As long as we have empty frags without any mapping symbols,
23682 /* If the next frag is non-empty and does not start with a
23683 mapping symbol, then this mapping symbol is required. */
23684 if (next->fr_address != next->fr_next->fr_address)
23687 next = next->fr_next;
23689 while (next != NULL);
23694 /* Adjust the symbol table. This marks Thumb symbols as distinct from
/* For COFF: rewrite storage classes of Thumb symbols (C_THUMBEXT,
   C_THUMBSTAT, ...) and flag interwork symbols.  For ELF: mark Thumb
   functions with ST_BRANCH_TO_THUMB or STT_ARM_16BIT, prune redundant
   mapping symbols, then run the generic ELF adjustments.
   NOTE(review): the #ifdef OBJ_COFF/OBJ_ELF partition, the switch case
   labels and several brace lines are elided in this view.  */
23698 arm_adjust_symtab (void)
23703 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
23705 if (ARM_IS_THUMB (sym))
23707 if (THUMB_IS_FUNC (sym))
23709 /* Mark the symbol as a Thumb function. */
23710 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
23711 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
23712 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
23714 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
23715 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
23717 as_bad (_("%s: unexpected function type: %d"),
23718 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
23720 else switch (S_GET_STORAGE_CLASS (sym))
23723 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
23726 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
23729 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
23737 if (ARM_IS_INTERWORK (sym))
23738 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
23745 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
23747 if (ARM_IS_THUMB (sym))
23749 elf_symbol_type * elf_sym;
23751 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
23752 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
23754 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
23755 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
23757 /* If it's a .thumb_func, declare it as so,
23758 otherwise tag label as .code 16. */
23759 if (THUMB_IS_FUNC (sym))
23760 elf_sym->internal_elf_sym.st_target_internal
23761 = ST_BRANCH_TO_THUMB;
23762 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
23763 elf_sym->internal_elf_sym.st_info =
23764 ELF_ST_INFO (bind, STT_ARM_16BIT);
23769 /* Remove any overlapping mapping symbols generated by alignment frags. */
23770 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
23771 /* Now do generic ELF adjustments. */
23772 elf_adjust_symtab ();
23776 /* MD interface: Initialization. */
/* Pre-parse the table of well-known floating-point constants (fp_const)
   into fp_values; abort() presumably follows if a constant fails to
   parse — the error-handling line is elided in this view.  */
23779 set_constant_flonums (void)
23783 for (i = 0; i < NUM_FLOAT_VALS; i++)
23784 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
23788 /* Auto-select Thumb mode if it's the only available instruction set for the
23789 given architecture. */
/* If the selected CPU has no ARM (v1) instruction set at all, the only
   encodable instructions are Thumb, so switch the assembler to 16-bit
   (Thumb) mode automatically.  */
23792 autoselect_thumb_from_cpu_variant (void)
23794 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
23795 opcode_select (16);
/* Assembler start-up (body of md_begin — the signature line is elided
   from this view).  Builds the opcode/condition/shift/PSR/register/
   reloc/barrier hash tables, reconciles legacy vs. new-style CPU/FPU
   options, merges the final feature set into cpu_variant, then records
   object-file flags and the BFD machine number.  */
23804 if ( (arm_ops_hsh = hash_new ()) == NULL
23805 || (arm_cond_hsh = hash_new ()) == NULL
23806 || (arm_shift_hsh = hash_new ()) == NULL
23807 || (arm_psr_hsh = hash_new ()) == NULL
23808 || (arm_v7m_psr_hsh = hash_new ()) == NULL
23809 || (arm_reg_hsh = hash_new ()) == NULL
23810 || (arm_reloc_hsh = hash_new ()) == NULL
23811 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
23812 as_fatal (_("virtual memory exhausted"));
/* Populate each hash table from its static description table.  */
23814 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
23815 hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
23816 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
23817 hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
23818 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
23819 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
23820 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
23821 hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
23822 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
23823 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
23824 (void *) (v7m_psrs + i));
23825 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
23826 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
23828 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
23830 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
23831 (void *) (barrier_opt_names + i));
23833 for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
23835 struct reloc_entry * entry = reloc_names + i;
23837 if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
23838 /* This makes encode_branch() use the EABI versions of this relocation. */
23839 entry->reloc = BFD_RELOC_UNUSED;
23841 hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
23845 set_constant_flonums ();
23847 /* Set the cpu variant based on the command-line options. We prefer
23848 -mcpu= over -march= if both are set (as for GCC); and we prefer
23849 -mfpu= over any other way of setting the floating point unit.
23850 Use of legacy options with new options are faulted. */
23853 if (mcpu_cpu_opt || march_cpu_opt)
23854 as_bad (_("use of old and new-style options to set CPU type"));
23856 mcpu_cpu_opt = legacy_cpu;
23858 else if (!mcpu_cpu_opt)
23859 mcpu_cpu_opt = march_cpu_opt;
23864 as_bad (_("use of old and new-style options to set FPU type"));
23866 mfpu_opt = legacy_fpu;
23868 else if (!mfpu_opt)
23870 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
23871 || defined (TE_NetBSD) || defined (TE_VXWORKS))
23872 /* Some environments specify a default FPU. If they don't, infer it
23873 from the processor. */
23875 mfpu_opt = mcpu_fpu_opt;
23877 mfpu_opt = march_fpu_opt;
23879 mfpu_opt = &fpu_default;
23885 if (mcpu_cpu_opt != NULL)
23886 mfpu_opt = &fpu_default;
23887 else if (mcpu_fpu_opt != NULL && ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
23888 mfpu_opt = &fpu_arch_vfp_v2;
23890 mfpu_opt = &fpu_arch_fpa;
23896 mcpu_cpu_opt = &cpu_default;
23897 selected_cpu = cpu_default;
23901 selected_cpu = *mcpu_cpu_opt;
23903 mcpu_cpu_opt = &arm_arch_any;
/* cpu_variant is the union of the CPU's and the FPU's feature bits.  */
23906 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
23908 autoselect_thumb_from_cpu_variant ();
23910 arm_arch_used = thumb_arch_used = arm_arch_none;
23912 #if defined OBJ_COFF || defined OBJ_ELF
23914 unsigned int flags = 0;
23916 #if defined OBJ_ELF
23917 flags = meabi_flags;
23919 switch (meabi_flags)
23921 case EF_ARM_EABI_UNKNOWN:
23923 /* Set the flags in the private structure. */
23924 if (uses_apcs_26) flags |= F_APCS26;
23925 if (support_interwork) flags |= F_INTERWORK;
23926 if (uses_apcs_float) flags |= F_APCS_FLOAT;
23927 if (pic_code) flags |= F_PIC;
23928 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
23929 flags |= F_SOFT_FLOAT;
23931 switch (mfloat_abi_opt)
23933 case ARM_FLOAT_ABI_SOFT:
23934 case ARM_FLOAT_ABI_SOFTFP:
23935 flags |= F_SOFT_FLOAT;
23938 case ARM_FLOAT_ABI_HARD:
23939 if (flags & F_SOFT_FLOAT)
23940 as_bad (_("hard-float conflicts with specified fpu"));
23944 /* Using pure-endian doubles (even if soft-float). */
23945 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
23946 flags |= F_VFP_FLOAT;
23948 #if defined OBJ_ELF
23949 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
23950 flags |= EF_ARM_MAVERICK_FLOAT;
23953 case EF_ARM_EABI_VER4:
23954 case EF_ARM_EABI_VER5:
23955 /* No additional flags to set. */
23962 bfd_set_private_flags (stdoutput, flags);
23964 /* We have run out of flags in the COFF header to encode the
23965 status of ATPCS support, so instead we create a dummy,
23966 empty, debug section called .arm.atpcs. */
23971 sec = bfd_make_section (stdoutput, ".arm.atpcs");
23975 bfd_set_section_flags
23976 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
23977 bfd_set_section_size (stdoutput, sec, 0);
23978 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
23984 /* Record the CPU type as well. */
/* Ordered feature probe: most specific extension first, then newest
   architecture down to oldest.  */
23985 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
23986 mach = bfd_mach_arm_iWMMXt2;
23987 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
23988 mach = bfd_mach_arm_iWMMXt;
23989 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
23990 mach = bfd_mach_arm_XScale;
23991 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
23992 mach = bfd_mach_arm_ep9312;
23993 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
23994 mach = bfd_mach_arm_5TE;
23995 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
23997 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
23998 mach = bfd_mach_arm_5T;
24000 mach = bfd_mach_arm_5;
24002 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
24004 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
24005 mach = bfd_mach_arm_4T;
24007 mach = bfd_mach_arm_4;
24009 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
24010 mach = bfd_mach_arm_3M;
24011 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
24012 mach = bfd_mach_arm_3;
24013 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
24014 mach = bfd_mach_arm_2a;
24015 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
24016 mach = bfd_mach_arm_2;
24018 mach = bfd_mach_arm_unknown;
24020 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
24023 /* Command line processing. */
24026 Invocation line includes a switch not recognized by the base assembler.
24027 See if it's a processor-specific option.
24029 This routine is somewhat complicated by the need for backwards
24030 compatibility (since older releases of gcc can't be changed).
24031 The new options try to make the interface as compatible as
24034 New options (supported) are:
24036 -mcpu=<cpu name> Assemble for selected processor
24037 -march=<architecture name> Assemble for selected architecture
24038 -mfpu=<fpu architecture> Assemble for selected FPU.
24039 -EB/-mbig-endian Big-endian
24040 -EL/-mlittle-endian Little-endian
24041 -k Generate PIC code
24042 -mthumb Start in Thumb mode
24043 -mthumb-interwork Code supports ARM/Thumb interworking
24045 -m[no-]warn-deprecated Warn about deprecated features
24047 For now we will also provide support for:
24049 -mapcs-32 32-bit Program counter
24050 -mapcs-26 26-bit Program counter
24051 -mapcs-float Floats passed in FP registers
24052 -mapcs-reentrant Reentrant code
24054 (sometime these will probably be replaced with -mapcs=<list of options>
24055 and -matpcs=<list of options>)
24057 The remaining options are only supported for backwards compatibility.
24058 Cpu variants, the arm part is optional:
24059 -m[arm]1 Currently not supported.
24060 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
24061 -m[arm]3 Arm 3 processor
24062 -m[arm]6[xx], Arm 6 processors
24063 -m[arm]7[xx][t][[d]m] Arm 7 processors
24064 -m[arm]8[10] Arm 8 processors
24065 -m[arm]9[20][tdmi] Arm 9 processors
24066 -mstrongarm[110[0]] StrongARM processors
24067 -mxscale XScale processors
24068 -m[arm]v[2345[t[e]]] Arm architectures
24069 -mall All (except the ARM1)
24071 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
24072 -mfpe-old (No float load/store multiples)
24073 -mvfpxd VFP Single precision
24075 -mno-fpu Disable all floating point instructions
24077 The following CPU names are recognized:
24078 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
24079 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
24080 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
24081 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
24082 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
24083 arm10t arm10e, arm1020t, arm1020e, arm10200e,
24084 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
/* Command-line option tables consumed by the generic GAS getopt layer:
   short options, OPTION_* codes (endianness codes depend on whether the
   target is bi-endian), and the long-option table.
   NOTE(review): some #else/#endif lines of the conditional block are
   elided in this view.  */
24088 const char * md_shortopts = "m:k";
24090 #ifdef ARM_BI_ENDIAN
24091 #define OPTION_EB (OPTION_MD_BASE + 0)
24092 #define OPTION_EL (OPTION_MD_BASE + 1)
24094 #if TARGET_BYTES_BIG_ENDIAN
24095 #define OPTION_EB (OPTION_MD_BASE + 0)
24097 #define OPTION_EL (OPTION_MD_BASE + 1)
24100 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
24102 struct option md_longopts[] =
24105 {"EB", no_argument, NULL, OPTION_EB},
24108 {"EL", no_argument, NULL, OPTION_EL},
24110 {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
24111 {NULL, no_argument, NULL, 0}
24114 size_t md_longopts_size = sizeof (md_longopts);
/* Table entry type for simple boolean-style options: each entry names
   an option, the int variable it sets, the value to store, and an
   optional deprecation message.  */
24116 struct arm_option_table
24118 char *option; /* Option name to match. */
24119 char *help; /* Help information. */
24120 int *var; /* Variable to change. */
24121 int value; /* What to change it to. */
24122 char *deprecated; /* If non-null, print this message. */
24125 struct arm_option_table arm_opts[] =
24127 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
24128 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
24129 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
24130 &support_interwork, 1, NULL},
24131 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
24132 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
24133 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
24135 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
24136 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
24137 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
24138 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
24141 /* These are recognized by the assembler, but have no effect on code. */
24142 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
24143 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
24145 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
24146 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
24147 &warn_on_deprecated, 0, NULL},
24148 {NULL, NULL, NULL, 0, NULL}
/* Table entry type for legacy -m<cpu>/-m<fpu> options: each entry maps
   a legacy option to a feature set and carries the "use -mcpu=..."
   deprecation message printed when it is used.  */
24151 struct arm_legacy_option_table
24153 char *option; /* Option name to match. */
24154 const arm_feature_set **var; /* Variable to change. */
24155 const arm_feature_set value; /* What to change it to. */
24156 char *deprecated; /* If non-null, print this message. */
24159 const struct arm_legacy_option_table arm_legacy_opts[] =
24161 /* DON'T add any new processors to this list -- we want the whole list
24162 to go away... Add them to the processors table instead. */
24163 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
24164 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
24165 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
24166 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
24167 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
24168 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
24169 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
24170 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
24171 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
24172 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
24173 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
24174 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
24175 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
24176 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
24177 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
24178 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
24179 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
24180 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
24181 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
24182 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
24183 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
24184 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
24185 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
24186 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
24187 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
24188 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
24189 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
24190 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
24191 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
24192 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
24193 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
24194 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
24195 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
24196 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
24197 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
24198 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
24199 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
24200 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
24201 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
24202 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
24203 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
24204 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
24205 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
24206 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
24207 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
24208 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
24209 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
24210 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
24211 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
24212 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
24213 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
24214 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
24215 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
24216 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
24217 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
24218 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
24219 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
24220 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
24221 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
24222 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
24223 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
24224 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
24225 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
24226 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
24227 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
24228 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
24229 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
24230 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
24231 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
24232 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
24233 N_("use -mcpu=strongarm110")},
24234 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
24235 N_("use -mcpu=strongarm1100")},
24236 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
24237 N_("use -mcpu=strongarm1110")},
24238 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
24239 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
24240 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
24242 /* Architecture variants -- don't add any more to this list either. */
24243 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
24244 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
24245 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
24246 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
24247 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
24248 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
24249 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
24250 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
24251 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
24252 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
24253 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
24254 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
24255 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
24256 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
24257 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
24258 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
24259 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
24260 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
24262 /* Floating point variants -- don't add any more to this list either. */
24263 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
24264 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
24265 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
24266 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
24267 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
24269 {NULL, NULL, ARM_ARCH_NONE, NULL}
/* Table entry type for -mcpu=<name>: the CPU's feature set, the FPU
   assumed when none is given explicitly, and the canonical CPU name.
   NOTE(review): the name/name-length field lines are elided in this
   view (see the ARM_CPU_OPT initializer macro below).  */
24272 struct arm_cpu_option_table
24276 const arm_feature_set value;
24277 /* For some CPUs we assume an FPU unless the user explicitly sets
24279 const arm_feature_set default_fpu;
24280 /* The canonical name of the CPU, or NULL to use NAME converted to upper
24282 const char *canonical_name;
24285 /* This list should, at a minimum, contain all the cpu names
24286 recognized by GCC. */
24287 #define ARM_CPU_OPT(N, V, DF, CN) { N, sizeof (N) - 1, V, DF, CN }
24288 static const struct arm_cpu_option_table arm_cpus[] =
24290 ARM_CPU_OPT ("all", ARM_ANY, FPU_ARCH_FPA, NULL),
24291 ARM_CPU_OPT ("arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL),
24292 ARM_CPU_OPT ("arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL),
24293 ARM_CPU_OPT ("arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
24294 ARM_CPU_OPT ("arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL),
24295 ARM_CPU_OPT ("arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24296 ARM_CPU_OPT ("arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24297 ARM_CPU_OPT ("arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24298 ARM_CPU_OPT ("arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24299 ARM_CPU_OPT ("arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24300 ARM_CPU_OPT ("arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24301 ARM_CPU_OPT ("arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
24302 ARM_CPU_OPT ("arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24303 ARM_CPU_OPT ("arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
24304 ARM_CPU_OPT ("arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24305 ARM_CPU_OPT ("arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL),
24306 ARM_CPU_OPT ("arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24307 ARM_CPU_OPT ("arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24308 ARM_CPU_OPT ("arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24309 ARM_CPU_OPT ("arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24310 ARM_CPU_OPT ("arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24311 ARM_CPU_OPT ("arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24312 ARM_CPU_OPT ("arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24313 ARM_CPU_OPT ("arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24314 ARM_CPU_OPT ("arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24315 ARM_CPU_OPT ("arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24316 ARM_CPU_OPT ("arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24317 ARM_CPU_OPT ("arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL),
24318 ARM_CPU_OPT ("arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24319 ARM_CPU_OPT ("arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24320 ARM_CPU_OPT ("arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24321 ARM_CPU_OPT ("arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24322 ARM_CPU_OPT ("arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24323 ARM_CPU_OPT ("strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24324 ARM_CPU_OPT ("strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24325 ARM_CPU_OPT ("strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24326 ARM_CPU_OPT ("strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24327 ARM_CPU_OPT ("strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24328 ARM_CPU_OPT ("arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24329 ARM_CPU_OPT ("arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"),
24330 ARM_CPU_OPT ("arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24331 ARM_CPU_OPT ("arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24332 ARM_CPU_OPT ("arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24333 ARM_CPU_OPT ("arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL),
24334 ARM_CPU_OPT ("fa526", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24335 ARM_CPU_OPT ("fa626", ARM_ARCH_V4, FPU_ARCH_FPA, NULL),
24336 /* For V5 or later processors we default to using VFP; but the user
24337 should really set the FPU type explicitly. */
24338 ARM_CPU_OPT ("arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24339 ARM_CPU_OPT ("arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24340 ARM_CPU_OPT ("arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
24341 ARM_CPU_OPT ("arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"),
24342 ARM_CPU_OPT ("arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
24343 ARM_CPU_OPT ("arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24344 ARM_CPU_OPT ("arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"),
24345 ARM_CPU_OPT ("arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24346 ARM_CPU_OPT ("arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL),
24347 ARM_CPU_OPT ("arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"),
24348 ARM_CPU_OPT ("arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24349 ARM_CPU_OPT ("arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24350 ARM_CPU_OPT ("arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
24351 ARM_CPU_OPT ("arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
24352 ARM_CPU_OPT ("arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24353 ARM_CPU_OPT ("arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"),
24354 ARM_CPU_OPT ("arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL),
24355 ARM_CPU_OPT ("arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24356 ARM_CPU_OPT ("arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24357 ARM_CPU_OPT ("arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2,
24359 ARM_CPU_OPT ("arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL),
24360 ARM_CPU_OPT ("fa606te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24361 ARM_CPU_OPT ("fa616te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24362 ARM_CPU_OPT ("fa626te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24363 ARM_CPU_OPT ("fmp626", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24364 ARM_CPU_OPT ("fa726te", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL),
24365 ARM_CPU_OPT ("arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"),
24366 ARM_CPU_OPT ("arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL),
24367 ARM_CPU_OPT ("arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2,
24369 ARM_CPU_OPT ("arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL),
24370 ARM_CPU_OPT ("mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, "MPCore"),
24371 ARM_CPU_OPT ("mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, "MPCore"),
24372 ARM_CPU_OPT ("arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL),
24373 ARM_CPU_OPT ("arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL),
24374 ARM_CPU_OPT ("arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL),
24375 ARM_CPU_OPT ("arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL),
24376 ARM_CPU_OPT ("cortex-a5", ARM_ARCH_V7A_MP_SEC,
24377 FPU_NONE, "Cortex-A5"),
24378 ARM_CPU_OPT ("cortex-a7", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
24380 ARM_CPU_OPT ("cortex-a8", ARM_ARCH_V7A_SEC,
24381 ARM_FEATURE (0, FPU_VFP_V3
24382 | FPU_NEON_EXT_V1),
24384 ARM_CPU_OPT ("cortex-a9", ARM_ARCH_V7A_MP_SEC,
24385 ARM_FEATURE (0, FPU_VFP_V3
24386 | FPU_NEON_EXT_V1),
24388 ARM_CPU_OPT ("cortex-a12", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
24390 ARM_CPU_OPT ("cortex-a15", ARM_ARCH_V7VE, FPU_ARCH_NEON_VFP_V4,
24392 ARM_CPU_OPT ("cortex-a53", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24394 ARM_CPU_OPT ("cortex-a57", ARM_ARCH_V8A, FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24396 ARM_CPU_OPT ("cortex-r4", ARM_ARCH_V7R, FPU_NONE, "Cortex-R4"),
24397 ARM_CPU_OPT ("cortex-r4f", ARM_ARCH_V7R, FPU_ARCH_VFP_V3D16,
24399 ARM_CPU_OPT ("cortex-r5", ARM_ARCH_V7R_IDIV,
24400 FPU_NONE, "Cortex-R5"),
24401 ARM_CPU_OPT ("cortex-r7", ARM_ARCH_V7R_IDIV,
24402 FPU_ARCH_VFP_V3D16,
24404 ARM_CPU_OPT ("cortex-m4", ARM_ARCH_V7EM, FPU_NONE, "Cortex-M4"),
24405 ARM_CPU_OPT ("cortex-m3", ARM_ARCH_V7M, FPU_NONE, "Cortex-M3"),
24406 ARM_CPU_OPT ("cortex-m1", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M1"),
24407 ARM_CPU_OPT ("cortex-m0", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0"),
24408 ARM_CPU_OPT ("cortex-m0plus", ARM_ARCH_V6SM, FPU_NONE, "Cortex-M0+"),
24409 /* ??? XSCALE is really an architecture. */
24410 ARM_CPU_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
24411 /* ??? iwmmxt is not a processor. */
24412 ARM_CPU_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL),
24413 ARM_CPU_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL),
24414 ARM_CPU_OPT ("i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL),
24416 ARM_CPU_OPT ("ep9312", ARM_FEATURE (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
24417 FPU_ARCH_MAVERICK, "ARM920T"),
24418 /* Marvell processors. */
24419 ARM_CPU_OPT ("marvell-pj4", ARM_FEATURE (ARM_AEXT_V7A | ARM_EXT_MP | ARM_EXT_SEC, 0),
24420 FPU_ARCH_VFP_V3D16, NULL),
24422 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
24426 struct arm_arch_option_table
24430 const arm_feature_set value;
24431 const arm_feature_set default_fpu;
24434 /* This list should, at a minimum, contain all the architecture names
24435 recognized by GCC. */
24436 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
24437 static const struct arm_arch_option_table arm_archs[] =
24439 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
24440 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
24441 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
24442 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
24443 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
24444 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
24445 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
24446 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
24447 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
24448 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
24449 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
24450 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
24451 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
24452 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
24453 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP),
24454 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP),
24455 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP),
24456 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP),
24457 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP),
24458 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP),
24459 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP),
24460 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP),
24461 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP),
24462 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP),
24463 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP),
24464 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP),
24465 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
24466 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
24467 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP),
24468 /* The official spelling of the ARMv7 profile variants is the dashed form.
24469 Accept the non-dashed form for compatibility with old toolchains. */
24470 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP),
24471 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP),
24472 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP),
24473 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
24474 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP),
24475 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP),
24476 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
24477 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP),
24478 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP),
24479 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
24480 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
24481 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
24482 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
24484 #undef ARM_ARCH_OPT
24486 /* ISA extensions in the co-processor and main instruction set space. */
24487 struct arm_option_extension_value_table
24491 const arm_feature_set value;
24492 const arm_feature_set allowed_archs;
24495 /* The following table must be in alphabetical order with a NULL last entry.
24497 #define ARM_EXT_OPT(N, V, AA) { N, sizeof (N) - 1, V, AA }
24498 static const struct arm_option_extension_value_table arm_extensions[] =
24500 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE (ARM_EXT_V8, 0)),
24501 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
24502 ARM_FEATURE (ARM_EXT_V8, 0)),
24503 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8,
24504 ARM_FEATURE (ARM_EXT_V8, 0)),
24505 ARM_EXT_OPT ("idiv", ARM_FEATURE (ARM_EXT_ADIV | ARM_EXT_DIV, 0),
24506 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
24507 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE (0, ARM_CEXT_IWMMXT), ARM_ANY),
24508 ARM_EXT_OPT ("iwmmxt2",
24509 ARM_FEATURE (0, ARM_CEXT_IWMMXT2), ARM_ANY),
24510 ARM_EXT_OPT ("maverick",
24511 ARM_FEATURE (0, ARM_CEXT_MAVERICK), ARM_ANY),
24512 ARM_EXT_OPT ("mp", ARM_FEATURE (ARM_EXT_MP, 0),
24513 ARM_FEATURE (ARM_EXT_V7A | ARM_EXT_V7R, 0)),
24514 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
24515 ARM_FEATURE (ARM_EXT_V8, 0)),
24516 ARM_EXT_OPT ("os", ARM_FEATURE (ARM_EXT_OS, 0),
24517 ARM_FEATURE (ARM_EXT_V6M, 0)),
24518 ARM_EXT_OPT ("sec", ARM_FEATURE (ARM_EXT_SEC, 0),
24519 ARM_FEATURE (ARM_EXT_V6K | ARM_EXT_V7A, 0)),
24520 ARM_EXT_OPT ("virt", ARM_FEATURE (ARM_EXT_VIRT | ARM_EXT_ADIV
24522 ARM_FEATURE (ARM_EXT_V7A, 0)),
24523 ARM_EXT_OPT ("xscale",ARM_FEATURE (0, ARM_CEXT_XSCALE), ARM_ANY),
24524 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
24528 /* ISA floating-point and Advanced SIMD extensions. */
24529 struct arm_option_fpu_value_table
24532 const arm_feature_set value;
24535 /* This list should, at a minimum, contain all the fpu names
24536 recognized by GCC. */
24537 static const struct arm_option_fpu_value_table arm_fpus[] =
24539 {"softfpa", FPU_NONE},
24540 {"fpe", FPU_ARCH_FPE},
24541 {"fpe2", FPU_ARCH_FPE},
24542 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
24543 {"fpa", FPU_ARCH_FPA},
24544 {"fpa10", FPU_ARCH_FPA},
24545 {"fpa11", FPU_ARCH_FPA},
24546 {"arm7500fe", FPU_ARCH_FPA},
24547 {"softvfp", FPU_ARCH_VFP},
24548 {"softvfp+vfp", FPU_ARCH_VFP_V2},
24549 {"vfp", FPU_ARCH_VFP_V2},
24550 {"vfp9", FPU_ARCH_VFP_V2},
24551 {"vfp3", FPU_ARCH_VFP_V3}, /* For backwards compatbility. */
24552 {"vfp10", FPU_ARCH_VFP_V2},
24553 {"vfp10-r0", FPU_ARCH_VFP_V1},
24554 {"vfpxd", FPU_ARCH_VFP_V1xD},
24555 {"vfpv2", FPU_ARCH_VFP_V2},
24556 {"vfpv3", FPU_ARCH_VFP_V3},
24557 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16},
24558 {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
24559 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16},
24560 {"vfpv3xd", FPU_ARCH_VFP_V3xD},
24561 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16},
24562 {"arm1020t", FPU_ARCH_VFP_V1},
24563 {"arm1020e", FPU_ARCH_VFP_V2},
24564 {"arm1136jfs", FPU_ARCH_VFP_V2},
24565 {"arm1136jf-s", FPU_ARCH_VFP_V2},
24566 {"maverick", FPU_ARCH_MAVERICK},
24567 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
24568 {"neon-fp16", FPU_ARCH_NEON_FP16},
24569 {"vfpv4", FPU_ARCH_VFP_V4},
24570 {"vfpv4-d16", FPU_ARCH_VFP_V4D16},
24571 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16},
24572 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4},
24573 {"fp-armv8", FPU_ARCH_VFP_ARMV8},
24574 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8},
24575 {"crypto-neon-fp-armv8",
24576 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
24577 {NULL, ARM_ARCH_NONE}
24580 struct arm_option_value_table
24586 static const struct arm_option_value_table arm_float_abis[] =
24588 {"hard", ARM_FLOAT_ABI_HARD},
24589 {"softfp", ARM_FLOAT_ABI_SOFTFP},
24590 {"soft", ARM_FLOAT_ABI_SOFT},
24595 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
24596 static const struct arm_option_value_table arm_eabis[] =
24598 {"gnu", EF_ARM_EABI_UNKNOWN},
24599 {"4", EF_ARM_EABI_VER4},
24600 {"5", EF_ARM_EABI_VER5},
/* A long (multi-character) option: OPTION is the prefix to match, FUNC
   consumes the text following it.  */
struct arm_long_option_table
{
  char * option;		/* Substring to match.  */
  char * help;			/* Help information.  */
  int (* func) (char * subopt);	/* Function to decode sub-option.  */
  char * deprecated;		/* If non-null, print this message.  */
};
24614 arm_parse_extension (char *str, const arm_feature_set **opt_p)
24616 arm_feature_set *ext_set = (arm_feature_set *)
24617 xmalloc (sizeof (arm_feature_set));
24619 /* We insist on extensions being specified in alphabetical order, and with
24620 extensions being added before being removed. We achieve this by having
24621 the global ARM_EXTENSIONS table in alphabetical order, and using the
24622 ADDING_VALUE variable to indicate whether we are adding an extension (1)
24623 or removing it (0) and only allowing it to change in the order
24625 const struct arm_option_extension_value_table * opt = NULL;
24626 int adding_value = -1;
24628 /* Copy the feature set, so that we can modify it. */
24629 *ext_set = **opt_p;
24632 while (str != NULL && *str != 0)
24639 as_bad (_("invalid architectural extension"));
24644 ext = strchr (str, '+');
24649 len = strlen (str);
24651 if (len >= 2 && strncmp (str, "no", 2) == 0)
24653 if (adding_value != 0)
24656 opt = arm_extensions;
24664 if (adding_value == -1)
24667 opt = arm_extensions;
24669 else if (adding_value != 1)
24671 as_bad (_("must specify extensions to add before specifying "
24672 "those to remove"));
24679 as_bad (_("missing architectural extension"));
24683 gas_assert (adding_value != -1);
24684 gas_assert (opt != NULL);
24686 /* Scan over the options table trying to find an exact match. */
24687 for (; opt->name != NULL; opt++)
24688 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24690 /* Check we can apply the extension to this architecture. */
24691 if (!ARM_CPU_HAS_FEATURE (*ext_set, opt->allowed_archs))
24693 as_bad (_("extension does not apply to the base architecture"));
24697 /* Add or remove the extension. */
24699 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
24701 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->value);
24706 if (opt->name == NULL)
24708 /* Did we fail to find an extension because it wasn't specified in
24709 alphabetical order, or because it does not exist? */
24711 for (opt = arm_extensions; opt->name != NULL; opt++)
24712 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24715 if (opt->name == NULL)
24716 as_bad (_("unknown architectural extension `%s'"), str);
24718 as_bad (_("architectural extensions must be specified in "
24719 "alphabetical order"));
24725 /* We should skip the extension we've just matched the next time
24737 arm_parse_cpu (char *str)
24739 const struct arm_cpu_option_table *opt;
24740 char *ext = strchr (str, '+');
24746 len = strlen (str);
24750 as_bad (_("missing cpu name `%s'"), str);
24754 for (opt = arm_cpus; opt->name != NULL; opt++)
24755 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24757 mcpu_cpu_opt = &opt->value;
24758 mcpu_fpu_opt = &opt->default_fpu;
24759 if (opt->canonical_name)
24760 strcpy (selected_cpu_name, opt->canonical_name);
24765 for (i = 0; i < len; i++)
24766 selected_cpu_name[i] = TOUPPER (opt->name[i]);
24767 selected_cpu_name[i] = 0;
24771 return arm_parse_extension (ext, &mcpu_cpu_opt);
24776 as_bad (_("unknown cpu `%s'"), str);
24781 arm_parse_arch (char *str)
24783 const struct arm_arch_option_table *opt;
24784 char *ext = strchr (str, '+');
24790 len = strlen (str);
24794 as_bad (_("missing architecture name `%s'"), str);
24798 for (opt = arm_archs; opt->name != NULL; opt++)
24799 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
24801 march_cpu_opt = &opt->value;
24802 march_fpu_opt = &opt->default_fpu;
24803 strcpy (selected_cpu_name, opt->name);
24806 return arm_parse_extension (ext, &march_cpu_opt);
24811 as_bad (_("unknown architecture `%s'\n"), str);
24816 arm_parse_fpu (char * str)
24818 const struct arm_option_fpu_value_table * opt;
24820 for (opt = arm_fpus; opt->name != NULL; opt++)
24821 if (streq (opt->name, str))
24823 mfpu_opt = &opt->value;
24827 as_bad (_("unknown floating point format `%s'\n"), str);
24832 arm_parse_float_abi (char * str)
24834 const struct arm_option_value_table * opt;
24836 for (opt = arm_float_abis; opt->name != NULL; opt++)
24837 if (streq (opt->name, str))
24839 mfloat_abi_opt = opt->value;
24843 as_bad (_("unknown floating point abi `%s'\n"), str);
24849 arm_parse_eabi (char * str)
24851 const struct arm_option_value_table *opt;
24853 for (opt = arm_eabis; opt->name != NULL; opt++)
24854 if (streq (opt->name, str))
24856 meabi_flags = opt->value;
24859 as_bad (_("unknown EABI `%s'\n"), str);
24865 arm_parse_it_mode (char * str)
24867 bfd_boolean ret = TRUE;
24869 if (streq ("arm", str))
24870 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
24871 else if (streq ("thumb", str))
24872 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
24873 else if (streq ("always", str))
24874 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
24875 else if (streq ("never", str))
24876 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
24879 as_bad (_("unknown implicit IT mode `%s', should be "\
24880 "arm, thumb, always, or never."), str);
24888 arm_ccs_mode (char * unused ATTRIBUTE_UNUSED)
24890 codecomposer_syntax = TRUE;
24891 arm_comment_chars[0] = ';';
24892 arm_line_separator_chars[0] = 0;
24896 struct arm_long_option_table arm_long_opts[] =
24898 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
24899 arm_parse_cpu, NULL},
24900 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
24901 arm_parse_arch, NULL},
24902 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
24903 arm_parse_fpu, NULL},
24904 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
24905 arm_parse_float_abi, NULL},
24907 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
24908 arm_parse_eabi, NULL},
24910 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
24911 arm_parse_it_mode, NULL},
24912 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
24913 arm_ccs_mode, NULL},
24914 {NULL, NULL, 0, NULL}
24918 md_parse_option (int c, char * arg)
24920 struct arm_option_table *opt;
24921 const struct arm_legacy_option_table *fopt;
24922 struct arm_long_option_table *lopt;
24928 target_big_endian = 1;
24934 target_big_endian = 0;
24938 case OPTION_FIX_V4BX:
24943 /* Listing option. Just ignore these, we don't support additional
24948 for (opt = arm_opts; opt->option != NULL; opt++)
24950 if (c == opt->option[0]
24951 && ((arg == NULL && opt->option[1] == 0)
24952 || streq (arg, opt->option + 1)))
24954 /* If the option is deprecated, tell the user. */
24955 if (warn_on_deprecated && opt->deprecated != NULL)
24956 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
24957 arg ? arg : "", _(opt->deprecated));
24959 if (opt->var != NULL)
24960 *opt->var = opt->value;
24966 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
24968 if (c == fopt->option[0]
24969 && ((arg == NULL && fopt->option[1] == 0)
24970 || streq (arg, fopt->option + 1)))
24972 /* If the option is deprecated, tell the user. */
24973 if (warn_on_deprecated && fopt->deprecated != NULL)
24974 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
24975 arg ? arg : "", _(fopt->deprecated));
24977 if (fopt->var != NULL)
24978 *fopt->var = &fopt->value;
24984 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
24986 /* These options are expected to have an argument. */
24987 if (c == lopt->option[0]
24989 && strncmp (arg, lopt->option + 1,
24990 strlen (lopt->option + 1)) == 0)
24992 /* If the option is deprecated, tell the user. */
24993 if (warn_on_deprecated && lopt->deprecated != NULL)
24994 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
24995 _(lopt->deprecated));
24997 /* Call the sup-option parser. */
24998 return lopt->func (arg + strlen (lopt->option) - 1);
25009 md_show_usage (FILE * fp)
25011 struct arm_option_table *opt;
25012 struct arm_long_option_table *lopt;
25014 fprintf (fp, _(" ARM-specific assembler options:\n"));
25016 for (opt = arm_opts; opt->option != NULL; opt++)
25017 if (opt->help != NULL)
25018 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
25020 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
25021 if (lopt->help != NULL)
25022 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
25026 -EB assemble code for a big-endian cpu\n"));
25031 -EL assemble code for a little-endian cpu\n"));
25035 --fix-v4bx Allow BX in ARMv4 code\n"));
25043 arm_feature_set flags;
25044 } cpu_arch_ver_table;
25046 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
25047 least features first. */
25048 static const cpu_arch_ver_table cpu_arch_ver[] =
25054 {4, ARM_ARCH_V5TE},
25055 {5, ARM_ARCH_V5TEJ},
25059 {11, ARM_ARCH_V6M},
25060 {12, ARM_ARCH_V6SM},
25061 {8, ARM_ARCH_V6T2},
25062 {10, ARM_ARCH_V7VE},
25063 {10, ARM_ARCH_V7R},
25064 {10, ARM_ARCH_V7M},
25065 {14, ARM_ARCH_V8A},
25069 /* Set an attribute if it has not already been set by the user. */
25071 aeabi_set_attribute_int (int tag, int value)
25074 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25075 || !attributes_set_explicitly[tag])
25076 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
25080 aeabi_set_attribute_string (int tag, const char *value)
25083 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
25084 || !attributes_set_explicitly[tag])
25085 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
25088 /* Set the public EABI object attributes. */
25090 aeabi_set_public_attributes (void)
25095 int fp16_optional = 0;
25096 arm_feature_set flags;
25097 arm_feature_set tmp;
25098 const cpu_arch_ver_table *p;
25100 /* Choose the architecture based on the capabilities of the requested cpu
25101 (if any) and/or the instructions actually used. */
25102 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
25103 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
25104 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
25106 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
25107 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
25109 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
25110 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
25112 /* Allow the user to override the reported architecture. */
25115 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
25116 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
25119 /* We need to make sure that the attributes do not identify us as v6S-M
25120 when the only v6S-M feature in use is the Operating System Extensions. */
25121 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_os))
25122 if (!ARM_CPU_HAS_FEATURE (flags, arm_arch_v6m_only))
25123 ARM_CLEAR_FEATURE (flags, flags, arm_ext_os);
25127 for (p = cpu_arch_ver; p->val; p++)
25129 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
25132 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
25136 /* The table lookup above finds the last architecture to contribute
25137 a new feature. Unfortunately, Tag13 is a subset of the union of
25138 v6T2 and v7-M, so it is never seen as contributing a new feature.
25139 We can not search for the last entry which is entirely used,
25140 because if no CPU is specified we build up only those flags
25141 actually used. Perhaps we should separate out the specified
25142 and implicit cases. Avoid taking this path for -march=all by
25143 checking for contradictory v7-A / v7-M features. */
25145 && !ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a)
25146 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m)
25147 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v6_dsp))
25150 /* Tag_CPU_name. */
25151 if (selected_cpu_name[0])
25155 q = selected_cpu_name;
25156 if (strncmp (q, "armv", 4) == 0)
25161 for (i = 0; q[i]; i++)
25162 q[i] = TOUPPER (q[i]);
25164 aeabi_set_attribute_string (Tag_CPU_name, q);
25167 /* Tag_CPU_arch. */
25168 aeabi_set_attribute_int (Tag_CPU_arch, arch);
25170 /* Tag_CPU_arch_profile. */
25171 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
25173 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
25175 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_m))
25180 if (profile != '\0')
25181 aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
25183 /* Tag_ARM_ISA_use. */
25184 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
25186 aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
25188 /* Tag_THUMB_ISA_use. */
25189 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
25191 aeabi_set_attribute_int (Tag_THUMB_ISA_use,
25192 ARM_CPU_HAS_FEATURE (flags, arm_arch_t2) ? 2 : 1);
25194 /* Tag_VFP_arch. */
25195 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8))
25196 aeabi_set_attribute_int (Tag_VFP_arch, 7);
25197 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
25198 aeabi_set_attribute_int (Tag_VFP_arch,
25199 ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
25201 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
25204 aeabi_set_attribute_int (Tag_VFP_arch, 3);
25206 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
25208 aeabi_set_attribute_int (Tag_VFP_arch, 4);
25211 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
25212 aeabi_set_attribute_int (Tag_VFP_arch, 2);
25213 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
25214 || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
25215 aeabi_set_attribute_int (Tag_VFP_arch, 1);
25217 /* Tag_ABI_HardFP_use. */
25218 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
25219 && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
25220 aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
25222 /* Tag_WMMX_arch. */
25223 if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
25224 aeabi_set_attribute_int (Tag_WMMX_arch, 2);
25225 else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
25226 aeabi_set_attribute_int (Tag_WMMX_arch, 1);
25228 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
25229 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
25230 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
25231 else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
25233 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
25235 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
25239 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
25244 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
25245 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
25246 aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
25250 We set Tag_DIV_use to two when integer divide instructions have been used
25251 in ARM state, or when Thumb integer divide instructions have been used,
25252 but we have no architecture profile set, nor have we any ARM instructions.
25254 For ARMv8 we set the tag to 0 as integer divide is implied by the base
25257 For new architectures we will have to check these tests. */
25258 gas_assert (arch <= TAG_CPU_ARCH_V8);
25259 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8))
25260 aeabi_set_attribute_int (Tag_DIV_use, 0);
25261 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
25262 || (profile == '\0'
25263 && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
25264 && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
25265 aeabi_set_attribute_int (Tag_DIV_use, 2);
25267 /* Tag_MP_extension_use. */
25268 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
25269 aeabi_set_attribute_int (Tag_MPextension_use, 1);
25271 /* Tag Virtualization_use. */
25272 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
25274 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
25277 aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
25280 /* Add the default contents for the .ARM.attributes section. */
25284 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
25287 aeabi_set_public_attributes ();
25289 #endif /* OBJ_ELF */
25292 /* Parse a .cpu directive. */
25295 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
25297 const struct arm_cpu_option_table *opt;
25301 name = input_line_pointer;
25302 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25303 input_line_pointer++;
25304 saved_char = *input_line_pointer;
25305 *input_line_pointer = 0;
25307 /* Skip the first "all" entry. */
25308 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
25309 if (streq (opt->name, name))
25311 mcpu_cpu_opt = &opt->value;
25312 selected_cpu = opt->value;
25313 if (opt->canonical_name)
25314 strcpy (selected_cpu_name, opt->canonical_name);
25318 for (i = 0; opt->name[i]; i++)
25319 selected_cpu_name[i] = TOUPPER (opt->name[i]);
25321 selected_cpu_name[i] = 0;
25323 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25324 *input_line_pointer = saved_char;
25325 demand_empty_rest_of_line ();
25328 as_bad (_("unknown cpu `%s'"), name);
25329 *input_line_pointer = saved_char;
25330 ignore_rest_of_line ();
25334 /* Parse a .arch directive. */
25337 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
25339 const struct arm_arch_option_table *opt;
25343 name = input_line_pointer;
25344 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25345 input_line_pointer++;
25346 saved_char = *input_line_pointer;
25347 *input_line_pointer = 0;
25349 /* Skip the first "all" entry. */
25350 for (opt = arm_archs + 1; opt->name != NULL; opt++)
25351 if (streq (opt->name, name))
25353 mcpu_cpu_opt = &opt->value;
25354 selected_cpu = opt->value;
25355 strcpy (selected_cpu_name, opt->name);
25356 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25357 *input_line_pointer = saved_char;
25358 demand_empty_rest_of_line ();
25362 as_bad (_("unknown architecture `%s'\n"), name);
25363 *input_line_pointer = saved_char;
25364 ignore_rest_of_line ();
25368 /* Parse a .object_arch directive. */
25371 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
25373 const struct arm_arch_option_table *opt;
25377 name = input_line_pointer;
25378 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25379 input_line_pointer++;
25380 saved_char = *input_line_pointer;
25381 *input_line_pointer = 0;
25383 /* Skip the first "all" entry. */
25384 for (opt = arm_archs + 1; opt->name != NULL; opt++)
25385 if (streq (opt->name, name))
25387 object_arch = &opt->value;
25388 *input_line_pointer = saved_char;
25389 demand_empty_rest_of_line ();
25393 as_bad (_("unknown architecture `%s'\n"), name);
25394 *input_line_pointer = saved_char;
25395 ignore_rest_of_line ();
25398 /* Parse a .arch_extension directive. */
25401 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
25403 const struct arm_option_extension_value_table *opt;
25406 int adding_value = 1;
25408 name = input_line_pointer;
25409 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25410 input_line_pointer++;
25411 saved_char = *input_line_pointer;
25412 *input_line_pointer = 0;
25414 if (strlen (name) >= 2
25415 && strncmp (name, "no", 2) == 0)
25421 for (opt = arm_extensions; opt->name != NULL; opt++)
25422 if (streq (opt->name, name))
25424 if (!ARM_CPU_HAS_FEATURE (*mcpu_cpu_opt, opt->allowed_archs))
25426 as_bad (_("architectural extension `%s' is not allowed for the "
25427 "current base architecture"), name);
25432 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_cpu, opt->value);
25434 ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, opt->value);
25436 mcpu_cpu_opt = &selected_cpu;
25437 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25438 *input_line_pointer = saved_char;
25439 demand_empty_rest_of_line ();
25443 if (opt->name == NULL)
25444 as_bad (_("unknown architecture extension `%s'\n"), name);
25446 *input_line_pointer = saved_char;
25447 ignore_rest_of_line ();
25450 /* Parse a .fpu directive. */
25453 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
25455 const struct arm_option_fpu_value_table *opt;
25459 name = input_line_pointer;
25460 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
25461 input_line_pointer++;
25462 saved_char = *input_line_pointer;
25463 *input_line_pointer = 0;
25465 for (opt = arm_fpus; opt->name != NULL; opt++)
25466 if (streq (opt->name, name))
25468 mfpu_opt = &opt->value;
25469 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
25470 *input_line_pointer = saved_char;
25471 demand_empty_rest_of_line ();
25475 as_bad (_("unknown floating point format `%s'\n"), name);
25476 *input_line_pointer = saved_char;
25477 ignore_rest_of_line ();
25480 /* Copy symbol information. */
25483 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
25485 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
25489 /* Given a symbolic attribute NAME, return the proper integer value.
25490 Returns -1 if the attribute is not known. */
/* NOTE(review): this extraction is gapped -- the return type, the struct
   member declarations, some T(...) table entries, the matching #undef T,
   the loop-variable declaration and the final `return -1' are not visible
   here; only comments have been added to the visible code.  */
25493 arm_convert_symbolic_attribute (const char *name)
/* Lookup table mapping EABI build-attribute tag names (as written in
   .eabi_attribute directives) to their numeric Tag_* values.  */
25495 static const struct
25500 attribute_table[] =
25502 /* When you modify this table you should
25503 also modify the list in doc/c-arm.texi. */
/* T(tag) expands to { "tag", tag }: the spelling of the tag plus its
   numeric value, so each table row stays in sync automatically.  */
25504 #define T(tag) {#tag, tag}
25505 T (Tag_CPU_raw_name),
25508 T (Tag_CPU_arch_profile),
25509 T (Tag_ARM_ISA_use),
25510 T (Tag_THUMB_ISA_use),
25514 T (Tag_Advanced_SIMD_arch),
25515 T (Tag_PCS_config),
25516 T (Tag_ABI_PCS_R9_use),
25517 T (Tag_ABI_PCS_RW_data),
25518 T (Tag_ABI_PCS_RO_data),
25519 T (Tag_ABI_PCS_GOT_use),
25520 T (Tag_ABI_PCS_wchar_t),
25521 T (Tag_ABI_FP_rounding),
25522 T (Tag_ABI_FP_denormal),
25523 T (Tag_ABI_FP_exceptions),
25524 T (Tag_ABI_FP_user_exceptions),
25525 T (Tag_ABI_FP_number_model),
25526 T (Tag_ABI_align_needed),
25527 T (Tag_ABI_align8_needed),
25528 T (Tag_ABI_align_preserved),
25529 T (Tag_ABI_align8_preserved),
25530 T (Tag_ABI_enum_size),
25531 T (Tag_ABI_HardFP_use),
25532 T (Tag_ABI_VFP_args),
25533 T (Tag_ABI_WMMX_args),
25534 T (Tag_ABI_optimization_goals),
25535 T (Tag_ABI_FP_optimization_goals),
25536 T (Tag_compatibility),
25537 T (Tag_CPU_unaligned_access),
25538 T (Tag_FP_HP_extension),
25539 T (Tag_VFP_HP_extension),
25540 T (Tag_ABI_FP_16bit_format),
25541 T (Tag_MPextension_use),
25543 T (Tag_nodefaults),
25544 T (Tag_also_compatible_with),
25545 T (Tag_conformance),
25547 T (Tag_Virtualization_use),
25548 /* We deliberately do not include Tag_MPextension_use_legacy. */
/* Case-sensitive linear scan of the table; the tag number is returned on
   a match.  The fall-through `return -1' for an unknown name is in a line
   not visible in this extraction.  */
25556 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
25557 if (streq (name, attribute_table[i].name))
25558 return attribute_table[i].tag;
25564 /* Apply sym value for relocations only in the case that
25565 they are for local symbols and you have the respective
25566 architectural feature for blx and simple switches. */
/* NOTE(review): gapped extraction -- the return type, the first clause of
   the `if' condition (presumably a check that fixP->fx_addsy exists --
   TODO confirm), and the per-case return statements are not visible here.
   Only comments have been added to the visible code.  */
25568 arm_apply_sym_value (struct fix * fixP)
/* Only proceed when the selected CPU has ARMv5T (i.e. BLX exists) and the
   symbol is not one that must keep its relocation (weak/external etc.,
   per S_FORCE_RELOC).  */
25571 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
25572 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
/* Dispatch on the relocation type: the symbol value may be applied when
   an interworking branch targets a function of the matching ISA.  */
25574 switch (fixP->fx_r_type)
/* ARM-side BLX / Thumb branch: applicable when the target is marked as an
   ARM function.  */
25576 case BFD_RELOC_ARM_PCREL_BLX:
25577 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25578 if (ARM_IS_FUNC (fixP->fx_addsy))
/* ARM call / Thumb-side BLX: applicable when the target is marked as a
   Thumb function.  */
25582 case BFD_RELOC_ARM_PCREL_CALL:
25583 case BFD_RELOC_THUMB_PCREL_BLX:
25584 if (THUMB_IS_FUNC (fixP->fx_addsy))
25595 #endif /* OBJ_ELF */