1 /* tc-arm.c -- Assemble for the ARM
2 Copyright (C) 1994-2018 Free Software Foundation, Inc.
3 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 Modified by David Taylor (dtaylor@armltd.co.uk)
5 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
9 This file is part of GAS, the GNU Assembler.
11 GAS is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 3, or (at your option)
16 GAS is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GAS; see the file COPYING. If not, write to the Free
23 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
30 #include "safe-ctype.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
38 #include "dw2gencfi.h"
41 #include "dwarf2dbg.h"
44 /* Must be at least the size of the largest unwind opcode (currently two). */
45 #define ARM_OPCODE_CHUNK_SIZE 8
47 /* This structure holds the unwinding state. */
52 symbolS * table_entry;
53 symbolS * personality_routine;
54 int personality_index;
55 /* The segment containing the function. */
58 /* Opcodes generated from this function. */
59 unsigned char * opcodes;
62 /* The number of bytes pushed to the stack. */
64 /* We don't add stack adjustment opcodes immediately so that we can merge
65 multiple adjustments. We can also omit the final adjustment
66 when using a frame pointer. */
67 offsetT pending_offset;
68 /* These two fields are set by both unwind_movsp and unwind_setfp. They
69 hold the reg+offset to use when restoring sp from a frame pointer. */
72 /* Nonzero if an unwind_setfp directive has been seen. */
74 /* Nonzero if the last opcode restores sp from fp_reg. */
75 unsigned sp_restored:1;
78 /* Whether --fdpic was given. */
83 /* Results from operand parsing worker functions. */
87 PARSE_OPERAND_SUCCESS,
89 PARSE_OPERAND_FAIL_NO_BACKTRACK
90 } parse_operand_result;
99 /* Types of processor to assemble for. */
101 /* The code that was here used to select a default CPU depending on compiler
102 pre-defines which were only present when doing native builds, thus
103 changing gas' default behaviour depending upon the build host.
105 If you have a target that requires a default CPU option then the you
106 should define CPU_DEFAULT here. */
111 # define FPU_DEFAULT FPU_ARCH_FPA
112 # elif defined (TE_NetBSD)
114 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
116 /* Legacy a.out format. */
117 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
119 # elif defined (TE_VXWORKS)
120 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
122 /* For backwards compatibility, default to FPA. */
123 # define FPU_DEFAULT FPU_ARCH_FPA
125 #endif /* ifndef FPU_DEFAULT */
127 #define streq(a, b) (strcmp (a, b) == 0)
129 /* Current set of feature bits available (CPU+FPU). Different from
130 selected_cpu + selected_fpu in case of autodetection since the CPU
131 feature bits are then all set. */
132 static arm_feature_set cpu_variant;
133 /* Feature bits used in each execution state. Used to set build attribute
134 (in particular Tag_*_ISA_use) in CPU autodetection mode. */
135 static arm_feature_set arm_arch_used;
136 static arm_feature_set thumb_arch_used;
138 /* Flags stored in private area of BFD structure. */
139 static int uses_apcs_26 = FALSE;
140 static int atpcs = FALSE;
141 static int support_interwork = FALSE;
142 static int uses_apcs_float = FALSE;
143 static int pic_code = FALSE;
144 static int fix_v4bx = FALSE;
145 /* Warn on using deprecated features. */
146 static int warn_on_deprecated = TRUE;
148 /* Understand CodeComposer Studio assembly syntax. */
149 bfd_boolean codecomposer_syntax = FALSE;
151 /* Variables that we set while parsing command-line options. Once all
152 options have been read we re-process these values to set the real
155 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
156 instead of -mcpu=arm1). */
157 static const arm_feature_set *legacy_cpu = NULL;
158 static const arm_feature_set *legacy_fpu = NULL;
160 /* CPU, extension and FPU feature bits selected by -mcpu. */
161 static const arm_feature_set *mcpu_cpu_opt = NULL;
162 static arm_feature_set *mcpu_ext_opt = NULL;
163 static const arm_feature_set *mcpu_fpu_opt = NULL;
165 /* CPU, extension and FPU feature bits selected by -march. */
166 static const arm_feature_set *march_cpu_opt = NULL;
167 static arm_feature_set *march_ext_opt = NULL;
168 static const arm_feature_set *march_fpu_opt = NULL;
170 /* Feature bits selected by -mfpu. */
171 static const arm_feature_set *mfpu_opt = NULL;
173 /* Constants for known architecture features. */
174 static const arm_feature_set fpu_default = FPU_DEFAULT;
175 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
176 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
177 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
178 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
179 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
180 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
182 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
184 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
187 static const arm_feature_set cpu_default = CPU_DEFAULT;
190 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
191 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
192 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
193 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
194 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
195 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
196 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
197 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
198 static const arm_feature_set arm_ext_v4t_5 =
199 ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
200 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
201 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
202 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
203 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
204 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
205 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
206 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
207 static const arm_feature_set arm_ext_v6_notm =
208 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
209 static const arm_feature_set arm_ext_v6_dsp =
210 ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
211 static const arm_feature_set arm_ext_barrier =
212 ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
213 static const arm_feature_set arm_ext_msr =
214 ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
215 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
216 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
217 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
218 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
220 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
222 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
223 static const arm_feature_set arm_ext_m =
224 ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
225 ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
226 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
227 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
228 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
229 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
230 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
231 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
232 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
233 static const arm_feature_set arm_ext_v8m_main =
234 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
235 /* Instructions in ARMv8-M only found in M profile architectures. */
236 static const arm_feature_set arm_ext_v8m_m_only =
237 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
238 static const arm_feature_set arm_ext_v6t2_v8m =
239 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
240 /* Instructions shared between ARMv8-A and ARMv8-M. */
241 static const arm_feature_set arm_ext_atomics =
242 ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
244 /* DSP instructions Tag_DSP_extension refers to. */
245 static const arm_feature_set arm_ext_dsp =
246 ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
248 static const arm_feature_set arm_ext_ras =
249 ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
250 /* FP16 instructions. */
251 static const arm_feature_set arm_ext_fp16 =
252 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
253 static const arm_feature_set arm_ext_fp16_fml =
254 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
255 static const arm_feature_set arm_ext_v8_2 =
256 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
257 static const arm_feature_set arm_ext_v8_3 =
258 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
259 static const arm_feature_set arm_ext_sb =
260 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
262 static const arm_feature_set arm_arch_any = ARM_ANY;
264 static const arm_feature_set fpu_any = FPU_ANY;
266 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
267 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
268 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
270 static const arm_feature_set arm_cext_iwmmxt2 =
271 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
272 static const arm_feature_set arm_cext_iwmmxt =
273 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
274 static const arm_feature_set arm_cext_xscale =
275 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
276 static const arm_feature_set arm_cext_maverick =
277 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
278 static const arm_feature_set fpu_fpa_ext_v1 =
279 ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
280 static const arm_feature_set fpu_fpa_ext_v2 =
281 ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
282 static const arm_feature_set fpu_vfp_ext_v1xd =
283 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
284 static const arm_feature_set fpu_vfp_ext_v1 =
285 ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
286 static const arm_feature_set fpu_vfp_ext_v2 =
287 ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
288 static const arm_feature_set fpu_vfp_ext_v3xd =
289 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
290 static const arm_feature_set fpu_vfp_ext_v3 =
291 ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
292 static const arm_feature_set fpu_vfp_ext_d32 =
293 ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
294 static const arm_feature_set fpu_neon_ext_v1 =
295 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
296 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
297 ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
299 static const arm_feature_set fpu_vfp_fp16 =
300 ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
301 static const arm_feature_set fpu_neon_ext_fma =
302 ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
304 static const arm_feature_set fpu_vfp_ext_fma =
305 ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
306 static const arm_feature_set fpu_vfp_ext_armv8 =
307 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
308 static const arm_feature_set fpu_vfp_ext_armv8xd =
309 ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
310 static const arm_feature_set fpu_neon_ext_armv8 =
311 ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
312 static const arm_feature_set fpu_crypto_ext_armv8 =
313 ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
314 static const arm_feature_set crc_ext_armv8 =
315 ARM_FEATURE_COPROC (CRC_EXT_ARMV8);
316 static const arm_feature_set fpu_neon_ext_v8_1 =
317 ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
318 static const arm_feature_set fpu_neon_ext_dotprod =
319 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
321 static int mfloat_abi_opt = -1;
322 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
324 static arm_feature_set selected_arch = ARM_ARCH_NONE;
325 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
327 static arm_feature_set selected_ext = ARM_ARCH_NONE;
328 /* Feature bits selected by the last -mcpu/-march or by the combination of the
329 last .cpu/.arch directive .arch_extension directives since that
331 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
332 /* FPU feature bits selected by the last -mfpu or .fpu directive. */
333 static arm_feature_set selected_fpu = FPU_NONE;
334 /* Feature bits selected by the last .object_arch directive. */
335 static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
336 /* Must be long enough to hold any of the names in arm_cpus. */
337 static char selected_cpu_name[20];
339 extern FLONUM_TYPE generic_floating_point_number;
341 /* Return if no cpu was selected on command-line. */
343 no_cpu_selected (void)
345 return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
350 static int meabi_flags = EABI_DEFAULT;
352 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
355 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
360 return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
365 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
366 symbolS * GOT_symbol;
369 /* 0: assemble for ARM,
370 1: assemble for Thumb,
371 2: assemble for Thumb even though target CPU does not support thumb
373 static int thumb_mode = 0;
374 /* A value distinct from the possible values for thumb_mode that we
375 can use to record whether thumb_mode has been copied into the
376 tc_frag_data field of a frag. */
377 #define MODE_RECORDED (1 << 4)
379 /* Specifies the intrinsic IT insn behavior mode. */
380 enum implicit_it_mode
382 IMPLICIT_IT_MODE_NEVER = 0x00,
383 IMPLICIT_IT_MODE_ARM = 0x01,
384 IMPLICIT_IT_MODE_THUMB = 0x02,
385 IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
387 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
389 /* If unified_syntax is true, we are processing the new unified
390 ARM/Thumb syntax. Important differences from the old ARM mode:
392 - Immediate operands do not require a # prefix.
393 - Conditional affixes always appear at the end of the
394 instruction. (For backward compatibility, those instructions
395 that formerly had them in the middle, continue to accept them
397 - The IT instruction may appear, and if it does is validated
398 against subsequent conditional affixes. It does not generate
401 Important differences from the old Thumb mode:
403 - Immediate operands do not require a # prefix.
404 - Most of the V6T2 instructions are only available in unified mode.
405 - The .N and .W suffixes are recognized and honored (it is an error
406 if they cannot be honored).
407 - All instructions set the flags if and only if they have an 's' affix.
408 - Conditional affixes may be used. They are validated against
409 preceding IT instructions. Unlike ARM mode, you cannot use a
410 conditional affix except in the scope of an IT instruction. */
412 static bfd_boolean unified_syntax = FALSE;
414 /* An immediate operand can start with #, and ld*, st*, pld operands
415 can contain [ and ]. We need to tell APP not to elide whitespace
416 before a [, which can appear as the first operand for pld.
417 Likewise, a { can appear as the first operand for push, pop, vld*, etc. */
418 const char arm_symbol_chars[] = "#[]{}";
433 enum neon_el_type type;
437 #define NEON_MAX_TYPE_ELS 4
441 struct neon_type_el el[NEON_MAX_TYPE_ELS];
445 enum it_instruction_type
450 IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
451 if inside, should be the last one. */
452 NEUTRAL_IT_INSN, /* This could be either inside or outside,
453 i.e. BKPT and NOP. */
454 IT_INSN /* The IT insn has been parsed. */
457 /* The maximum number of operands we need. */
458 #define ARM_IT_MAX_OPERANDS 6
463 unsigned long instruction;
467 /* "uncond_value" is set to the value in place of the conditional field in
468 unconditional versions of the instruction, or -1 if nothing is
471 struct neon_type vectype;
472 /* This does not indicate an actual NEON instruction, only that
473 the mnemonic accepts neon-style type suffixes. */
475 /* Set to the opcode if the instruction needs relaxation.
476 Zero if the instruction is not relaxed. */
480 bfd_reloc_code_real_type type;
485 enum it_instruction_type it_insn_type;
491 struct neon_type_el vectype;
492 unsigned present : 1; /* Operand present. */
493 unsigned isreg : 1; /* Operand was a register. */
494 unsigned immisreg : 1; /* .imm field is a second register. */
495 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
496 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
497 unsigned immisfloat : 1; /* Immediate was parsed as a float. */
498 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
499 instructions. This allows us to disambiguate ARM <-> vector insns. */
500 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
501 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
502 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
503 unsigned issingle : 1; /* Operand is VFP single-precision register. */
504 unsigned hasreloc : 1; /* Operand has relocation suffix. */
505 unsigned writeback : 1; /* Operand has trailing ! */
506 unsigned preind : 1; /* Preindexed address. */
507 unsigned postind : 1; /* Postindexed address. */
508 unsigned negative : 1; /* Index register was negated. */
509 unsigned shifted : 1; /* Shift applied to operation. */
510 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
511 } operands[ARM_IT_MAX_OPERANDS];
514 static struct arm_it inst;
516 #define NUM_FLOAT_VALS 8
518 const char * fp_const[] =
520 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
523 /* Number of littlenums required to hold an extended precision number. */
524 #define MAX_LITTLENUMS 6
526 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
536 #define CP_T_X 0x00008000
537 #define CP_T_Y 0x00400000
539 #define CONDS_BIT 0x00100000
540 #define LOAD_BIT 0x00100000
542 #define DOUBLE_LOAD_FLAG 0x00000001
546 const char * template_name;
550 #define COND_ALWAYS 0xE
554 const char * template_name;
558 struct asm_barrier_opt
560 const char * template_name;
562 const arm_feature_set arch;
565 /* The bit that distinguishes CPSR and SPSR. */
566 #define SPSR_BIT (1 << 22)
568 /* The individual PSR flag bits. */
569 #define PSR_c (1 << 16)
570 #define PSR_x (1 << 17)
571 #define PSR_s (1 << 18)
572 #define PSR_f (1 << 19)
577 bfd_reloc_code_real_type reloc;
582 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
583 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
588 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
591 /* Bits for DEFINED field in neon_typed_alias. */
592 #define NTA_HASTYPE 1
593 #define NTA_HASINDEX 2
595 struct neon_typed_alias
597 unsigned char defined;
599 struct neon_type_el eltype;
602 /* ARM register categories. This includes coprocessor numbers and various
603 architecture extensions' registers. Each entry should have an error message
604 in reg_expected_msgs below. */
632 /* Structure for a hash table entry for a register.
633 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
634 information which states whether a vector type or index is specified (for a
635 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
641 unsigned char builtin;
642 struct neon_typed_alias * neon;
645 /* Diagnostics used when we don't get a register of the expected type. */
646 const char * const reg_expected_msgs[] =
648 [REG_TYPE_RN] = N_("ARM register expected"),
649 [REG_TYPE_CP] = N_("bad or missing co-processor number"),
650 [REG_TYPE_CN] = N_("co-processor register expected"),
651 [REG_TYPE_FN] = N_("FPA register expected"),
652 [REG_TYPE_VFS] = N_("VFP single precision register expected"),
653 [REG_TYPE_VFD] = N_("VFP/Neon double precision register expected"),
654 [REG_TYPE_NQ] = N_("Neon quad precision register expected"),
655 [REG_TYPE_VFSD] = N_("VFP single or double precision register expected"),
656 [REG_TYPE_NDQ] = N_("Neon double or quad precision register expected"),
657 [REG_TYPE_NSD] = N_("Neon single or double precision register expected"),
658 [REG_TYPE_NSDQ] = N_("VFP single, double or Neon quad precision register"
660 [REG_TYPE_VFC] = N_("VFP system register expected"),
661 [REG_TYPE_MVF] = N_("Maverick MVF register expected"),
662 [REG_TYPE_MVD] = N_("Maverick MVD register expected"),
663 [REG_TYPE_MVFX] = N_("Maverick MVFX register expected"),
664 [REG_TYPE_MVDX] = N_("Maverick MVDX register expected"),
665 [REG_TYPE_MVAX] = N_("Maverick MVAX register expected"),
666 [REG_TYPE_DSPSC] = N_("Maverick DSPSC register expected"),
667 [REG_TYPE_MMXWR] = N_("iWMMXt data register expected"),
668 [REG_TYPE_MMXWC] = N_("iWMMXt control register expected"),
669 [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
670 [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
671 [REG_TYPE_RNB] = N_("")
674 /* Some well known registers that we refer to directly elsewhere. */
680 /* ARM instructions take 4bytes in the object file, Thumb instructions
686 /* Basic string to match. */
687 const char * template_name;
689 /* Parameters to instruction. */
690 unsigned int operands[8];
692 /* Conditional tag - see opcode_lookup. */
693 unsigned int tag : 4;
695 /* Basic instruction code. */
696 unsigned int avalue : 28;
698 /* Thumb-format instruction code. */
701 /* Which architecture variant provides this instruction. */
702 const arm_feature_set * avariant;
703 const arm_feature_set * tvariant;
705 /* Function to call to encode instruction in ARM format. */
706 void (* aencode) (void);
708 /* Function to call to encode instruction in Thumb format. */
709 void (* tencode) (void);
712 /* Defines for various bits that we will want to toggle. */
713 #define INST_IMMEDIATE 0x02000000
714 #define OFFSET_REG 0x02000000
715 #define HWOFFSET_IMM 0x00400000
716 #define SHIFT_BY_REG 0x00000010
717 #define PRE_INDEX 0x01000000
718 #define INDEX_UP 0x00800000
719 #define WRITE_BACK 0x00200000
720 #define LDM_TYPE_2_OR_3 0x00400000
721 #define CPSI_MMOD 0x00020000
723 #define LITERAL_MASK 0xf000f000
724 #define OPCODE_MASK 0xfe1fffff
725 #define V4_STR_BIT 0x00000020
726 #define VLDR_VMOV_SAME 0x0040f000
728 #define T2_SUBS_PC_LR 0xf3de8f00
730 #define DATA_OP_SHIFT 21
731 #define SBIT_SHIFT 20
733 #define T2_OPCODE_MASK 0xfe1fffff
734 #define T2_DATA_OP_SHIFT 21
735 #define T2_SBIT_SHIFT 20
737 #define A_COND_MASK 0xf0000000
738 #define A_PUSH_POP_OP_MASK 0x0fff0000
740 /* Opcodes for pushing/poping registers to/from the stack. */
741 #define A1_OPCODE_PUSH 0x092d0000
742 #define A2_OPCODE_PUSH 0x052d0004
743 #define A2_OPCODE_POP 0x049d0004
745 /* Codes to distinguish the arithmetic instructions. */
756 #define OPCODE_CMP 10
757 #define OPCODE_CMN 11
758 #define OPCODE_ORR 12
759 #define OPCODE_MOV 13
760 #define OPCODE_BIC 14
761 #define OPCODE_MVN 15
763 #define T2_OPCODE_AND 0
764 #define T2_OPCODE_BIC 1
765 #define T2_OPCODE_ORR 2
766 #define T2_OPCODE_ORN 3
767 #define T2_OPCODE_EOR 4
768 #define T2_OPCODE_ADD 8
769 #define T2_OPCODE_ADC 10
770 #define T2_OPCODE_SBC 11
771 #define T2_OPCODE_SUB 13
772 #define T2_OPCODE_RSB 14
774 #define T_OPCODE_MUL 0x4340
775 #define T_OPCODE_TST 0x4200
776 #define T_OPCODE_CMN 0x42c0
777 #define T_OPCODE_NEG 0x4240
778 #define T_OPCODE_MVN 0x43c0
780 #define T_OPCODE_ADD_R3 0x1800
781 #define T_OPCODE_SUB_R3 0x1a00
782 #define T_OPCODE_ADD_HI 0x4400
783 #define T_OPCODE_ADD_ST 0xb000
784 #define T_OPCODE_SUB_ST 0xb080
785 #define T_OPCODE_ADD_SP 0xa800
786 #define T_OPCODE_ADD_PC 0xa000
787 #define T_OPCODE_ADD_I8 0x3000
788 #define T_OPCODE_SUB_I8 0x3800
789 #define T_OPCODE_ADD_I3 0x1c00
790 #define T_OPCODE_SUB_I3 0x1e00
792 #define T_OPCODE_ASR_R 0x4100
793 #define T_OPCODE_LSL_R 0x4080
794 #define T_OPCODE_LSR_R 0x40c0
795 #define T_OPCODE_ROR_R 0x41c0
796 #define T_OPCODE_ASR_I 0x1000
797 #define T_OPCODE_LSL_I 0x0000
798 #define T_OPCODE_LSR_I 0x0800
800 #define T_OPCODE_MOV_I8 0x2000
801 #define T_OPCODE_CMP_I8 0x2800
802 #define T_OPCODE_CMP_LR 0x4280
803 #define T_OPCODE_MOV_HR 0x4600
804 #define T_OPCODE_CMP_HR 0x4500
806 #define T_OPCODE_LDR_PC 0x4800
807 #define T_OPCODE_LDR_SP 0x9800
808 #define T_OPCODE_STR_SP 0x9000
809 #define T_OPCODE_LDR_IW 0x6800
810 #define T_OPCODE_STR_IW 0x6000
811 #define T_OPCODE_LDR_IH 0x8800
812 #define T_OPCODE_STR_IH 0x8000
813 #define T_OPCODE_LDR_IB 0x7800
814 #define T_OPCODE_STR_IB 0x7000
815 #define T_OPCODE_LDR_RW 0x5800
816 #define T_OPCODE_STR_RW 0x5000
817 #define T_OPCODE_LDR_RH 0x5a00
818 #define T_OPCODE_STR_RH 0x5200
819 #define T_OPCODE_LDR_RB 0x5c00
820 #define T_OPCODE_STR_RB 0x5400
822 #define T_OPCODE_PUSH 0xb400
823 #define T_OPCODE_POP 0xbc00
825 #define T_OPCODE_BRANCH 0xe000
827 #define THUMB_SIZE 2 /* Size of thumb instruction. */
828 #define THUMB_PP_PC_LR 0x0100
829 #define THUMB_LOAD_BIT 0x0800
830 #define THUMB2_LOAD_BIT 0x00100000
832 #define BAD_ARGS _("bad arguments to instruction")
833 #define BAD_SP _("r13 not allowed here")
834 #define BAD_PC _("r15 not allowed here")
835 #define BAD_COND _("instruction cannot be conditional")
836 #define BAD_OVERLAP _("registers may not be the same")
837 #define BAD_HIREG _("lo register required")
838 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
839 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode");
840 #define BAD_BRANCH _("branch must be last instruction in IT block")
841 #define BAD_NOT_IT _("instruction not allowed in IT block")
842 #define BAD_FPU _("selected FPU does not support instruction")
843 #define BAD_OUT_IT _("thumb conditional instruction should be in IT block")
844 #define BAD_IT_COND _("incorrect condition in IT block")
845 #define BAD_IT_IT _("IT falling in the range of a previous IT block")
846 #define MISSING_FNSTART _("missing .fnstart before unwinding directive")
847 #define BAD_PC_ADDRESSING \
848 _("cannot use register index with PC-relative addressing")
849 #define BAD_PC_WRITEBACK \
850 _("cannot use writeback with PC-relative addressing")
851 #define BAD_RANGE _("branch out of range")
852 #define BAD_FP16 _("selected processor does not support fp16 instruction")
853 #define UNPRED_REG(R) _("using " R " results in unpredictable behaviour")
854 #define THUMB1_RELOC_ONLY _("relocation valid in thumb1 code only")
856 static struct hash_control * arm_ops_hsh;
857 static struct hash_control * arm_cond_hsh;
858 static struct hash_control * arm_shift_hsh;
859 static struct hash_control * arm_psr_hsh;
860 static struct hash_control * arm_v7m_psr_hsh;
861 static struct hash_control * arm_reg_hsh;
862 static struct hash_control * arm_reloc_hsh;
863 static struct hash_control * arm_barrier_opt_hsh;
865 /* Stuff needed to resolve the label ambiguity
874 symbolS * last_label_seen;
875 static int label_is_thumb_function_name = FALSE;
877 /* Literal pool structure. Held on a per-section
878 and per-sub-section basis. */
880 #define MAX_LITERAL_POOL_SIZE 1024
881 typedef struct literal_pool
883 expressionS literals [MAX_LITERAL_POOL_SIZE];
884 unsigned int next_free_entry;
890 struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
892 struct literal_pool * next;
893 unsigned int alignment;
896 /* Pointer to a linked list of literal pools. */
897 literal_pool * list_of_pools = NULL;
899 typedef enum asmfunc_states
902 WAITING_ASMFUNC_NAME,
906 static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
909 # define now_it seg_info (now_seg)->tc_segment_info_data.current_it
911 static struct current_it now_it;
915 now_it_compatible (int cond)
917 return (cond & ~1) == (now_it.cc & ~1);
921 conditional_insn (void)
923 return inst.cond != COND_ALWAYS;
926 static int in_it_block (void);
928 static int handle_it_state (void);
930 static void force_automatic_it_block_close (void);
932 static void it_fsm_post_encode (void);
934 #define set_it_insn_type(type) \
937 inst.it_insn_type = type; \
938 if (handle_it_state () == FAIL) \
943 #define set_it_insn_type_nonvoid(type, failret) \
946 inst.it_insn_type = type; \
947 if (handle_it_state () == FAIL) \
952 #define set_it_insn_type_last() \
955 if (inst.cond == COND_ALWAYS) \
956 set_it_insn_type (IF_INSIDE_IT_LAST_INSN); \
958 set_it_insn_type (INSIDE_IT_LAST_INSN); \
964 /* This array holds the chars that always start a comment. If the
965 pre-processor is disabled, these aren't very useful. */
966 char arm_comment_chars[] = "@";
968 /* This array holds the chars that only start a comment at the beginning of
969 a line. If the line seems to have the form '# 123 filename'
970 .line and .file directives will appear in the pre-processed output. */
971 /* Note that input_file.c hand checks for '#' at the beginning of the
972 first line of the input file. This is because the compiler outputs
973 #NO_APP at the beginning of its output. */
974 /* Also note that comments like this one will always work. */
975 const char line_comment_chars[] = "#";
977 char arm_line_separator_chars[] = ";";
979 /* Chars that can be used to separate mant
980 from exp in floating point numbers. */
981 const char EXP_CHARS[] = "eE";
983 /* Chars that mean this number is a floating point constant. */
987 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
989 /* Prefix characters that indicate the start of an immediate
991 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
993 /* Separator character handling. */
995 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
998 skip_past_char (char ** str, char c)
1000 /* PR gas/14987: Allow for whitespace before the expected character. */
1001 skip_whitespace (*str);
1012 #define skip_past_comma(str) skip_past_char (str, ',')
1014 /* Arithmetic expressions (possibly involving symbols). */
1016 /* Return TRUE if anything in the expression is a bignum. */
1019 walk_no_bignums (symbolS * sp)
1021 if (symbol_get_value_expression (sp)->X_op == O_big)
1024 if (symbol_get_value_expression (sp)->X_add_symbol)
1026 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1027 || (symbol_get_value_expression (sp)->X_op_symbol
1028 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1034 static bfd_boolean in_my_get_expression = FALSE;
1036 /* Third argument to my_get_expression. */
1037 #define GE_NO_PREFIX 0
1038 #define GE_IMM_PREFIX 1
1039 #define GE_OPT_PREFIX 2
1040 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
1041 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
1042 #define GE_OPT_PREFIX_BIG 3
1045 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
1049 /* In unified syntax, all prefixes are optional. */
1051 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
1054 switch (prefix_mode)
1056 case GE_NO_PREFIX: break;
1058 if (!is_immediate_prefix (**str))
1060 inst.error = _("immediate expression requires a # prefix");
1066 case GE_OPT_PREFIX_BIG:
1067 if (is_immediate_prefix (**str))
1074 memset (ep, 0, sizeof (expressionS));
1076 save_in = input_line_pointer;
1077 input_line_pointer = *str;
1078 in_my_get_expression = TRUE;
1080 in_my_get_expression = FALSE;
1082 if (ep->X_op == O_illegal || ep->X_op == O_absent)
1084 /* We found a bad or missing expression in md_operand(). */
1085 *str = input_line_pointer;
1086 input_line_pointer = save_in;
1087 if (inst.error == NULL)
1088 inst.error = (ep->X_op == O_absent
1089 ? _("missing expression") :_("bad expression"));
1093 /* Get rid of any bignums now, so that we don't generate an error for which
1094 we can't establish a line number later on. Big numbers are never valid
1095 in instructions, which is where this routine is always called. */
1096 if (prefix_mode != GE_OPT_PREFIX_BIG
1097 && (ep->X_op == O_big
1098 || (ep->X_add_symbol
1099 && (walk_no_bignums (ep->X_add_symbol)
1101 && walk_no_bignums (ep->X_op_symbol))))))
1103 inst.error = _("invalid constant");
1104 *str = input_line_pointer;
1105 input_line_pointer = save_in;
1109 *str = input_line_pointer;
1110 input_line_pointer = save_in;
1114 /* Turn a string in input_line_pointer into a floating point constant
1115 of type TYPE, and store the appropriate bytes in *LITP. The number
1116 of LITTLENUMS emitted is stored in *SIZEP. An error message is
1117 returned, or NULL on OK.
1119 Note that fp constants aren't represent in the normal way on the ARM.
1120 In big endian mode, things are as expected. However, in little endian
1121 mode fp constants are big-endian word-wise, and little-endian byte-wise
1122 within the words. For example, (double) 1.1 in big endian mode is
1123 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1124 the byte sequence 99 99 f1 3f 9a 99 99 99.
1126 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
1129 md_atof (int type, char * litP, int * sizeP)
1132 LITTLENUM_TYPE words[MAX_LITTLENUMS];
1164 return _("Unrecognized or unsupported floating point constant");
1167 t = atof_ieee (input_line_pointer, type, words);
1169 input_line_pointer = t;
1170 *sizeP = prec * sizeof (LITTLENUM_TYPE);
1172 if (target_big_endian)
1174 for (i = 0; i < prec; i++)
1176 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1177 litP += sizeof (LITTLENUM_TYPE);
1182 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
1183 for (i = prec - 1; i >= 0; i--)
1185 md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
1186 litP += sizeof (LITTLENUM_TYPE);
1189 /* For a 4 byte float the order of elements in `words' is 1 0.
1190 For an 8 byte float the order is 1 0 3 2. */
1191 for (i = 0; i < prec; i += 2)
1193 md_number_to_chars (litP, (valueT) words[i + 1],
1194 sizeof (LITTLENUM_TYPE));
1195 md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
1196 (valueT) words[i], sizeof (LITTLENUM_TYPE));
1197 litP += 2 * sizeof (LITTLENUM_TYPE);
1204 /* We handle all bad expressions here, so that we can report the faulty
1205 instruction in the error message. */
1208 md_operand (expressionS * exp)
1210 if (in_my_get_expression)
1211 exp->X_op = O_illegal;
1214 /* Immediate values. */
1217 /* Generic immediate-value read function for use in directives.
1218 Accepts anything that 'expression' can fold to a constant.
1219 *val receives the number. */
1222 immediate_for_directive (int *val)
1225 exp.X_op = O_illegal;
1227 if (is_immediate_prefix (*input_line_pointer))
1229 input_line_pointer++;
1233 if (exp.X_op != O_constant)
1235 as_bad (_("expected #constant"));
1236 ignore_rest_of_line ();
1239 *val = exp.X_add_number;
1244 /* Register parsing. */
1246 /* Generic register parser. CCP points to what should be the
1247 beginning of a register name. If it is indeed a valid register
1248 name, advance CCP over it and return the reg_entry structure;
1249 otherwise return NULL. Does not issue diagnostics. */
1251 static struct reg_entry *
1252 arm_reg_parse_multi (char **ccp)
1256 struct reg_entry *reg;
1258 skip_whitespace (start);
1260 #ifdef REGISTER_PREFIX
1261 if (*start != REGISTER_PREFIX)
1265 #ifdef OPTIONAL_REGISTER_PREFIX
1266 if (*start == OPTIONAL_REGISTER_PREFIX)
1271 if (!ISALPHA (*p) || !is_name_beginner (*p))
1276 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1278 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1288 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1289 enum arm_reg_type type)
1291 /* Alternative syntaxes are accepted for a few register classes. */
1298 /* Generic coprocessor register names are allowed for these. */
1299 if (reg && reg->type == REG_TYPE_CN)
1304 /* For backward compatibility, a bare number is valid here. */
1306 unsigned long processor = strtoul (start, ccp, 10);
1307 if (*ccp != start && processor <= 15)
1312 case REG_TYPE_MMXWC:
1313 /* WC includes WCG. ??? I'm not sure this is true for all
1314 instructions that take WC registers. */
1315 if (reg && reg->type == REG_TYPE_MMXWCG)
1326 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1327 return value is the register number or FAIL. */
1330 arm_reg_parse (char **ccp, enum arm_reg_type type)
1333 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1336 /* Do not allow a scalar (reg+index) to parse as a register. */
1337 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1340 if (reg && reg->type == type)
1343 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1350 /* Parse a Neon type specifier. *STR should point at the leading '.'
1351 character. Does no verification at this stage that the type fits the opcode
1358 Can all be legally parsed by this function.
1360 Fills in neon_type struct pointer with parsed information, and updates STR
1361 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1362 type, FAIL if not. */
1365 parse_neon_type (struct neon_type *type, char **str)
1372 while (type->elems < NEON_MAX_TYPE_ELS)
1374 enum neon_el_type thistype = NT_untyped;
1375 unsigned thissize = -1u;
1382 /* Just a size without an explicit type. */
1386 switch (TOLOWER (*ptr))
1388 case 'i': thistype = NT_integer; break;
1389 case 'f': thistype = NT_float; break;
1390 case 'p': thistype = NT_poly; break;
1391 case 's': thistype = NT_signed; break;
1392 case 'u': thistype = NT_unsigned; break;
1394 thistype = NT_float;
1399 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1405 /* .f is an abbreviation for .f32. */
1406 if (thistype == NT_float && !ISDIGIT (*ptr))
1411 thissize = strtoul (ptr, &ptr, 10);
1413 if (thissize != 8 && thissize != 16 && thissize != 32
1416 as_bad (_("bad size %d in type specifier"), thissize);
1424 type->el[type->elems].type = thistype;
1425 type->el[type->elems].size = thissize;
1430 /* Empty/missing type is not a successful parse. */
1431 if (type->elems == 0)
1439 /* Errors may be set multiple times during parsing or bit encoding
1440 (particularly in the Neon bits), but usually the earliest error which is set
1441 will be the most meaningful. Avoid overwriting it with later (cascading)
1442 errors by calling this function. */
1445 first_error (const char *err)
1451 /* Parse a single type, e.g. ".s32", leading period included. */
1453 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1456 struct neon_type optype;
1460 if (parse_neon_type (&optype, &str) == SUCCESS)
1462 if (optype.elems == 1)
1463 *vectype = optype.el[0];
1466 first_error (_("only one type should be specified for operand"));
1472 first_error (_("vector type expected"));
1484 /* Special meanings for indices (which have a range of 0-7), which will fit into
1487 #define NEON_ALL_LANES 15
1488 #define NEON_INTERLEAVE_LANES 14
1490 /* Parse either a register or a scalar, with an optional type. Return the
1491 register number, and optionally fill in the actual type of the register
1492 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1493 type/index information in *TYPEINFO. */
1496 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1497 enum arm_reg_type *rtype,
1498 struct neon_typed_alias *typeinfo)
1501 struct reg_entry *reg = arm_reg_parse_multi (&str);
1502 struct neon_typed_alias atype;
1503 struct neon_type_el parsetype;
1507 atype.eltype.type = NT_invtype;
1508 atype.eltype.size = -1;
1510 /* Try alternate syntax for some types of register. Note these are mutually
1511 exclusive with the Neon syntax extensions. */
1514 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1522 /* Undo polymorphism when a set of register types may be accepted. */
1523 if ((type == REG_TYPE_NDQ
1524 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1525 || (type == REG_TYPE_VFSD
1526 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1527 || (type == REG_TYPE_NSDQ
1528 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1529 || reg->type == REG_TYPE_NQ))
1530 || (type == REG_TYPE_NSD
1531 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1532 || (type == REG_TYPE_MMXWC
1533 && (reg->type == REG_TYPE_MMXWCG)))
1534 type = (enum arm_reg_type) reg->type;
1536 if (type != reg->type)
1542 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1544 if ((atype.defined & NTA_HASTYPE) != 0)
1546 first_error (_("can't redefine type for operand"));
1549 atype.defined |= NTA_HASTYPE;
1550 atype.eltype = parsetype;
1553 if (skip_past_char (&str, '[') == SUCCESS)
1555 if (type != REG_TYPE_VFD
1556 && !(type == REG_TYPE_VFS
1557 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2)))
1559 first_error (_("only D registers may be indexed"));
1563 if ((atype.defined & NTA_HASINDEX) != 0)
1565 first_error (_("can't change index for operand"));
1569 atype.defined |= NTA_HASINDEX;
1571 if (skip_past_char (&str, ']') == SUCCESS)
1572 atype.index = NEON_ALL_LANES;
1577 my_get_expression (&exp, &str, GE_NO_PREFIX);
1579 if (exp.X_op != O_constant)
1581 first_error (_("constant expression required"));
1585 if (skip_past_char (&str, ']') == FAIL)
1588 atype.index = exp.X_add_number;
1603 /* Like arm_reg_parse, but allow allow the following extra features:
1604 - If RTYPE is non-zero, return the (possibly restricted) type of the
1605 register (e.g. Neon double or quad reg when either has been requested).
1606 - If this is a Neon vector type with additional type information, fill
1607 in the struct pointed to by VECTYPE (if non-NULL).
1608 This function will fault on encountering a scalar. */
1611 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1612 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1614 struct neon_typed_alias atype;
1616 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1621 /* Do not allow regname(... to parse as a register. */
1625 /* Do not allow a scalar (reg+index) to parse as a register. */
1626 if ((atype.defined & NTA_HASINDEX) != 0)
1628 first_error (_("register operand expected, but got scalar"));
1633 *vectype = atype.eltype;
1640 #define NEON_SCALAR_REG(X) ((X) >> 4)
1641 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1643 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1644 have enough information to be able to do a good job bounds-checking. So, we
1645 just do easy checks here, and do further checks later. */
1648 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1652 struct neon_typed_alias atype;
1653 enum arm_reg_type reg_type = REG_TYPE_VFD;
1656 reg_type = REG_TYPE_VFS;
1658 reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1660 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1663 if (atype.index == NEON_ALL_LANES)
1665 first_error (_("scalar must have an index"));
1668 else if (atype.index >= 64 / elsize)
1670 first_error (_("scalar index out of range"));
1675 *type = atype.eltype;
1679 return reg * 16 + atype.index;
1682 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1685 parse_reg_list (char ** strp)
1687 char * str = * strp;
1691 /* We come back here if we get ranges concatenated by '+' or '|'. */
1694 skip_whitespace (str);
1708 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1710 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1720 first_error (_("bad range in register list"));
1724 for (i = cur_reg + 1; i < reg; i++)
1726 if (range & (1 << i))
1728 (_("Warning: duplicated register (r%d) in register list"),
1736 if (range & (1 << reg))
1737 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1739 else if (reg <= cur_reg)
1740 as_tsktsk (_("Warning: register range not in ascending order"));
1745 while (skip_past_comma (&str) != FAIL
1746 || (in_range = 1, *str++ == '-'));
1749 if (skip_past_char (&str, '}') == FAIL)
1751 first_error (_("missing `}'"));
1759 if (my_get_expression (&exp, &str, GE_NO_PREFIX))
1762 if (exp.X_op == O_constant)
1764 if (exp.X_add_number
1765 != (exp.X_add_number & 0x0000ffff))
1767 inst.error = _("invalid register mask");
1771 if ((range & exp.X_add_number) != 0)
1773 int regno = range & exp.X_add_number;
1776 regno = (1 << regno) - 1;
1778 (_("Warning: duplicated register (r%d) in register list"),
1782 range |= exp.X_add_number;
1786 if (inst.reloc.type != 0)
1788 inst.error = _("expression too complex");
1792 memcpy (&inst.reloc.exp, &exp, sizeof (expressionS));
1793 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1794 inst.reloc.pc_rel = 0;
1798 if (*str == '|' || *str == '+')
1804 while (another_range);
1810 /* Types of registers in a list. */
1819 /* Parse a VFP register list. If the string is invalid return FAIL.
1820 Otherwise return the number of registers, and set PBASE to the first
1821 register. Parses registers of type ETYPE.
1822 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1823 - Q registers can be used to specify pairs of D registers
1824 - { } can be omitted from around a singleton register list
1825 FIXME: This is not implemented, as it would require backtracking in
1828 This could be done (the meaning isn't really ambiguous), but doesn't
1829 fit in well with the current parsing framework.
1830 - 32 D registers may be used (also true for VFPv3).
1831 FIXME: Types are ignored in these register lists, which is probably a
1835 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1840 enum arm_reg_type regtype = (enum arm_reg_type) 0;
1844 unsigned long mask = 0;
1847 if (skip_past_char (&str, '{') == FAIL)
1849 inst.error = _("expecting {");
1856 regtype = REG_TYPE_VFS;
1861 regtype = REG_TYPE_VFD;
1864 case REGLIST_NEON_D:
1865 regtype = REG_TYPE_NDQ;
1869 if (etype != REGLIST_VFP_S)
1871 /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant. */
1872 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
1876 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1879 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1886 base_reg = max_regs;
1890 int setmask = 1, addregs = 1;
1892 new_base = arm_typed_reg_parse (&str, regtype, ®type, NULL);
1894 if (new_base == FAIL)
1896 first_error (_(reg_expected_msgs[regtype]));
1900 if (new_base >= max_regs)
1902 first_error (_("register out of range in list"));
1906 /* Note: a value of 2 * n is returned for the register Q<n>. */
1907 if (regtype == REG_TYPE_NQ)
1913 if (new_base < base_reg)
1914 base_reg = new_base;
1916 if (mask & (setmask << new_base))
1918 first_error (_("invalid register list"));
1922 if ((mask >> new_base) != 0 && ! warned)
1924 as_tsktsk (_("register list not in ascending order"));
1928 mask |= setmask << new_base;
1931 if (*str == '-') /* We have the start of a range expression */
1937 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1940 inst.error = gettext (reg_expected_msgs[regtype]);
1944 if (high_range >= max_regs)
1946 first_error (_("register out of range in list"));
1950 if (regtype == REG_TYPE_NQ)
1951 high_range = high_range + 1;
1953 if (high_range <= new_base)
1955 inst.error = _("register range not in ascending order");
1959 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1961 if (mask & (setmask << new_base))
1963 inst.error = _("invalid register list");
1967 mask |= setmask << new_base;
1972 while (skip_past_comma (&str) != FAIL);
1976 /* Sanity check -- should have raised a parse error above. */
1977 if (count == 0 || count > max_regs)
1982 /* Final test -- the registers must be consecutive. */
1984 for (i = 0; i < count; i++)
1986 if ((mask & (1u << i)) == 0)
1988 inst.error = _("non-contiguous register range");
1998 /* True if two alias types are the same. */
2001 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2009 if (a->defined != b->defined)
2012 if ((a->defined & NTA_HASTYPE) != 0
2013 && (a->eltype.type != b->eltype.type
2014 || a->eltype.size != b->eltype.size))
2017 if ((a->defined & NTA_HASINDEX) != 0
2018 && (a->index != b->index))
2024 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2025 The base register is put in *PBASE.
2026 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2028 The register stride (minus one) is put in bit 4 of the return value.
2029 Bits [6:5] encode the list length (minus one).
2030 The type of the list elements is put in *ELTYPE, if non-NULL. */
2032 #define NEON_LANE(X) ((X) & 0xf)
2033 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
2034 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
2037 parse_neon_el_struct_list (char **str, unsigned *pbase,
2038 struct neon_type_el *eltype)
2045 int leading_brace = 0;
2046 enum arm_reg_type rtype = REG_TYPE_NDQ;
2047 const char *const incr_error = _("register stride must be 1 or 2");
2048 const char *const type_error = _("mismatched element/structure types in list");
2049 struct neon_typed_alias firsttype;
2050 firsttype.defined = 0;
2051 firsttype.eltype.type = NT_invtype;
2052 firsttype.eltype.size = -1;
2053 firsttype.index = -1;
2055 if (skip_past_char (&ptr, '{') == SUCCESS)
2060 struct neon_typed_alias atype;
2061 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
2065 first_error (_(reg_expected_msgs[rtype]));
2072 if (rtype == REG_TYPE_NQ)
2078 else if (reg_incr == -1)
2080 reg_incr = getreg - base_reg;
2081 if (reg_incr < 1 || reg_incr > 2)
2083 first_error (_(incr_error));
2087 else if (getreg != base_reg + reg_incr * count)
2089 first_error (_(incr_error));
2093 if (! neon_alias_types_same (&atype, &firsttype))
2095 first_error (_(type_error));
2099 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
2103 struct neon_typed_alias htype;
2104 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
2106 lane = NEON_INTERLEAVE_LANES;
2107 else if (lane != NEON_INTERLEAVE_LANES)
2109 first_error (_(type_error));
2114 else if (reg_incr != 1)
2116 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
2120 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
2123 first_error (_(reg_expected_msgs[rtype]));
2126 if (! neon_alias_types_same (&htype, &firsttype))
2128 first_error (_(type_error));
2131 count += hireg + dregs - getreg;
2135 /* If we're using Q registers, we can't use [] or [n] syntax. */
2136 if (rtype == REG_TYPE_NQ)
2142 if ((atype.defined & NTA_HASINDEX) != 0)
2146 else if (lane != atype.index)
2148 first_error (_(type_error));
2152 else if (lane == -1)
2153 lane = NEON_INTERLEAVE_LANES;
2154 else if (lane != NEON_INTERLEAVE_LANES)
2156 first_error (_(type_error));
2161 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
2163 /* No lane set by [x]. We must be interleaving structures. */
2165 lane = NEON_INTERLEAVE_LANES;
2168 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
2169 || (count > 1 && reg_incr == -1))
2171 first_error (_("error parsing element/structure list"));
2175 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
2177 first_error (_("expected }"));
2185 *eltype = firsttype.eltype;
2190 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
2193 /* Parse an explicit relocation suffix on an expression. This is
2194 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
2195 arm_reloc_hsh contains no entries, so this function can only
2196 succeed if there is no () after the word. Returns -1 on error,
2197 BFD_RELOC_UNUSED if there wasn't any suffix. */
2200 parse_reloc (char **str)
2202 struct reloc_entry *r;
2206 return BFD_RELOC_UNUSED;
2211 while (*q && *q != ')' && *q != ',')
2216 if ((r = (struct reloc_entry *)
2217 hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2224 /* Directives: register aliases. */
2226 static struct reg_entry *
2227 insert_reg_alias (char *str, unsigned number, int type)
2229 struct reg_entry *new_reg;
2232 if ((new_reg = (struct reg_entry *) hash_find (arm_reg_hsh, str)) != 0)
2234 if (new_reg->builtin)
2235 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2237 /* Only warn about a redefinition if it's not defined as the
2239 else if (new_reg->number != number || new_reg->type != type)
2240 as_warn (_("ignoring redefinition of register alias '%s'"), str);
2245 name = xstrdup (str);
2246 new_reg = XNEW (struct reg_entry);
2248 new_reg->name = name;
2249 new_reg->number = number;
2250 new_reg->type = type;
2251 new_reg->builtin = FALSE;
2252 new_reg->neon = NULL;
2254 if (hash_insert (arm_reg_hsh, name, (void *) new_reg))
2261 insert_neon_reg_alias (char *str, int number, int type,
2262 struct neon_typed_alias *atype)
2264 struct reg_entry *reg = insert_reg_alias (str, number, type);
2268 first_error (_("attempt to redefine typed alias"));
2274 reg->neon = XNEW (struct neon_typed_alias);
2275 *reg->neon = *atype;
2279 /* Look for the .req directive. This is of the form:
2281 new_register_name .req existing_register_name
2283 If we find one, or if it looks sufficiently like one that we want to
2284 handle any error here, return TRUE. Otherwise return FALSE. */
2287 create_register_alias (char * newname, char *p)
2289 struct reg_entry *old;
2290 char *oldname, *nbuf;
2293 /* The input scrubber ensures that whitespace after the mnemonic is
2294 collapsed to single spaces. */
2296 if (strncmp (oldname, " .req ", 6) != 0)
2300 if (*oldname == '\0')
2303 old = (struct reg_entry *) hash_find (arm_reg_hsh, oldname);
2306 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2310 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2311 the desired alias name, and p points to its end. If not, then
2312 the desired alias name is in the global original_case_string. */
2313 #ifdef TC_CASE_SENSITIVE
2316 newname = original_case_string;
2317 nlen = strlen (newname);
2320 nbuf = xmemdup0 (newname, nlen);
2322 /* Create aliases under the new name as stated; an all-lowercase
2323 version of the new name; and an all-uppercase version of the new
2325 if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
2327 for (p = nbuf; *p; p++)
2330 if (strncmp (nbuf, newname, nlen))
2332 /* If this attempt to create an additional alias fails, do not bother
2333 trying to create the all-lower case alias. We will fail and issue
2334 a second, duplicate error message. This situation arises when the
2335 programmer does something like:
2338 The second .req creates the "Foo" alias but then fails to create
2339 the artificial FOO alias because it has already been created by the
2341 if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
2348 for (p = nbuf; *p; p++)
2351 if (strncmp (nbuf, newname, nlen))
2352 insert_reg_alias (nbuf, old->number, old->type);
2359 /* Create a Neon typed/indexed register alias using directives, e.g.:
2364 These typed registers can be used instead of the types specified after the
2365 Neon mnemonic, so long as all operands given have types. Types can also be
2366 specified directly, e.g.:
2367 vadd d0.s32, d1.s32, d2.s32 */
2370 create_neon_reg_alias (char *newname, char *p)
2372 enum arm_reg_type basetype;
2373 struct reg_entry *basereg;
2374 struct reg_entry mybasereg;
2375 struct neon_type ntype;
2376 struct neon_typed_alias typeinfo;
2377 char *namebuf, *nameend ATTRIBUTE_UNUSED;
2380 typeinfo.defined = 0;
2381 typeinfo.eltype.type = NT_invtype;
2382 typeinfo.eltype.size = -1;
2383 typeinfo.index = -1;
2387 if (strncmp (p, " .dn ", 5) == 0)
2388 basetype = REG_TYPE_VFD;
2389 else if (strncmp (p, " .qn ", 5) == 0)
2390 basetype = REG_TYPE_NQ;
2399 basereg = arm_reg_parse_multi (&p);
2401 if (basereg && basereg->type != basetype)
2403 as_bad (_("bad type for register"));
2407 if (basereg == NULL)
2410 /* Try parsing as an integer. */
2411 my_get_expression (&exp, &p, GE_NO_PREFIX);
2412 if (exp.X_op != O_constant)
2414 as_bad (_("expression must be constant"));
2417 basereg = &mybasereg;
2418 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2424 typeinfo = *basereg->neon;
2426 if (parse_neon_type (&ntype, &p) == SUCCESS)
2428 /* We got a type. */
2429 if (typeinfo.defined & NTA_HASTYPE)
2431 as_bad (_("can't redefine the type of a register alias"));
2435 typeinfo.defined |= NTA_HASTYPE;
2436 if (ntype.elems != 1)
2438 as_bad (_("you must specify a single type only"));
2441 typeinfo.eltype = ntype.el[0];
2444 if (skip_past_char (&p, '[') == SUCCESS)
2447 /* We got a scalar index. */
2449 if (typeinfo.defined & NTA_HASINDEX)
2451 as_bad (_("can't redefine the index of a scalar alias"));
2455 my_get_expression (&exp, &p, GE_NO_PREFIX);
2457 if (exp.X_op != O_constant)
2459 as_bad (_("scalar index must be constant"));
2463 typeinfo.defined |= NTA_HASINDEX;
2464 typeinfo.index = exp.X_add_number;
2466 if (skip_past_char (&p, ']') == FAIL)
2468 as_bad (_("expecting ]"));
2473 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2474 the desired alias name, and p points to its end. If not, then
2475 the desired alias name is in the global original_case_string. */
2476 #ifdef TC_CASE_SENSITIVE
2477 namelen = nameend - newname;
2479 newname = original_case_string;
2480 namelen = strlen (newname);
2483 namebuf = xmemdup0 (newname, namelen);
2485 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2486 typeinfo.defined != 0 ? &typeinfo : NULL);
2488 /* Insert name in all uppercase. */
2489 for (p = namebuf; *p; p++)
2492 if (strncmp (namebuf, newname, namelen))
2493 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2494 typeinfo.defined != 0 ? &typeinfo : NULL);
2496 /* Insert name in all lowercase. */
2497 for (p = namebuf; *p; p++)
2500 if (strncmp (namebuf, newname, namelen))
2501 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2502 typeinfo.defined != 0 ? &typeinfo : NULL);
2508 /* Should never be called, as .req goes between the alias and the
2509 register name, not at the beginning of the line. */
2512 s_req (int a ATTRIBUTE_UNUSED)
2514 as_bad (_("invalid syntax for .req directive"));
2518 s_dn (int a ATTRIBUTE_UNUSED)
2520 as_bad (_("invalid syntax for .dn directive"));
2524 s_qn (int a ATTRIBUTE_UNUSED)
2526 as_bad (_("invalid syntax for .qn directive"));
2529 /* The .unreq directive deletes an alias which was previously defined
2530 by .req. For example:
2536 s_unreq (int a ATTRIBUTE_UNUSED)
2541 name = input_line_pointer;
2543 while (*input_line_pointer != 0
2544 && *input_line_pointer != ' '
2545 && *input_line_pointer != '\n')
2546 ++input_line_pointer;
2548 saved_char = *input_line_pointer;
2549 *input_line_pointer = 0;
2552 as_bad (_("invalid syntax for .unreq directive"));
2555 struct reg_entry *reg = (struct reg_entry *) hash_find (arm_reg_hsh,
2559 as_bad (_("unknown register alias '%s'"), name);
2560 else if (reg->builtin)
2561 as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2568 hash_delete (arm_reg_hsh, name, FALSE);
2569 free ((char *) reg->name);
2574 /* Also locate the all upper case and all lower case versions.
2575 Do not complain if we cannot find one or the other as it
2576 was probably deleted above. */
2578 nbuf = strdup (name);
2579 for (p = nbuf; *p; p++)
2581 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2584 hash_delete (arm_reg_hsh, nbuf, FALSE);
2585 free ((char *) reg->name);
2591 for (p = nbuf; *p; p++)
2593 reg = (struct reg_entry *) hash_find (arm_reg_hsh, nbuf);
2596 hash_delete (arm_reg_hsh, nbuf, FALSE);
2597 free ((char *) reg->name);
2607 *input_line_pointer = saved_char;
2608 demand_empty_rest_of_line ();
2611 /* Directives: Instruction set selection. */
2614 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2615 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2616 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2617 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2619 /* Create a new mapping symbol for the transition to STATE. */
2622 make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2625 const char * symname;
2632 type = BSF_NO_FLAGS;
2636 type = BSF_NO_FLAGS;
2640 type = BSF_NO_FLAGS;
2646 symbolP = symbol_new (symname, now_seg, value, frag);
2647 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2652 THUMB_SET_FUNC (symbolP, 0);
2653 ARM_SET_THUMB (symbolP, 0);
2654 ARM_SET_INTERWORK (symbolP, support_interwork);
2658 THUMB_SET_FUNC (symbolP, 1);
2659 ARM_SET_THUMB (symbolP, 1);
2660 ARM_SET_INTERWORK (symbolP, support_interwork);
2668 /* Save the mapping symbols for future reference. Also check that
2669 we do not place two mapping symbols at the same offset within a
2670 frag. We'll handle overlap between frags in
2671 check_mapping_symbols.
2673 If .fill or other data filling directive generates zero sized data,
2674 the mapping symbol for the following code will have the same value
2675 as the one generated for the data filling directive. In this case,
2676 we replace the old symbol with the new one at the same address. */
2679 if (frag->tc_frag_data.first_map != NULL)
2681 know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
2682 symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
2684 frag->tc_frag_data.first_map = symbolP;
2686 if (frag->tc_frag_data.last_map != NULL)
2688 know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
2689 if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
2690 symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
2692 frag->tc_frag_data.last_map = symbolP;
2695 /* We must sometimes convert a region marked as code to data during
2696 code alignment, if an odd number of bytes have to be padded. The
2697 code mapping symbol is pushed to an aligned address. */
2700 insert_data_mapping_symbol (enum mstate state,
2701 valueT value, fragS *frag, offsetT bytes)
2703 /* If there was already a mapping symbol, remove it. */
2704 if (frag->tc_frag_data.last_map != NULL
2705 && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
2707 symbolS *symp = frag->tc_frag_data.last_map;
2711 know (frag->tc_frag_data.first_map == symp);
2712 frag->tc_frag_data.first_map = NULL;
2714 frag->tc_frag_data.last_map = NULL;
2715 symbol_remove (symp, &symbol_rootP, &symbol_lastP);
2718 make_mapping_symbol (MAP_DATA, value, frag);
2719 make_mapping_symbol (state, value + bytes, frag);
2722 static void mapping_state_2 (enum mstate state, int max_chars);
2724 /* Set the mapping state to STATE. Only call this when about to
2725 emit some STATE bytes to the file. */
2727 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
2729 mapping_state (enum mstate state)
2731 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2733 if (mapstate == state)
2734 /* The mapping symbol has already been emitted.
2735 There is nothing else to do. */
2738 if (state == MAP_ARM || state == MAP_THUMB)
2740 All ARM instructions require 4-byte alignment.
2741 (Almost) all Thumb instructions require 2-byte alignment.
2743 When emitting instructions into any section, mark the section
2746 Some Thumb instructions are alignment-sensitive modulo 4 bytes,
2747 but themselves require 2-byte alignment; this applies to some
2748 PC- relative forms. However, these cases will involve implicit
2749 literal pool generation or an explicit .align >=2, both of
2750 which will cause the section to me marked with sufficient
2751 alignment. Thus, we don't handle those cases here. */
2752 record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
2754 if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
2755 /* This case will be evaluated later. */
2758 mapping_state_2 (state, 0);
2761 /* Same as mapping_state, but MAX_CHARS bytes have already been
2762 allocated. Put the mapping symbol that far back. */
2765 mapping_state_2 (enum mstate state, int max_chars)
2767 enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2769 if (!SEG_NORMAL (now_seg))
2772 if (mapstate == state)
2773 /* The mapping symbol has already been emitted.
2774 There is nothing else to do. */
2777 if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
2778 || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
2780 struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
2781 const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
2784 make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
2787 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2788 make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
2792 #define mapping_state(x) ((void)0)
2793 #define mapping_state_2(x, y) ((void)0)
2796 /* Find the real, Thumb encoded start of a Thumb function. */
2800 find_real_start (symbolS * symbolP)
2803 const char * name = S_GET_NAME (symbolP);
2804 symbolS * new_target;
2806 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2807 #define STUB_NAME ".real_start_of"
2812 /* The compiler may generate BL instructions to local labels because
2813 it needs to perform a branch to a far away location. These labels
2814 do not have a corresponding ".real_start_of" label. We check
2815 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2816 the ".real_start_of" convention for nonlocal branches. */
2817 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2820 real_start = concat (STUB_NAME, name, NULL);
2821 new_target = symbol_find (real_start);
2824 if (new_target == NULL)
2826 as_warn (_("Failed to find real start of function: %s\n"), name);
2827 new_target = symbolP;
2835 opcode_select (int width)
2842 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2843 as_bad (_("selected processor does not support THUMB opcodes"));
2846 /* No need to force the alignment, since we will have been
2847 coming from ARM mode, which is word-aligned. */
2848 record_alignment (now_seg, 1);
2855 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2856 as_bad (_("selected processor does not support ARM opcodes"));
2861 frag_align (2, 0, 0);
2863 record_alignment (now_seg, 1);
2868 as_bad (_("invalid instruction size selected (%d)"), width);
2873 s_arm (int ignore ATTRIBUTE_UNUSED)
2876 demand_empty_rest_of_line ();
2880 s_thumb (int ignore ATTRIBUTE_UNUSED)
2883 demand_empty_rest_of_line ();
2887 s_code (int unused ATTRIBUTE_UNUSED)
2891 temp = get_absolute_expression ();
2896 opcode_select (temp);
2900 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2905 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2907 /* If we are not already in thumb mode go into it, EVEN if
2908 the target processor does not support thumb instructions.
2909 This is used by gcc/config/arm/lib1funcs.asm for example
2910 to compile interworking support functions even if the
2911 target processor should not support interworking. */
2915 record_alignment (now_seg, 1);
2918 demand_empty_rest_of_line ();
2922 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2926 /* The following label is the name/address of the start of a Thumb function.
2927 We need to know this for the interworking support. */
2928 label_is_thumb_function_name = TRUE;
2931 /* Perform a .set directive, but also mark the alias as
2932 being a thumb function. */
2935 s_thumb_set (int equiv)
2937 /* XXX the following is a duplicate of the code for s_set() in read.c
2938 We cannot just call that code as we need to get at the symbol that
2945 /* Especial apologies for the random logic:
2946 This just grew, and could be parsed much more simply!
2948 delim = get_symbol_name (& name);
2949 end_name = input_line_pointer;
2950 (void) restore_line_pointer (delim);
2952 if (*input_line_pointer != ',')
2955 as_bad (_("expected comma after name \"%s\""), name);
2957 ignore_rest_of_line ();
2961 input_line_pointer++;
2964 if (name[0] == '.' && name[1] == '\0')
2966 /* XXX - this should not happen to .thumb_set. */
2970 if ((symbolP = symbol_find (name)) == NULL
2971 && (symbolP = md_undefined_symbol (name)) == NULL)
2974 /* When doing symbol listings, play games with dummy fragments living
2975 outside the normal fragment chain to record the file and line info
2977 if (listing & LISTING_SYMBOLS)
2979 extern struct list_info_struct * listing_tail;
2980 fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));
2982 memset (dummy_frag, 0, sizeof (fragS));
2983 dummy_frag->fr_type = rs_fill;
2984 dummy_frag->line = listing_tail;
2985 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2986 dummy_frag->fr_symbol = symbolP;
2990 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2993 /* "set" symbols are local unless otherwise specified. */
2994 SF_SET_LOCAL (symbolP);
2995 #endif /* OBJ_COFF */
2996 } /* Make a new symbol. */
2998 symbol_table_insert (symbolP);
3003 && S_IS_DEFINED (symbolP)
3004 && S_GET_SEGMENT (symbolP) != reg_section)
3005 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
3007 pseudo_set (symbolP);
3009 demand_empty_rest_of_line ();
3011 /* XXX Now we come to the Thumb specific bit of code. */
3013 THUMB_SET_FUNC (symbolP, 1);
3014 ARM_SET_THUMB (symbolP, 1);
3015 #if defined OBJ_ELF || defined OBJ_COFF
3016 ARM_SET_INTERWORK (symbolP, support_interwork);
3020 /* Directives: Mode selection. */
3022 /* .syntax [unified|divided] - choose the new unified syntax
3023 (same for Arm and Thumb encoding, modulo slight differences in what
3024 can be represented) or the old divergent syntax for each mode. */
3026 s_syntax (int unused ATTRIBUTE_UNUSED)
3030 delim = get_symbol_name (& name);
3032 if (!strcasecmp (name, "unified"))
3033 unified_syntax = TRUE;
3034 else if (!strcasecmp (name, "divided"))
3035 unified_syntax = FALSE;
3038 as_bad (_("unrecognized syntax mode \"%s\""), name);
3041 (void) restore_line_pointer (delim);
3042 demand_empty_rest_of_line ();
3045 /* Directives: sectioning and alignment. */
3048 s_bss (int ignore ATTRIBUTE_UNUSED)
3050 /* We don't support putting frags in the BSS segment, we fake it by
3051 marking in_bss, then looking at s_skip for clues. */
3052 subseg_set (bss_section, 0);
3053 demand_empty_rest_of_line ();
3055 #ifdef md_elf_section_change_hook
3056 md_elf_section_change_hook ();
3061 s_even (int ignore ATTRIBUTE_UNUSED)
3063 /* Never make frag if expect extra pass. */
3065 frag_align (1, 0, 0);
3067 record_alignment (now_seg, 1);
3069 demand_empty_rest_of_line ();
3072 /* Directives: CodeComposer Studio. */
3074 /* .ref (for CodeComposer Studio syntax only). */
3076 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3078 if (codecomposer_syntax)
3079 ignore_rest_of_line ();
3081 as_bad (_(".ref pseudo-op only available with -mccs flag."));
3084 /* If name is not NULL, then it is used for marking the beginning of a
3085 function, whereas if it is NULL then it means the function end. */
3087 asmfunc_debug (const char * name)
3089 static const char * last_name = NULL;
3093 gas_assert (last_name == NULL);
3096 if (debug_type == DEBUG_STABS)
3097 stabs_generate_asm_func (name, name);
3101 gas_assert (last_name != NULL);
3103 if (debug_type == DEBUG_STABS)
3104 stabs_generate_asm_endfunc (last_name, last_name);
3111 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3113 if (codecomposer_syntax)
3115 switch (asmfunc_state)
3117 case OUTSIDE_ASMFUNC:
3118 asmfunc_state = WAITING_ASMFUNC_NAME;
3121 case WAITING_ASMFUNC_NAME:
3122 as_bad (_(".asmfunc repeated."));
3125 case WAITING_ENDASMFUNC:
3126 as_bad (_(".asmfunc without function."));
3129 demand_empty_rest_of_line ();
3132 as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3136 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3138 if (codecomposer_syntax)
3140 switch (asmfunc_state)
3142 case OUTSIDE_ASMFUNC:
3143 as_bad (_(".endasmfunc without a .asmfunc."));
3146 case WAITING_ASMFUNC_NAME:
3147 as_bad (_(".endasmfunc without function."));
3150 case WAITING_ENDASMFUNC:
3151 asmfunc_state = OUTSIDE_ASMFUNC;
3152 asmfunc_debug (NULL);
3155 demand_empty_rest_of_line ();
3158 as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3162 s_ccs_def (int name)
3164 if (codecomposer_syntax)
3167 as_bad (_(".def pseudo-op only available with -mccs flag."));
3170 /* Directives: Literal pools. */
3172 static literal_pool *
3173 find_literal_pool (void)
3175 literal_pool * pool;
3177 for (pool = list_of_pools; pool != NULL; pool = pool->next)
3179 if (pool->section == now_seg
3180 && pool->sub_section == now_subseg)
3187 static literal_pool *
3188 find_or_make_literal_pool (void)
3190 /* Next literal pool ID number. */
3191 static unsigned int latest_pool_num = 1;
3192 literal_pool * pool;
3194 pool = find_literal_pool ();
3198 /* Create a new pool. */
3199 pool = XNEW (literal_pool);
3203 pool->next_free_entry = 0;
3204 pool->section = now_seg;
3205 pool->sub_section = now_subseg;
3206 pool->next = list_of_pools;
3207 pool->symbol = NULL;
3208 pool->alignment = 2;
3210 /* Add it to the list. */
3211 list_of_pools = pool;
3214 /* New pools, and emptied pools, will have a NULL symbol. */
3215 if (pool->symbol == NULL)
3217 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3218 (valueT) 0, &zero_address_frag);
3219 pool->id = latest_pool_num ++;
3226 /* Add the literal in the global 'inst'
3227 structure to the relevant literal pool. */
3230 add_to_lit_pool (unsigned int nbytes)
3232 #define PADDING_SLOT 0x1
3233 #define LIT_ENTRY_SIZE_MASK 0xFF
3234 literal_pool * pool;
3235 unsigned int entry, pool_size = 0;
3236 bfd_boolean padding_slot_p = FALSE;
3242 imm1 = inst.operands[1].imm;
3243 imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
3244 : inst.reloc.exp.X_unsigned ? 0
3245 : ((bfd_int64_t) inst.operands[1].imm) >> 32);
3246 if (target_big_endian)
3249 imm2 = inst.operands[1].imm;
3253 pool = find_or_make_literal_pool ();
3255 /* Check if this literal value is already in the pool. */
3256 for (entry = 0; entry < pool->next_free_entry; entry ++)
3260 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3261 && (inst.reloc.exp.X_op == O_constant)
3262 && (pool->literals[entry].X_add_number
3263 == inst.reloc.exp.X_add_number)
3264 && (pool->literals[entry].X_md == nbytes)
3265 && (pool->literals[entry].X_unsigned
3266 == inst.reloc.exp.X_unsigned))
3269 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
3270 && (inst.reloc.exp.X_op == O_symbol)
3271 && (pool->literals[entry].X_add_number
3272 == inst.reloc.exp.X_add_number)
3273 && (pool->literals[entry].X_add_symbol
3274 == inst.reloc.exp.X_add_symbol)
3275 && (pool->literals[entry].X_op_symbol
3276 == inst.reloc.exp.X_op_symbol)
3277 && (pool->literals[entry].X_md == nbytes))
3280 else if ((nbytes == 8)
3281 && !(pool_size & 0x7)
3282 && ((entry + 1) != pool->next_free_entry)
3283 && (pool->literals[entry].X_op == O_constant)
3284 && (pool->literals[entry].X_add_number == (offsetT) imm1)
3285 && (pool->literals[entry].X_unsigned
3286 == inst.reloc.exp.X_unsigned)
3287 && (pool->literals[entry + 1].X_op == O_constant)
3288 && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
3289 && (pool->literals[entry + 1].X_unsigned
3290 == inst.reloc.exp.X_unsigned))
3293 padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
3294 if (padding_slot_p && (nbytes == 4))
3300 /* Do we need to create a new entry? */
3301 if (entry == pool->next_free_entry)
3303 if (entry >= MAX_LITERAL_POOL_SIZE)
3305 inst.error = _("literal pool overflow");
3311 /* For 8-byte entries, we align to an 8-byte boundary,
3312 and split it into two 4-byte entries, because on 32-bit
3313 host, 8-byte constants are treated as big num, thus
3314 saved in "generic_bignum" which will be overwritten
3315 by later assignments.
3317 We also need to make sure there is enough space for
3320 We also check to make sure the literal operand is a
3322 if (!(inst.reloc.exp.X_op == O_constant
3323 || inst.reloc.exp.X_op == O_big))
3325 inst.error = _("invalid type for literal pool");
3328 else if (pool_size & 0x7)
3330 if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
3332 inst.error = _("literal pool overflow");
3336 pool->literals[entry] = inst.reloc.exp;
3337 pool->literals[entry].X_op = O_constant;
3338 pool->literals[entry].X_add_number = 0;
3339 pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
3340 pool->next_free_entry += 1;
3343 else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
3345 inst.error = _("literal pool overflow");
3349 pool->literals[entry] = inst.reloc.exp;
3350 pool->literals[entry].X_op = O_constant;
3351 pool->literals[entry].X_add_number = imm1;
3352 pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3353 pool->literals[entry++].X_md = 4;
3354 pool->literals[entry] = inst.reloc.exp;
3355 pool->literals[entry].X_op = O_constant;
3356 pool->literals[entry].X_add_number = imm2;
3357 pool->literals[entry].X_unsigned = inst.reloc.exp.X_unsigned;
3358 pool->literals[entry].X_md = 4;
3359 pool->alignment = 3;
3360 pool->next_free_entry += 1;
3364 pool->literals[entry] = inst.reloc.exp;
3365 pool->literals[entry].X_md = 4;
3369 /* PR ld/12974: Record the location of the first source line to reference
3370 this entry in the literal pool. If it turns out during linking that the
3371 symbol does not exist we will be able to give an accurate line number for
3372 the (first use of the) missing reference. */
3373 if (debug_type == DEBUG_DWARF2)
3374 dwarf2_where (pool->locs + entry);
3376 pool->next_free_entry += 1;
3378 else if (padding_slot_p)
3380 pool->literals[entry] = inst.reloc.exp;
3381 pool->literals[entry].X_md = nbytes;
3384 inst.reloc.exp.X_op = O_symbol;
3385 inst.reloc.exp.X_add_number = pool_size;
3386 inst.reloc.exp.X_add_symbol = pool->symbol;
3392 tc_start_label_without_colon (void)
3394 bfd_boolean ret = TRUE;
3396 if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3398 const char *label = input_line_pointer;
3400 while (!is_end_of_line[(int) label[-1]])
3405 as_bad (_("Invalid label '%s'"), label);
3409 asmfunc_debug (label);
3411 asmfunc_state = WAITING_ENDASMFUNC;
3417 /* Can't use symbol_new here, so have to create a symbol and then at
3418 a later date assign it a value. That's what these functions do. */
3421 symbol_locate (symbolS * symbolP,
3422 const char * name, /* It is copied, the caller can modify. */
3423 segT segment, /* Segment identifier (SEG_<something>). */
3424 valueT valu, /* Symbol value. */
3425 fragS * frag) /* Associated fragment. */
3428 char * preserved_copy_of_name;
3430 name_length = strlen (name) + 1; /* +1 for \0. */
3431 obstack_grow (¬es, name, name_length);
3432 preserved_copy_of_name = (char *) obstack_finish (¬es);
3434 #ifdef tc_canonicalize_symbol_name
3435 preserved_copy_of_name =
3436 tc_canonicalize_symbol_name (preserved_copy_of_name);
3439 S_SET_NAME (symbolP, preserved_copy_of_name);
3441 S_SET_SEGMENT (symbolP, segment);
3442 S_SET_VALUE (symbolP, valu);
3443 symbol_clear_list_pointers (symbolP);
3445 symbol_set_frag (symbolP, frag);
3447 /* Link to end of symbol chain. */
3449 extern int symbol_table_frozen;
3451 if (symbol_table_frozen)
3455 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3457 obj_symbol_new_hook (symbolP);
3459 #ifdef tc_symbol_new_hook
3460 tc_symbol_new_hook (symbolP);
3464 verify_symbol_chain (symbol_rootP, symbol_lastP);
3465 #endif /* DEBUG_SYMS */
3469 s_ltorg (int ignored ATTRIBUTE_UNUSED)
3472 literal_pool * pool;
3475 pool = find_literal_pool ();
3477 || pool->symbol == NULL
3478 || pool->next_free_entry == 0)
3481 /* Align pool as you have word accesses.
3482 Only make a frag if we have to. */
3484 frag_align (pool->alignment, 0, 0);
3486 record_alignment (now_seg, 2);
3489 seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
3490 make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
3492 sprintf (sym_name, "$$lit_\002%x", pool->id);
3494 symbol_locate (pool->symbol, sym_name, now_seg,
3495 (valueT) frag_now_fix (), frag_now);
3496 symbol_table_insert (pool->symbol);
3498 ARM_SET_THUMB (pool->symbol, thumb_mode);
3500 #if defined OBJ_COFF || defined OBJ_ELF
3501 ARM_SET_INTERWORK (pool->symbol, support_interwork);
3504 for (entry = 0; entry < pool->next_free_entry; entry ++)
3507 if (debug_type == DEBUG_DWARF2)
3508 dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
3510 /* First output the expression in the instruction to the pool. */
3511 emit_expr (&(pool->literals[entry]),
3512 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
3515 /* Mark the pool as empty. */
3516 pool->next_free_entry = 0;
3517 pool->symbol = NULL;
3521 /* Forward declarations for functions below, in the MD interface
3523 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3524 static valueT create_unwind_entry (int);
3525 static void start_unwind_section (const segT, int);
3526 static void add_unwind_opcode (valueT, int);
3527 static void flush_pending_unwind (void);
3529 /* Directives: Data. */
3532 s_arm_elf_cons (int nbytes)
3536 #ifdef md_flush_pending_output
3537 md_flush_pending_output ();
3540 if (is_it_end_of_statement ())
3542 demand_empty_rest_of_line ();
3546 #ifdef md_cons_align
3547 md_cons_align (nbytes);
3550 mapping_state (MAP_DATA);
3554 char *base = input_line_pointer;
3558 if (exp.X_op != O_symbol)
3559 emit_expr (&exp, (unsigned int) nbytes);
3562 char *before_reloc = input_line_pointer;
3563 reloc = parse_reloc (&input_line_pointer);
3566 as_bad (_("unrecognized relocation suffix"));
3567 ignore_rest_of_line ();
3570 else if (reloc == BFD_RELOC_UNUSED)
3571 emit_expr (&exp, (unsigned int) nbytes);
3574 reloc_howto_type *howto = (reloc_howto_type *)
3575 bfd_reloc_type_lookup (stdoutput,
3576 (bfd_reloc_code_real_type) reloc);
3577 int size = bfd_get_reloc_size (howto);
3579 if (reloc == BFD_RELOC_ARM_PLT32)
3581 as_bad (_("(plt) is only valid on branch targets"));
3582 reloc = BFD_RELOC_UNUSED;
3587 as_bad (ngettext ("%s relocations do not fit in %d byte",
3588 "%s relocations do not fit in %d bytes",
3590 howto->name, nbytes);
3593 /* We've parsed an expression stopping at O_symbol.
3594 But there may be more expression left now that we
3595 have parsed the relocation marker. Parse it again.
3596 XXX Surely there is a cleaner way to do this. */
3597 char *p = input_line_pointer;
3599 char *save_buf = XNEWVEC (char, input_line_pointer - base);
3601 memcpy (save_buf, base, input_line_pointer - base);
3602 memmove (base + (input_line_pointer - before_reloc),
3603 base, before_reloc - base);
3605 input_line_pointer = base + (input_line_pointer-before_reloc);
3607 memcpy (base, save_buf, p - base);
3609 offset = nbytes - size;
3610 p = frag_more (nbytes);
3611 memset (p, 0, nbytes);
3612 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
3613 size, &exp, 0, (enum bfd_reloc_code_real) reloc);
3619 while (*input_line_pointer++ == ',');
3621 /* Put terminator back into stream. */
3622 input_line_pointer --;
3623 demand_empty_rest_of_line ();
3626 /* Emit an expression containing a 32-bit thumb instruction.
3627 Implementation based on put_thumb32_insn. */
3630 emit_thumb32_expr (expressionS * exp)
3632 expressionS exp_high = *exp;
3634 exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3635 emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3636 exp->X_add_number &= 0xffff;
3637 emit_expr (exp, (unsigned int) THUMB_SIZE);
3640 /* Guess the instruction size based on the opcode. */
3643 thumb_insn_size (int opcode)
3645 if ((unsigned int) opcode < 0xe800u)
3647 else if ((unsigned int) opcode >= 0xe8000000u)
3654 emit_insn (expressionS *exp, int nbytes)
3658 if (exp->X_op == O_constant)
3663 size = thumb_insn_size (exp->X_add_number);
3667 if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
3669 as_bad (_(".inst.n operand too big. "\
3670 "Use .inst.w instead"));
3675 if (now_it.state == AUTOMATIC_IT_BLOCK)
3676 set_it_insn_type_nonvoid (OUTSIDE_IT_INSN, 0);
3678 set_it_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);
3680 if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
3681 emit_thumb32_expr (exp);
3683 emit_expr (exp, (unsigned int) size);
3685 it_fsm_post_encode ();
3689 as_bad (_("cannot determine Thumb instruction size. " \
3690 "Use .inst.n/.inst.w instead"));
3693 as_bad (_("constant expression required"));
3698 /* Like s_arm_elf_cons but do not use md_cons_align and
3699 set the mapping state to MAP_ARM/MAP_THUMB. */
3702 s_arm_elf_inst (int nbytes)
3704 if (is_it_end_of_statement ())
3706 demand_empty_rest_of_line ();
3710 /* Calling mapping_state () here will not change ARM/THUMB,
3711 but will ensure not to be in DATA state. */
3714 mapping_state (MAP_THUMB);
3719 as_bad (_("width suffixes are invalid in ARM mode"));
3720 ignore_rest_of_line ();
3726 mapping_state (MAP_ARM);
3735 if (! emit_insn (& exp, nbytes))
3737 ignore_rest_of_line ();
3741 while (*input_line_pointer++ == ',');
3743 /* Put terminator back into stream. */
3744 input_line_pointer --;
3745 demand_empty_rest_of_line ();
3748 /* Parse a .rel31 directive. */
3751 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
3758 if (*input_line_pointer == '1')
3759 highbit = 0x80000000;
3760 else if (*input_line_pointer != '0')
3761 as_bad (_("expected 0 or 1"));
3763 input_line_pointer++;
3764 if (*input_line_pointer != ',')
3765 as_bad (_("missing comma"));
3766 input_line_pointer++;
3768 #ifdef md_flush_pending_output
3769 md_flush_pending_output ();
3772 #ifdef md_cons_align
3776 mapping_state (MAP_DATA);
3781 md_number_to_chars (p, highbit, 4);
3782 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3783 BFD_RELOC_ARM_PREL31);
3785 demand_empty_rest_of_line ();
3788 /* Directives: AEABI stack-unwind tables. */
3790 /* Parse an unwind_fnstart directive. Simply records the current location. */
3793 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3795 demand_empty_rest_of_line ();
3796 if (unwind.proc_start)
3798 as_bad (_("duplicate .fnstart directive"));
3802 /* Mark the start of the function. */
3803 unwind.proc_start = expr_build_dot ();
3805 /* Reset the rest of the unwind info. */
3806 unwind.opcode_count = 0;
3807 unwind.table_entry = NULL;
3808 unwind.personality_routine = NULL;
3809 unwind.personality_index = -1;
3810 unwind.frame_size = 0;
3811 unwind.fp_offset = 0;
3812 unwind.fp_reg = REG_SP;
3814 unwind.sp_restored = 0;
3818 /* Parse a handlerdata directive. Creates the exception handling table entry
3819 for the function. */
3822 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3824 demand_empty_rest_of_line ();
3825 if (!unwind.proc_start)
3826 as_bad (MISSING_FNSTART);
3828 if (unwind.table_entry)
3829 as_bad (_("duplicate .handlerdata directive"));
3831 create_unwind_entry (1);
3834 /* Parse an unwind_fnend directive. Generates the index table entry. */
3837 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3842 unsigned int marked_pr_dependency;
3844 demand_empty_rest_of_line ();
3846 if (!unwind.proc_start)
3848 as_bad (_(".fnend directive without .fnstart"));
3852 /* Add eh table entry. */
3853 if (unwind.table_entry == NULL)
3854 val = create_unwind_entry (0);
3858 /* Add index table entry. This is two words. */
3859 start_unwind_section (unwind.saved_seg, 1);
3860 frag_align (2, 0, 0);
3861 record_alignment (now_seg, 2);
3863 ptr = frag_more (8);
3865 where = frag_now_fix () - 8;
3867 /* Self relative offset of the function start. */
3868 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3869 BFD_RELOC_ARM_PREL31);
3871 /* Indicate dependency on EHABI-defined personality routines to the
3872 linker, if it hasn't been done already. */
3873 marked_pr_dependency
3874 = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
3875 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3876 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3878 static const char *const name[] =
3880 "__aeabi_unwind_cpp_pr0",
3881 "__aeabi_unwind_cpp_pr1",
3882 "__aeabi_unwind_cpp_pr2"
3884 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3885 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3886 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3887 |= 1 << unwind.personality_index;
3891 /* Inline exception table entry. */
3892 md_number_to_chars (ptr + 4, val, 4);
3894 /* Self relative offset of the table entry. */
3895 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3896 BFD_RELOC_ARM_PREL31);
3898 /* Restore the original section. */
3899 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3901 unwind.proc_start = NULL;
3905 /* Parse an unwind_cantunwind directive. */
3908 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3910 demand_empty_rest_of_line ();
3911 if (!unwind.proc_start)
3912 as_bad (MISSING_FNSTART);
3914 if (unwind.personality_routine || unwind.personality_index != -1)
3915 as_bad (_("personality routine specified for cantunwind frame"));
3917 unwind.personality_index = -2;
3921 /* Parse a personalityindex directive. */
3924 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3928 if (!unwind.proc_start)
3929 as_bad (MISSING_FNSTART);
3931 if (unwind.personality_routine || unwind.personality_index != -1)
3932 as_bad (_("duplicate .personalityindex directive"));
3936 if (exp.X_op != O_constant
3937 || exp.X_add_number < 0 || exp.X_add_number > 15)
3939 as_bad (_("bad personality routine number"));
3940 ignore_rest_of_line ();
3944 unwind.personality_index = exp.X_add_number;
3946 demand_empty_rest_of_line ();
3950 /* Parse a personality directive. */
3953 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3957 if (!unwind.proc_start)
3958 as_bad (MISSING_FNSTART);
3960 if (unwind.personality_routine || unwind.personality_index != -1)
3961 as_bad (_("duplicate .personality directive"));
3963 c = get_symbol_name (& name);
3964 p = input_line_pointer;
3966 ++ input_line_pointer;
3967 unwind.personality_routine = symbol_find_or_make (name);
3969 demand_empty_rest_of_line ();
3973 /* Parse a directive saving core registers. */
3976 s_arm_unwind_save_core (void)
3982 range = parse_reg_list (&input_line_pointer);
3985 as_bad (_("expected register list"));
3986 ignore_rest_of_line ();
3990 demand_empty_rest_of_line ();
3992 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3993 into .unwind_save {..., sp...}. We aren't bothered about the value of
3994 ip because it is clobbered by calls. */
3995 if (unwind.sp_restored && unwind.fp_reg == 12
3996 && (range & 0x3000) == 0x1000)
3998 unwind.opcode_count--;
3999 unwind.sp_restored = 0;
4000 range = (range | 0x2000) & ~0x1000;
4001 unwind.pending_offset = 0;
4007 /* See if we can use the short opcodes. These pop a block of up to 8
4008 registers starting with r4, plus maybe r14. */
4009 for (n = 0; n < 8; n++)
4011 /* Break at the first non-saved register. */
4012 if ((range & (1 << (n + 4))) == 0)
4015 /* See if there are any other bits set. */
4016 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
4018 /* Use the long form. */
4019 op = 0x8000 | ((range >> 4) & 0xfff);
4020 add_unwind_opcode (op, 2);
4024 /* Use the short form. */
4026 op = 0xa8; /* Pop r14. */
4028 op = 0xa0; /* Do not pop r14. */
4030 add_unwind_opcode (op, 1);
4037 op = 0xb100 | (range & 0xf);
4038 add_unwind_opcode (op, 2);
4041 /* Record the number of bytes pushed. */
4042 for (n = 0; n < 16; n++)
4044 if (range & (1 << n))
4045 unwind.frame_size += 4;
4050 /* Parse a directive saving FPA registers. */
4053 s_arm_unwind_save_fpa (int reg)
4059 /* Get Number of registers to transfer. */
4060 if (skip_past_comma (&input_line_pointer) != FAIL)
4063 exp.X_op = O_illegal;
4065 if (exp.X_op != O_constant)
4067 as_bad (_("expected , <constant>"));
4068 ignore_rest_of_line ();
4072 num_regs = exp.X_add_number;
4074 if (num_regs < 1 || num_regs > 4)
4076 as_bad (_("number of registers must be in the range [1:4]"));
4077 ignore_rest_of_line ();
4081 demand_empty_rest_of_line ();
4086 op = 0xb4 | (num_regs - 1);
4087 add_unwind_opcode (op, 1);
4092 op = 0xc800 | (reg << 4) | (num_regs - 1);
4093 add_unwind_opcode (op, 2);
4095 unwind.frame_size += num_regs * 12;
4099 /* Parse a directive saving VFP registers for ARMv6 and above. */
4102 s_arm_unwind_save_vfp_armv6 (void)
4107 int num_vfpv3_regs = 0;
4108 int num_regs_below_16;
4110 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
4113 as_bad (_("expected register list"));
4114 ignore_rest_of_line ();
4118 demand_empty_rest_of_line ();
4120 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4121 than FSTMX/FLDMX-style ones). */
4123 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
4125 num_vfpv3_regs = count;
4126 else if (start + count > 16)
4127 num_vfpv3_regs = start + count - 16;
4129 if (num_vfpv3_regs > 0)
4131 int start_offset = start > 16 ? start - 16 : 0;
4132 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
4133 add_unwind_opcode (op, 2);
4136 /* Generate opcode for registers numbered in the range 0 .. 15. */
4137 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
4138 gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
4139 if (num_regs_below_16 > 0)
4141 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
4142 add_unwind_opcode (op, 2);
4145 unwind.frame_size += count * 8;
4149 /* Parse a directive saving VFP registers for pre-ARMv6. */
4152 s_arm_unwind_save_vfp (void)
4158 count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D);
4161 as_bad (_("expected register list"));
4162 ignore_rest_of_line ();
4166 demand_empty_rest_of_line ();
4171 op = 0xb8 | (count - 1);
4172 add_unwind_opcode (op, 1);
4177 op = 0xb300 | (reg << 4) | (count - 1);
4178 add_unwind_opcode (op, 2);
4180 unwind.frame_size += count * 8 + 4;
4184 /* Parse a directive saving iWMMXt data registers. */
4187 s_arm_unwind_save_mmxwr (void)
4195 if (*input_line_pointer == '{')
4196 input_line_pointer++;
4200 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4204 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4209 as_tsktsk (_("register list not in ascending order"));
4212 if (*input_line_pointer == '-')
4214 input_line_pointer++;
4215 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4218 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4221 else if (reg >= hi_reg)
4223 as_bad (_("bad register range"));
4226 for (; reg < hi_reg; reg++)
4230 while (skip_past_comma (&input_line_pointer) != FAIL);
4232 skip_past_char (&input_line_pointer, '}');
4234 demand_empty_rest_of_line ();
4236 /* Generate any deferred opcodes because we're going to be looking at
4238 flush_pending_unwind ();
4240 for (i = 0; i < 16; i++)
4242 if (mask & (1 << i))
4243 unwind.frame_size += 8;
4246 /* Attempt to combine with a previous opcode. We do this because gcc
4247 likes to output separate unwind directives for a single block of
4249 if (unwind.opcode_count > 0)
4251 i = unwind.opcodes[unwind.opcode_count - 1];
4252 if ((i & 0xf8) == 0xc0)
4255 /* Only merge if the blocks are contiguous. */
4258 if ((mask & 0xfe00) == (1 << 9))
4260 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4261 unwind.opcode_count--;
4264 else if (i == 6 && unwind.opcode_count >= 2)
4266 i = unwind.opcodes[unwind.opcode_count - 2];
4270 op = 0xffff << (reg - 1);
4272 && ((mask & op) == (1u << (reg - 1))))
4274 op = (1 << (reg + i + 1)) - 1;
4275 op &= ~((1 << reg) - 1);
4277 unwind.opcode_count -= 2;
4284 /* We want to generate opcodes in the order the registers have been
4285 saved, ie. descending order. */
4286 for (reg = 15; reg >= -1; reg--)
4288 /* Save registers in blocks. */
4290 || !(mask & (1 << reg)))
4292 /* We found an unsaved reg. Generate opcodes to save the
4299 op = 0xc0 | (hi_reg - 10);
4300 add_unwind_opcode (op, 1);
4305 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4306 add_unwind_opcode (op, 2);
4315 ignore_rest_of_line ();
4319 s_arm_unwind_save_mmxwcg (void)
4326 if (*input_line_pointer == '{')
4327 input_line_pointer++;
4329 skip_whitespace (input_line_pointer);
4333 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4337 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4343 as_tsktsk (_("register list not in ascending order"));
4346 if (*input_line_pointer == '-')
4348 input_line_pointer++;
4349 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4352 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4355 else if (reg >= hi_reg)
4357 as_bad (_("bad register range"));
4360 for (; reg < hi_reg; reg++)
4364 while (skip_past_comma (&input_line_pointer) != FAIL);
4366 skip_past_char (&input_line_pointer, '}');
4368 demand_empty_rest_of_line ();
4370 /* Generate any deferred opcodes because we're going to be looking at
4372 flush_pending_unwind ();
4374 for (reg = 0; reg < 16; reg++)
4376 if (mask & (1 << reg))
4377 unwind.frame_size += 4;
4380 add_unwind_opcode (op, 2);
4383 ignore_rest_of_line ();
4387 /* Parse an unwind_save directive.
4388 If the argument is non-zero, this is a .vsave directive. */
4391 s_arm_unwind_save (int arch_v6)
4394 struct reg_entry *reg;
4395 bfd_boolean had_brace = FALSE;
4397 if (!unwind.proc_start)
4398 as_bad (MISSING_FNSTART);
4400 /* Figure out what sort of save we have. */
4401 peek = input_line_pointer;
4409 reg = arm_reg_parse_multi (&peek);
4413 as_bad (_("register expected"));
4414 ignore_rest_of_line ();
4423 as_bad (_("FPA .unwind_save does not take a register list"));
4424 ignore_rest_of_line ();
4427 input_line_pointer = peek;
4428 s_arm_unwind_save_fpa (reg->number);
4432 s_arm_unwind_save_core ();
4437 s_arm_unwind_save_vfp_armv6 ();
4439 s_arm_unwind_save_vfp ();
4442 case REG_TYPE_MMXWR:
4443 s_arm_unwind_save_mmxwr ();
4446 case REG_TYPE_MMXWCG:
4447 s_arm_unwind_save_mmxwcg ();
4451 as_bad (_(".unwind_save does not support this kind of register"));
4452 ignore_rest_of_line ();
4457 /* Parse an unwind_movsp directive. */
4460 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4466 if (!unwind.proc_start)
4467 as_bad (MISSING_FNSTART);
4469 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4472 as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4473 ignore_rest_of_line ();
4477 /* Optional constant. */
4478 if (skip_past_comma (&input_line_pointer) != FAIL)
4480 if (immediate_for_directive (&offset) == FAIL)
4486 demand_empty_rest_of_line ();
4488 if (reg == REG_SP || reg == REG_PC)
4490 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4494 if (unwind.fp_reg != REG_SP)
4495 as_bad (_("unexpected .unwind_movsp directive"));
4497 /* Generate opcode to restore the value. */
4499 add_unwind_opcode (op, 1);
4501 /* Record the information for later. */
4502 unwind.fp_reg = reg;
4503 unwind.fp_offset = unwind.frame_size - offset;
4504 unwind.sp_restored = 1;
4507 /* Parse an unwind_pad directive. */
4510 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4514 if (!unwind.proc_start)
4515 as_bad (MISSING_FNSTART);
4517 if (immediate_for_directive (&offset) == FAIL)
4522 as_bad (_("stack increment must be multiple of 4"));
4523 ignore_rest_of_line ();
4527 /* Don't generate any opcodes, just record the details for later. */
4528 unwind.frame_size += offset;
4529 unwind.pending_offset += offset;
4531 demand_empty_rest_of_line ();
4534 /* Parse an unwind_setfp directive. */
4537 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4543 if (!unwind.proc_start)
4544 as_bad (MISSING_FNSTART);
4546 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4547 if (skip_past_comma (&input_line_pointer) == FAIL)
4550 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4552 if (fp_reg == FAIL || sp_reg == FAIL)
4554 as_bad (_("expected <reg>, <reg>"));
4555 ignore_rest_of_line ();
4559 /* Optional constant. */
4560 if (skip_past_comma (&input_line_pointer) != FAIL)
4562 if (immediate_for_directive (&offset) == FAIL)
4568 demand_empty_rest_of_line ();
4570 if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4572 as_bad (_("register must be either sp or set by a previous"
4573 "unwind_movsp directive"));
4577 /* Don't generate any opcodes, just record the information for later. */
4578 unwind.fp_reg = fp_reg;
4580 if (sp_reg == REG_SP)
4581 unwind.fp_offset = unwind.frame_size - offset;
4583 unwind.fp_offset -= offset;
4586 /* Parse an unwind_raw directive. */
4589 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4592 /* This is an arbitrary limit. */
4593 unsigned char op[16];
4596 if (!unwind.proc_start)
4597 as_bad (MISSING_FNSTART);
4600 if (exp.X_op == O_constant
4601 && skip_past_comma (&input_line_pointer) != FAIL)
4603 unwind.frame_size += exp.X_add_number;
4607 exp.X_op = O_illegal;
4609 if (exp.X_op != O_constant)
4611 as_bad (_("expected <offset>, <opcode>"));
4612 ignore_rest_of_line ();
4618 /* Parse the opcode. */
4623 as_bad (_("unwind opcode too long"));
4624 ignore_rest_of_line ();
4626 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4628 as_bad (_("invalid unwind opcode"));
4629 ignore_rest_of_line ();
4632 op[count++] = exp.X_add_number;
4634 /* Parse the next byte. */
4635 if (skip_past_comma (&input_line_pointer) == FAIL)
4641 /* Add the opcode bytes in reverse order. */
4643 add_unwind_opcode (op[count], 1);
4645 demand_empty_rest_of_line ();
4649 /* Parse a .eabi_attribute directive. */
4652 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4654 int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4656 if (tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4657 attributes_set_explicitly[tag] = 1;
4660 /* Emit a tls fix for the symbol. */
4663 s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
4667 #ifdef md_flush_pending_output
4668 md_flush_pending_output ();
4671 #ifdef md_cons_align
4675 /* Since we're just labelling the code, there's no need to define a
4678 p = obstack_next_free (&frchain_now->frch_obstack);
4679 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
4680 thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
4681 : BFD_RELOC_ARM_TLS_DESCSEQ);
4683 #endif /* OBJ_ELF */
4685 static void s_arm_arch (int);
4686 static void s_arm_object_arch (int);
4687 static void s_arm_cpu (int);
4688 static void s_arm_fpu (int);
4689 static void s_arm_arch_extension (int);
4694 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
4701 if (exp.X_op == O_symbol)
4702 exp.X_op = O_secrel;
4704 emit_expr (&exp, 4);
4706 while (*input_line_pointer++ == ',');
4708 input_line_pointer--;
4709 demand_empty_rest_of_line ();
4713 /* This table describes all the machine specific pseudo-ops the assembler
4714 has to support. The fields are:
4715 pseudo-op name without dot
4716 function to call to execute this pseudo-op
4717 Integer arg to pass to the function. */
4719 const pseudo_typeS md_pseudo_table[] =
4721 /* Never called because '.req' does not start a line. */
4722 { "req", s_req, 0 },
4723 /* Following two are likewise never called. */
4726 { "unreq", s_unreq, 0 },
4727 { "bss", s_bss, 0 },
4728 { "align", s_align_ptwo, 2 },
4729 { "arm", s_arm, 0 },
4730 { "thumb", s_thumb, 0 },
4731 { "code", s_code, 0 },
4732 { "force_thumb", s_force_thumb, 0 },
4733 { "thumb_func", s_thumb_func, 0 },
4734 { "thumb_set", s_thumb_set, 0 },
4735 { "even", s_even, 0 },
4736 { "ltorg", s_ltorg, 0 },
4737 { "pool", s_ltorg, 0 },
4738 { "syntax", s_syntax, 0 },
4739 { "cpu", s_arm_cpu, 0 },
4740 { "arch", s_arm_arch, 0 },
4741 { "object_arch", s_arm_object_arch, 0 },
4742 { "fpu", s_arm_fpu, 0 },
4743 { "arch_extension", s_arm_arch_extension, 0 },
4745 { "word", s_arm_elf_cons, 4 },
4746 { "long", s_arm_elf_cons, 4 },
4747 { "inst.n", s_arm_elf_inst, 2 },
4748 { "inst.w", s_arm_elf_inst, 4 },
4749 { "inst", s_arm_elf_inst, 0 },
4750 { "rel31", s_arm_rel31, 0 },
4751 { "fnstart", s_arm_unwind_fnstart, 0 },
4752 { "fnend", s_arm_unwind_fnend, 0 },
4753 { "cantunwind", s_arm_unwind_cantunwind, 0 },
4754 { "personality", s_arm_unwind_personality, 0 },
4755 { "personalityindex", s_arm_unwind_personalityindex, 0 },
4756 { "handlerdata", s_arm_unwind_handlerdata, 0 },
4757 { "save", s_arm_unwind_save, 0 },
4758 { "vsave", s_arm_unwind_save, 1 },
4759 { "movsp", s_arm_unwind_movsp, 0 },
4760 { "pad", s_arm_unwind_pad, 0 },
4761 { "setfp", s_arm_unwind_setfp, 0 },
4762 { "unwind_raw", s_arm_unwind_raw, 0 },
4763 { "eabi_attribute", s_arm_eabi_attribute, 0 },
4764 { "tlsdescseq", s_arm_tls_descseq, 0 },
4768 /* These are used for dwarf. */
4772 /* These are used for dwarf2. */
4773 { "file", dwarf2_directive_file, 0 },
4774 { "loc", dwarf2_directive_loc, 0 },
4775 { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
4777 { "extend", float_cons, 'x' },
4778 { "ldouble", float_cons, 'x' },
4779 { "packed", float_cons, 'p' },
4781 {"secrel32", pe_directive_secrel, 0},
4784 /* These are for compatibility with CodeComposer Studio. */
4785 {"ref", s_ccs_ref, 0},
4786 {"def", s_ccs_def, 0},
4787 {"asmfunc", s_ccs_asmfunc, 0},
4788 {"endasmfunc", s_ccs_endasmfunc, 0},
4793 /* Parser functions used exclusively in instruction operands. */
4795 /* Generic immediate-value read function for use in insn parsing.
4796 STR points to the beginning of the immediate (the leading #);
4797 VAL receives the value; if the value is outside [MIN, MAX]
4798 issue an error. PREFIX_OPT is true if the immediate prefix is
4802 parse_immediate (char **str, int *val, int min, int max,
4803 bfd_boolean prefix_opt)
4807 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
4808 if (exp.X_op != O_constant)
4810 inst.error = _("constant expression required");
4814 if (exp.X_add_number < min || exp.X_add_number > max)
4816 inst.error = _("immediate value out of range");
4820 *val = exp.X_add_number;
4824 /* Less-generic immediate-value read function with the possibility of loading a
4825 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
4826 instructions. Puts the result directly in inst.operands[i]. */
4829 parse_big_immediate (char **str, int i, expressionS *in_exp,
4830 bfd_boolean allow_symbol_p)
4833 expressionS *exp_p = in_exp ? in_exp : &exp;
4836 my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);
4838 if (exp_p->X_op == O_constant)
4840 inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
4841 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4842 O_constant. We have to be careful not to break compilation for
4843 32-bit X_add_number, though. */
4844 if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
4846 /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4. */
4847 inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
4849 inst.operands[i].regisimm = 1;
4852 else if (exp_p->X_op == O_big
4853 && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
4855 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4857 /* Bignums have their least significant bits in
4858 generic_bignum[0]. Make sure we put 32 bits in imm and
4859 32 bits in reg, in a (hopefully) portable way. */
4860 gas_assert (parts != 0);
4862 /* Make sure that the number is not too big.
4863 PR 11972: Bignums can now be sign-extended to the
4864 size of a .octa so check that the out of range bits
4865 are all zero or all one. */
4866 if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
4868 LITTLENUM_TYPE m = -1;
4870 if (generic_bignum[parts * 2] != 0
4871 && generic_bignum[parts * 2] != m)
4874 for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
4875 if (generic_bignum[j] != generic_bignum[j-1])
4879 inst.operands[i].imm = 0;
4880 for (j = 0; j < parts; j++, idx++)
4881 inst.operands[i].imm |= generic_bignum[idx]
4882 << (LITTLENUM_NUMBER_OF_BITS * j);
4883 inst.operands[i].reg = 0;
4884 for (j = 0; j < parts; j++, idx++)
4885 inst.operands[i].reg |= generic_bignum[idx]
4886 << (LITTLENUM_NUMBER_OF_BITS * j);
4887 inst.operands[i].regisimm = 1;
4889 else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
4897 /* Returns the pseudo-register number of an FPA immediate constant,
4898 or FAIL if there isn't a valid constant here. */
4901 parse_fpa_immediate (char ** str)
4903 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4909 /* First try and match exact strings, this is to guarantee
4910 that some formats will work even for cross assembly. */
4912 for (i = 0; fp_const[i]; i++)
4914 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4918 *str += strlen (fp_const[i]);
4919 if (is_end_of_line[(unsigned char) **str])
4925 /* Just because we didn't get a match doesn't mean that the constant
4926 isn't valid, just that it is in a format that we don't
4927 automatically recognize. Try parsing it with the standard
4928 expression routines. */
4930 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4932 /* Look for a raw floating point number. */
4933 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4934 && is_end_of_line[(unsigned char) *save_in])
4936 for (i = 0; i < NUM_FLOAT_VALS; i++)
4938 for (j = 0; j < MAX_LITTLENUMS; j++)
4940 if (words[j] != fp_values[i][j])
4944 if (j == MAX_LITTLENUMS)
4952 /* Try and parse a more complex expression, this will probably fail
4953 unless the code uses a floating point prefix (eg "0f"). */
4954 save_in = input_line_pointer;
4955 input_line_pointer = *str;
4956 if (expression (&exp) == absolute_section
4957 && exp.X_op == O_big
4958 && exp.X_add_number < 0)
4960 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4962 #define X_PRECISION 5
4963 #define E_PRECISION 15L
4964 if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
4966 for (i = 0; i < NUM_FLOAT_VALS; i++)
4968 for (j = 0; j < MAX_LITTLENUMS; j++)
4970 if (words[j] != fp_values[i][j])
4974 if (j == MAX_LITTLENUMS)
4976 *str = input_line_pointer;
4977 input_line_pointer = save_in;
4984 *str = input_line_pointer;
4985 input_line_pointer = save_in;
4986 inst.error = _("invalid FPA immediate expression");
4990 /* Returns 1 if a number has "quarter-precision" float format
4991 0baBbbbbbc defgh000 00000000 00000000. */
4994 is_quarter_float (unsigned imm)
4996 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4997 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
5001 /* Detect the presence of a floating point or integer zero constant,
5005 parse_ifimm_zero (char **in)
5009 if (!is_immediate_prefix (**in))
5011 /* In unified syntax, all prefixes are optional. */
5012 if (!unified_syntax)
5018 /* Accept #0x0 as a synonym for #0. */
5019 if (strncmp (*in, "0x", 2) == 0)
5022 if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
5027 error_code = atof_generic (in, ".", EXP_CHARS,
5028 &generic_floating_point_number);
5031 && generic_floating_point_number.sign == '+'
5032 && (generic_floating_point_number.low
5033 > generic_floating_point_number.leader))
5039 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5040 0baBbbbbbc defgh000 00000000 00000000.
5041 The zero and minus-zero cases need special handling, since they can't be
5042 encoded in the "quarter-precision" float format, but can nonetheless be
5043 loaded as integer constants. */
5046 parse_qfloat_immediate (char **ccp, int *immed)
5050 LITTLENUM_TYPE words[MAX_LITTLENUMS];
5051 int found_fpchar = 0;
5053 skip_past_char (&str, '#');
5055 /* We must not accidentally parse an integer as a floating-point number. Make
5056 sure that the value we parse is not an integer by checking for special
5057 characters '.' or 'e'.
5058 FIXME: This is a horrible hack, but doing better is tricky because type
5059 information isn't in a very usable state at parse time. */
5061 skip_whitespace (fpnum);
5063 if (strncmp (fpnum, "0x", 2) == 0)
5067 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
5068 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
5078 if ((str = atof_ieee (str, 's', words)) != NULL)
5080 unsigned fpword = 0;
5083 /* Our FP word must be 32 bits (single-precision FP). */
5084 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
5086 fpword <<= LITTLENUM_NUMBER_OF_BITS;
5090 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
5103 /* Shift operands. */
5106 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
5109 struct asm_shift_name
5112 enum shift_kind kind;
5115 /* Third argument to parse_shift. */
5116 enum parse_shift_mode
5118 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
5119 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
5120 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
5121 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
5122 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
5125 /* Parse a <shift> specifier on an ARM data processing instruction.
5126 This has three forms:
5128 (LSL|LSR|ASL|ASR|ROR) Rs
5129 (LSL|LSR|ASL|ASR|ROR) #imm
5132 Note that ASL is assimilated to LSL in the instruction encoding, and
5133 RRX to ROR #0 (which cannot be written as such). */
5136 parse_shift (char **str, int i, enum parse_shift_mode mode)
5138 const struct asm_shift_name *shift_name;
5139 enum shift_kind shift;
5144 for (p = *str; ISALPHA (*p); p++)
5149 inst.error = _("shift expression expected");
5153 shift_name = (const struct asm_shift_name *) hash_find_n (arm_shift_hsh, *str,
5156 if (shift_name == NULL)
5158 inst.error = _("shift expression expected");
5162 shift = shift_name->kind;
5166 case NO_SHIFT_RESTRICT:
5167 case SHIFT_IMMEDIATE: break;
5169 case SHIFT_LSL_OR_ASR_IMMEDIATE:
5170 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5172 inst.error = _("'LSL' or 'ASR' required");
5177 case SHIFT_LSL_IMMEDIATE:
5178 if (shift != SHIFT_LSL)
5180 inst.error = _("'LSL' required");
5185 case SHIFT_ASR_IMMEDIATE:
5186 if (shift != SHIFT_ASR)
5188 inst.error = _("'ASR' required");
5196 if (shift != SHIFT_RRX)
5198 /* Whitespace can appear here if the next thing is a bare digit. */
5199 skip_whitespace (p);
5201 if (mode == NO_SHIFT_RESTRICT
5202 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5204 inst.operands[i].imm = reg;
5205 inst.operands[i].immisreg = 1;
5207 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5210 inst.operands[i].shift_kind = shift;
5211 inst.operands[i].shifted = 1;
5216 /* Parse a <shifter_operand> for an ARM data processing instruction:
5219 #<immediate>, <rotate>
5223 where <shift> is defined by parse_shift above, and <rotate> is a
5224 multiple of 2 between 0 and 30. Validation of immediate operands
5225 is deferred to md_apply_fix. */
5228 parse_shifter_operand (char **str, int i)
5233 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
5235 inst.operands[i].reg = value;
5236 inst.operands[i].isreg = 1;
5238 /* parse_shift will override this if appropriate */
5239 inst.reloc.exp.X_op = O_constant;
5240 inst.reloc.exp.X_add_number = 0;
5242 if (skip_past_comma (str) == FAIL)
5245 /* Shift operation on register. */
5246 return parse_shift (str, i, NO_SHIFT_RESTRICT);
5249 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
5252 if (skip_past_comma (str) == SUCCESS)
5254 /* #x, y -- ie explicit rotation by Y. */
5255 if (my_get_expression (&exp, str, GE_NO_PREFIX))
5258 if (exp.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
5260 inst.error = _("constant expression expected");
5264 value = exp.X_add_number;
5265 if (value < 0 || value > 30 || value % 2 != 0)
5267 inst.error = _("invalid rotation");
5270 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
5272 inst.error = _("invalid constant");
5276 /* Encode as specified. */
5277 inst.operands[i].imm = inst.reloc.exp.X_add_number | value << 7;
5281 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5282 inst.reloc.pc_rel = 0;
5286 /* Group relocation information. Each entry in the table contains the
5287 textual name of the relocation as may appear in assembler source
5288 and must end with a colon.
5289 Along with this textual name are the relocation codes to be used if
5290 the corresponding instruction is an ALU instruction (ADD or SUB only),
5291 an LDR, an LDRS, or an LDC. */
5293 struct group_reloc_table_entry
5304 /* Varieties of non-ALU group relocation. */
5311 static struct group_reloc_table_entry group_reloc_table[] =
5312 { /* Program counter relative: */
5314 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
5319 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
5320 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
5321 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
5322 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
5324 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
5329 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
5330 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
5331 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
5332 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
5334 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
5335 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
5336 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
5337 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
5338 /* Section base relative */
5340 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
5345 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
5346 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
5347 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
5348 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
5350 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
5355 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
5356 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
5357 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
5358 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
5360 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
5361 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
5362 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
5363 BFD_RELOC_ARM_LDC_SB_G2 }, /* LDC */
5364 /* Absolute thumb alu relocations. */
5366 BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU. */
5371 BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU. */
5376 BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU. */
5381 BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU. */
5386 /* Given the address of a pointer pointing to the textual name of a group
5387 relocation as may appear in assembler source, attempt to find its details
5388 in group_reloc_table. The pointer will be updated to the character after
5389 the trailing colon. On failure, FAIL will be returned; SUCCESS
5390 otherwise. On success, *entry will be updated to point at the relevant
5391 group_reloc_table entry. */
5394 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5397 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5399 int length = strlen (group_reloc_table[i].name);
5401 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5402 && (*str)[length] == ':')
5404 *out = &group_reloc_table[i];
5405 *str += (length + 1);
5413 /* Parse a <shifter_operand> for an ARM data processing instruction
5414 (as for parse_shifter_operand) where group relocations are allowed:
5417 #<immediate>, <rotate>
5418 #:<group_reloc>:<expression>
5422 where <group_reloc> is one of the strings defined in group_reloc_table.
5423 The hashes are optional.
5425 Everything else is as for parse_shifter_operand. */
5427 static parse_operand_result
5428 parse_shifter_operand_group_reloc (char **str, int i)
5430 /* Determine if we have the sequence of characters #: or just :
5431 coming next. If we do, then we check for a group relocation.
5432 If we don't, punt the whole lot to parse_shifter_operand. */
5434 if (((*str)[0] == '#' && (*str)[1] == ':')
5435 || (*str)[0] == ':')
5437 struct group_reloc_table_entry *entry;
5439 if ((*str)[0] == '#')
5444 /* Try to parse a group relocation. Anything else is an error. */
5445 if (find_group_reloc_table_entry (str, &entry) == FAIL)
5447 inst.error = _("unknown group relocation");
5448 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5451 /* We now have the group relocation table entry corresponding to
5452 the name in the assembler source. Next, we parse the expression. */
5453 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
5454 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5456 /* Record the relocation type (always the ALU variant here). */
5457 inst.reloc.type = (bfd_reloc_code_real_type) entry->alu_code;
5458 gas_assert (inst.reloc.type != 0);
5460 return PARSE_OPERAND_SUCCESS;
5463 return parse_shifter_operand (str, i) == SUCCESS
5464 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
5466 /* Never reached. */
5469 /* Parse a Neon alignment expression. Information is written to
5470 inst.operands[i]. We assume the initial ':' has been skipped.
5472 align .imm = align << 8, .immisalign=1, .preind=0 */
5473 static parse_operand_result
5474 parse_neon_alignment (char **str, int i)
5479 my_get_expression (&exp, &p, GE_NO_PREFIX);
5481 if (exp.X_op != O_constant)
5483 inst.error = _("alignment must be constant");
5484 return PARSE_OPERAND_FAIL;
5487 inst.operands[i].imm = exp.X_add_number << 8;
5488 inst.operands[i].immisalign = 1;
5489 /* Alignments are not pre-indexes. */
5490 inst.operands[i].preind = 0;
5493 return PARSE_OPERAND_SUCCESS;
5496 /* Parse all forms of an ARM address expression. Information is written
5497 to inst.operands[i] and/or inst.reloc.
5499 Preindexed addressing (.preind=1):
5501 [Rn, #offset] .reg=Rn .reloc.exp=offset
5502 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5503 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5504 .shift_kind=shift .reloc.exp=shift_imm
5506 These three may have a trailing ! which causes .writeback to be set also.
5508 Postindexed addressing (.postind=1, .writeback=1):
5510 [Rn], #offset .reg=Rn .reloc.exp=offset
5511 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5512 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5513 .shift_kind=shift .reloc.exp=shift_imm
5515 Unindexed addressing (.preind=0, .postind=0):
5517 [Rn], {option} .reg=Rn .imm=option .immisreg=0
5521 [Rn]{!} shorthand for [Rn,#0]{!}
5522 =immediate .isreg=0 .reloc.exp=immediate
5523 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
5525 It is the caller's responsibility to check for addressing modes not
5526 supported by the instruction, and to set inst.reloc.type. */
5528 static parse_operand_result
5529 parse_address_main (char **str, int i, int group_relocations,
5530 group_reloc_type group_type)
5535 if (skip_past_char (&p, '[') == FAIL)
5537 if (skip_past_char (&p, '=') == FAIL)
5539 /* Bare address - translate to PC-relative offset. */
5540 inst.reloc.pc_rel = 1;
5541 inst.operands[i].reg = REG_PC;
5542 inst.operands[i].isreg = 1;
5543 inst.operands[i].preind = 1;
5545 if (my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX_BIG))
5546 return PARSE_OPERAND_FAIL;
5548 else if (parse_big_immediate (&p, i, &inst.reloc.exp,
5549 /*allow_symbol_p=*/TRUE))
5550 return PARSE_OPERAND_FAIL;
5553 return PARSE_OPERAND_SUCCESS;
5556 /* PR gas/14887: Allow for whitespace after the opening bracket. */
5557 skip_whitespace (p);
5559 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5561 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5562 return PARSE_OPERAND_FAIL;
5564 inst.operands[i].reg = reg;
5565 inst.operands[i].isreg = 1;
5567 if (skip_past_comma (&p) == SUCCESS)
5569 inst.operands[i].preind = 1;
5572 else if (*p == '-') p++, inst.operands[i].negative = 1;
5574 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5576 inst.operands[i].imm = reg;
5577 inst.operands[i].immisreg = 1;
5579 if (skip_past_comma (&p) == SUCCESS)
5580 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5581 return PARSE_OPERAND_FAIL;
5583 else if (skip_past_char (&p, ':') == SUCCESS)
5585 /* FIXME: '@' should be used here, but it's filtered out by generic
5586 code before we get to see it here. This may be subject to
5588 parse_operand_result result = parse_neon_alignment (&p, i);
5590 if (result != PARSE_OPERAND_SUCCESS)
5595 if (inst.operands[i].negative)
5597 inst.operands[i].negative = 0;
5601 if (group_relocations
5602 && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
5604 struct group_reloc_table_entry *entry;
5606 /* Skip over the #: or : sequence. */
5612 /* Try to parse a group relocation. Anything else is an
5614 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
5616 inst.error = _("unknown group relocation");
5617 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5620 /* We now have the group relocation table entry corresponding to
5621 the name in the assembler source. Next, we parse the
5623 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5624 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5626 /* Record the relocation type. */
5630 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldr_code;
5634 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldrs_code;
5638 inst.reloc.type = (bfd_reloc_code_real_type) entry->ldc_code;
5645 if (inst.reloc.type == 0)
5647 inst.error = _("this group relocation is not allowed on this instruction");
5648 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
5655 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5656 return PARSE_OPERAND_FAIL;
5657 /* If the offset is 0, find out if it's a +0 or -0. */
5658 if (inst.reloc.exp.X_op == O_constant
5659 && inst.reloc.exp.X_add_number == 0)
5661 skip_whitespace (q);
5665 skip_whitespace (q);
5668 inst.operands[i].negative = 1;
5673 else if (skip_past_char (&p, ':') == SUCCESS)
5675 /* FIXME: '@' should be used here, but it's filtered out by generic code
5676 before we get to see it here. This may be subject to change. */
5677 parse_operand_result result = parse_neon_alignment (&p, i);
5679 if (result != PARSE_OPERAND_SUCCESS)
5683 if (skip_past_char (&p, ']') == FAIL)
5685 inst.error = _("']' expected");
5686 return PARSE_OPERAND_FAIL;
5689 if (skip_past_char (&p, '!') == SUCCESS)
5690 inst.operands[i].writeback = 1;
5692 else if (skip_past_comma (&p) == SUCCESS)
5694 if (skip_past_char (&p, '{') == SUCCESS)
5696 /* [Rn], {expr} - unindexed, with option */
5697 if (parse_immediate (&p, &inst.operands[i].imm,
5698 0, 255, TRUE) == FAIL)
5699 return PARSE_OPERAND_FAIL;
5701 if (skip_past_char (&p, '}') == FAIL)
5703 inst.error = _("'}' expected at end of 'option' field");
5704 return PARSE_OPERAND_FAIL;
5706 if (inst.operands[i].preind)
5708 inst.error = _("cannot combine index with option");
5709 return PARSE_OPERAND_FAIL;
5712 return PARSE_OPERAND_SUCCESS;
5716 inst.operands[i].postind = 1;
5717 inst.operands[i].writeback = 1;
5719 if (inst.operands[i].preind)
5721 inst.error = _("cannot combine pre- and post-indexing");
5722 return PARSE_OPERAND_FAIL;
5726 else if (*p == '-') p++, inst.operands[i].negative = 1;
5728 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5730 /* We might be using the immediate for alignment already. If we
5731 are, OR the register number into the low-order bits. */
5732 if (inst.operands[i].immisalign)
5733 inst.operands[i].imm |= reg;
5735 inst.operands[i].imm = reg;
5736 inst.operands[i].immisreg = 1;
5738 if (skip_past_comma (&p) == SUCCESS)
5739 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
5740 return PARSE_OPERAND_FAIL;
5746 if (inst.operands[i].negative)
5748 inst.operands[i].negative = 0;
5751 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
5752 return PARSE_OPERAND_FAIL;
5753 /* If the offset is 0, find out if it's a +0 or -0. */
5754 if (inst.reloc.exp.X_op == O_constant
5755 && inst.reloc.exp.X_add_number == 0)
5757 skip_whitespace (q);
5761 skip_whitespace (q);
5764 inst.operands[i].negative = 1;
5770 /* If at this point neither .preind nor .postind is set, we have a
5771 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
5772 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
5774 inst.operands[i].preind = 1;
5775 inst.reloc.exp.X_op = O_constant;
5776 inst.reloc.exp.X_add_number = 0;
5779 return PARSE_OPERAND_SUCCESS;
5783 parse_address (char **str, int i)
5785 return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
5789 static parse_operand_result
5790 parse_address_group_reloc (char **str, int i, group_reloc_type type)
5792 return parse_address_main (str, i, 1, type);
5795 /* Parse an operand for a MOVW or MOVT instruction. */
5797 parse_half (char **str)
5802 skip_past_char (&p, '#');
5803 if (strncasecmp (p, ":lower16:", 9) == 0)
5804 inst.reloc.type = BFD_RELOC_ARM_MOVW;
5805 else if (strncasecmp (p, ":upper16:", 9) == 0)
5806 inst.reloc.type = BFD_RELOC_ARM_MOVT;
5808 if (inst.reloc.type != BFD_RELOC_UNUSED)
5811 skip_whitespace (p);
5814 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
5817 if (inst.reloc.type == BFD_RELOC_UNUSED)
5819 if (inst.reloc.exp.X_op != O_constant)
5821 inst.error = _("constant expression expected");
5824 if (inst.reloc.exp.X_add_number < 0
5825 || inst.reloc.exp.X_add_number > 0xffff)
5827 inst.error = _("immediate value out of range");
5835 /* Miscellaneous. */
5837 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
5838 or a bitmask suitable to be or-ed into the ARM msr instruction. */
5840 parse_psr (char **str, bfd_boolean lhs)
5843 unsigned long psr_field;
5844 const struct asm_psr *psr;
5846 bfd_boolean is_apsr = FALSE;
5847 bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);
5849 /* PR gas/12698: If the user has specified -march=all then m_profile will
5850 be TRUE, but we want to ignore it in this case as we are building for any
5851 CPU type, including non-m variants. */
5852 if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
5855 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
5856 feature for ease of use and backwards compatibility. */
5858 if (strncasecmp (p, "SPSR", 4) == 0)
5861 goto unsupported_psr;
5863 psr_field = SPSR_BIT;
5865 else if (strncasecmp (p, "CPSR", 4) == 0)
5868 goto unsupported_psr;
5872 else if (strncasecmp (p, "APSR", 4) == 0)
5874 /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
5875 and ARMv7-R architecture CPUs. */
5884 while (ISALNUM (*p) || *p == '_');
5886 if (strncasecmp (start, "iapsr", 5) == 0
5887 || strncasecmp (start, "eapsr", 5) == 0
5888 || strncasecmp (start, "xpsr", 4) == 0
5889 || strncasecmp (start, "psr", 3) == 0)
5890 p = start + strcspn (start, "rR") + 1;
5892 psr = (const struct asm_psr *) hash_find_n (arm_v7m_psr_hsh, start,
5898 /* If APSR is being written, a bitfield may be specified. Note that
5899 APSR itself is handled above. */
5900 if (psr->field <= 3)
5902 psr_field = psr->field;
5908 /* M-profile MSR instructions have the mask field set to "10", except
5909 *PSR variants which modify APSR, which may use a different mask (and
5910 have been handled already). Do that by setting the PSR_f field
5912 return psr->field | (lhs ? PSR_f : 0);
5915 goto unsupported_psr;
5921 /* A suffix follows. */
5927 while (ISALNUM (*p) || *p == '_');
5931 /* APSR uses a notation for bits, rather than fields. */
5932 unsigned int nzcvq_bits = 0;
5933 unsigned int g_bit = 0;
5936 for (bit = start; bit != p; bit++)
5938 switch (TOLOWER (*bit))
5941 nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
5945 nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
5949 nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
5953 nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
5957 nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
5961 g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
5965 inst.error = _("unexpected bit specified after APSR");
5970 if (nzcvq_bits == 0x1f)
5975 if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
5977 inst.error = _("selected processor does not "
5978 "support DSP extension");
5985 if ((nzcvq_bits & 0x20) != 0
5986 || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
5987 || (g_bit & 0x2) != 0)
5989 inst.error = _("bad bitmask specified after APSR");
5995 psr = (const struct asm_psr *) hash_find_n (arm_psr_hsh, start,
6000 psr_field |= psr->field;
6006 goto error; /* Garbage after "[CS]PSR". */
6008 /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes). This
6009 is deprecated, but allow it anyway. */
6013 as_tsktsk (_("writing to APSR without specifying a bitmask is "
6016 else if (!m_profile)
6017 /* These bits are never right for M-profile devices: don't set them
6018 (only code paths which read/write APSR reach here). */
6019 psr_field |= (PSR_c | PSR_f);
6025 inst.error = _("selected processor does not support requested special "
6026 "purpose register");
6030 inst.error = _("flag for {c}psr instruction expected");
6034 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
6035 value suitable for splatting into the AIF field of the instruction. */
6038 parse_cps_flags (char **str)
6047 case '\0': case ',':
6050 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6051 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6052 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6055 inst.error = _("unrecognized CPS flag");
6060 if (saw_a_flag == 0)
6062 inst.error = _("missing CPS flags");
6070 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6071 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
6074 parse_endian_specifier (char **str)
6079 if (strncasecmp (s, "BE", 2))
6081 else if (strncasecmp (s, "LE", 2))
6085 inst.error = _("valid endian specifiers are be or le");
6089 if (ISALNUM (s[2]) || s[2] == '_')
6091 inst.error = _("valid endian specifiers are be or le");
6096 return little_endian;
6099 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
6100 value suitable for poking into the rotate field of an sxt or sxta
6101 instruction, or FAIL on error. */
6104 parse_ror (char **str)
6109 if (strncasecmp (s, "ROR", 3) == 0)
6113 inst.error = _("missing rotation field after comma");
6117 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6122 case 0: *str = s; return 0x0;
6123 case 8: *str = s; return 0x1;
6124 case 16: *str = s; return 0x2;
6125 case 24: *str = s; return 0x3;
6128 inst.error = _("rotation can only be 0, 8, 16, or 24");
6133 /* Parse a conditional code (from conds[] below). The value returned is in the
6134 range 0 .. 14, or FAIL. */
6136 parse_cond (char **str)
6139 const struct asm_cond *c;
6141 /* Condition codes are always 2 characters, so matching up to
6142 3 characters is sufficient. */
6147 while (ISALPHA (*q) && n < 3)
6149 cond[n] = TOLOWER (*q);
6154 c = (const struct asm_cond *) hash_find_n (arm_cond_hsh, cond, n);
6157 inst.error = _("condition required");
6165 /* Record a use of the given feature. */
6167 record_feature_use (const arm_feature_set *feature)
6170 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
6172 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
6175 /* If the given feature is currently allowed, mark it as used and return TRUE.
6176 Return FALSE otherwise. */
6178 mark_feature_used (const arm_feature_set *feature)
6180 /* Ensure the option is currently allowed. */
6181 if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
6184 /* Add the appropriate architecture feature for the barrier option used. */
6185 record_feature_use (feature);
6190 /* Parse an option for a barrier instruction. Returns the encoding for the
6193 parse_barrier (char **str)
6196 const struct asm_barrier_opt *o;
6199 while (ISALPHA (*q))
6202 o = (const struct asm_barrier_opt *) hash_find_n (arm_barrier_opt_hsh, p,
6207 if (!mark_feature_used (&o->arch))
6214 /* Parse the operands of a table branch instruction. Similar to a memory
6217 parse_tb (char **str)
6222 if (skip_past_char (&p, '[') == FAIL)
6224 inst.error = _("'[' expected");
6228 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6230 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6233 inst.operands[0].reg = reg;
6235 if (skip_past_comma (&p) == FAIL)
6237 inst.error = _("',' expected");
6241 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6243 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6246 inst.operands[0].imm = reg;
6248 if (skip_past_comma (&p) == SUCCESS)
6250 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6252 if (inst.reloc.exp.X_add_number != 1)
6254 inst.error = _("invalid shift");
6257 inst.operands[0].shifted = 1;
6260 if (skip_past_char (&p, ']') == FAIL)
6262 inst.error = _("']' expected");
6269 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6270 information on the types the operands can take and how they are encoded.
6271 Up to four operands may be read; this function handles setting the
6272 ".present" field for each read operand itself.
6273 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6274 else returns FAIL. */
6277 parse_neon_mov (char **str, int *which_operand)
6279 int i = *which_operand, val;
6280 enum arm_reg_type rtype;
6282 struct neon_type_el optype;
6284 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
6286 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
6287 inst.operands[i].reg = val;
6288 inst.operands[i].isscalar = 1;
6289 inst.operands[i].vectype = optype;
6290 inst.operands[i++].present = 1;
6292 if (skip_past_comma (&ptr) == FAIL)
6295 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6298 inst.operands[i].reg = val;
6299 inst.operands[i].isreg = 1;
6300 inst.operands[i].present = 1;
6302 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
6305 /* Cases 0, 1, 2, 3, 5 (D only). */
6306 if (skip_past_comma (&ptr) == FAIL)
6309 inst.operands[i].reg = val;
6310 inst.operands[i].isreg = 1;
6311 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6312 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6313 inst.operands[i].isvec = 1;
6314 inst.operands[i].vectype = optype;
6315 inst.operands[i++].present = 1;
6317 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6319 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
6320 Case 13: VMOV <Sd>, <Rm> */
6321 inst.operands[i].reg = val;
6322 inst.operands[i].isreg = 1;
6323 inst.operands[i].present = 1;
6325 if (rtype == REG_TYPE_NQ)
6327 first_error (_("can't use Neon quad register here"));
6330 else if (rtype != REG_TYPE_VFS)
6333 if (skip_past_comma (&ptr) == FAIL)
6335 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6337 inst.operands[i].reg = val;
6338 inst.operands[i].isreg = 1;
6339 inst.operands[i].present = 1;
6342 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
6345 /* Case 0: VMOV<c><q> <Qd>, <Qm>
6346 Case 1: VMOV<c><q> <Dd>, <Dm>
6347 Case 8: VMOV.F32 <Sd>, <Sm>
6348 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
6350 inst.operands[i].reg = val;
6351 inst.operands[i].isreg = 1;
6352 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
6353 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6354 inst.operands[i].isvec = 1;
6355 inst.operands[i].vectype = optype;
6356 inst.operands[i].present = 1;
6358 if (skip_past_comma (&ptr) == SUCCESS)
6363 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6366 inst.operands[i].reg = val;
6367 inst.operands[i].isreg = 1;
6368 inst.operands[i++].present = 1;
6370 if (skip_past_comma (&ptr) == FAIL)
6373 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
6376 inst.operands[i].reg = val;
6377 inst.operands[i].isreg = 1;
6378 inst.operands[i].present = 1;
6381 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
6382 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
6383 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
6384 Case 10: VMOV.F32 <Sd>, #<imm>
6385 Case 11: VMOV.F64 <Dd>, #<imm> */
6386 inst.operands[i].immisfloat = 1;
6387 else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
6389 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
6390 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
6394 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
6398 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6401 inst.operands[i].reg = val;
6402 inst.operands[i].isreg = 1;
6403 inst.operands[i++].present = 1;
6405 if (skip_past_comma (&ptr) == FAIL)
6408 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
6410 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
6411 inst.operands[i].reg = val;
6412 inst.operands[i].isscalar = 1;
6413 inst.operands[i].present = 1;
6414 inst.operands[i].vectype = optype;
6416 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
6418 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
6419 inst.operands[i].reg = val;
6420 inst.operands[i].isreg = 1;
6421 inst.operands[i++].present = 1;
6423 if (skip_past_comma (&ptr) == FAIL)
6426 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
6429 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
6433 inst.operands[i].reg = val;
6434 inst.operands[i].isreg = 1;
6435 inst.operands[i].isvec = 1;
6436 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
6437 inst.operands[i].vectype = optype;
6438 inst.operands[i].present = 1;
6440 if (rtype == REG_TYPE_VFS)
6444 if (skip_past_comma (&ptr) == FAIL)
6446 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
6449 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
6452 inst.operands[i].reg = val;
6453 inst.operands[i].isreg = 1;
6454 inst.operands[i].isvec = 1;
6455 inst.operands[i].issingle = 1;
6456 inst.operands[i].vectype = optype;
6457 inst.operands[i].present = 1;
6460 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
6464 inst.operands[i].reg = val;
6465 inst.operands[i].isreg = 1;
6466 inst.operands[i].isvec = 1;
6467 inst.operands[i].issingle = 1;
6468 inst.operands[i].vectype = optype;
6469 inst.operands[i].present = 1;
6474 first_error (_("parse error"));
6478 /* Successfully parsed the operands. Update args. */
6484 first_error (_("expected comma"));
6488 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
6492 /* Use this macro when the operand constraints are different
6493 for ARM and THUMB (e.g. ldrd). */
6494 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
6495 ((arm_operand) | ((thumb_operand) << 16))
6497 /* Matcher codes for parse_operands. */
6498 enum operand_parse_code
6500 OP_stop, /* end of line */
6502 OP_RR, /* ARM register */
6503 OP_RRnpc, /* ARM register, not r15 */
6504 OP_RRnpcsp, /* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
6505 OP_RRnpcb, /* ARM register, not r15, in square brackets */
6506 OP_RRnpctw, /* ARM register, not r15 in Thumb-state or with writeback,
6507 optional trailing ! */
6508 OP_RRw, /* ARM register, not r15, optional trailing ! */
6509 OP_RCP, /* Coprocessor number */
6510 OP_RCN, /* Coprocessor register */
6511 OP_RF, /* FPA register */
6512 OP_RVS, /* VFP single precision register */
6513 OP_RVD, /* VFP double precision register (0..15) */
6514 OP_RND, /* Neon double precision register (0..31) */
6515 OP_RNQ, /* Neon quad precision register */
6516 OP_RVSD, /* VFP single or double precision register */
6517 OP_RNSD, /* Neon single or double precision register */
6518 OP_RNDQ, /* Neon double or quad precision register */
6519 OP_RNSDQ, /* Neon single, double or quad precision register */
6520 OP_RNSC, /* Neon scalar D[X] */
6521 OP_RVC, /* VFP control register */
6522 OP_RMF, /* Maverick F register */
6523 OP_RMD, /* Maverick D register */
6524 OP_RMFX, /* Maverick FX register */
6525 OP_RMDX, /* Maverick DX register */
6526 OP_RMAX, /* Maverick AX register */
6527 OP_RMDS, /* Maverick DSPSC register */
6528 OP_RIWR, /* iWMMXt wR register */
6529 OP_RIWC, /* iWMMXt wC register */
6530 OP_RIWG, /* iWMMXt wCG register */
6531 OP_RXA, /* XScale accumulator register */
6533 OP_REGLST, /* ARM register list */
6534 OP_VRSLST, /* VFP single-precision register list */
6535 OP_VRDLST, /* VFP double-precision register list */
6536 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
6537 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
6538 OP_NSTRLST, /* Neon element/structure list */
6540 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
6541 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
6542 OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero. */
6543 OP_RR_RNSC, /* ARM reg or Neon scalar. */
6544 OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar. */
6545 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
6546 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
6547 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
6548 OP_VMOV, /* Neon VMOV operands. */
6549 OP_RNDQ_Ibig, /* Neon D or Q reg, or big immediate for logic and VMVN. */
6550 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
6551 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
6553 OP_I0, /* immediate zero */
6554 OP_I7, /* immediate value 0 .. 7 */
6555 OP_I15, /* 0 .. 15 */
6556 OP_I16, /* 1 .. 16 */
6557 OP_I16z, /* 0 .. 16 */
6558 OP_I31, /* 0 .. 31 */
6559 OP_I31w, /* 0 .. 31, optional trailing ! */
6560 OP_I32, /* 1 .. 32 */
6561 OP_I32z, /* 0 .. 32 */
6562 OP_I63, /* 0 .. 63 */
6563 OP_I63s, /* -64 .. 63 */
6564 OP_I64, /* 1 .. 64 */
6565 OP_I64z, /* 0 .. 64 */
6566 OP_I255, /* 0 .. 255 */
6568 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
6569 OP_I7b, /* 0 .. 7 */
6570 OP_I15b, /* 0 .. 15 */
6571 OP_I31b, /* 0 .. 31 */
6573 OP_SH, /* shifter operand */
6574 OP_SHG, /* shifter operand with possible group relocation */
6575 OP_ADDR, /* Memory address expression (any mode) */
6576 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
6577 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
6578 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
6579 OP_EXP, /* arbitrary expression */
6580 OP_EXPi, /* same, with optional immediate prefix */
6581 OP_EXPr, /* same, with optional relocation suffix */
6582 OP_HALF, /* 0 .. 65535 or low/high reloc. */
6583 OP_IROT1, /* VCADD rotate immediate: 90, 270. */
6584 OP_IROT2, /* VCMLA rotate immediate: 0, 90, 180, 270. */
6586 OP_CPSF, /* CPS flags */
6587 OP_ENDI, /* Endianness specifier */
6588 OP_wPSR, /* CPSR/SPSR/APSR mask for msr (writing). */
6589 OP_rPSR, /* CPSR/SPSR/APSR mask for msr (reading). */
6590 OP_COND, /* conditional code */
6591 OP_TB, /* Table branch. */
6593 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
6595 OP_RRnpc_I0, /* ARM register or literal 0 */
6596 OP_RR_EXr, /* ARM register or expression with opt. reloc stuff. */
6597 OP_RR_EXi, /* ARM register or expression with imm prefix */
6598 OP_RF_IF, /* FPA register or immediate */
6599 OP_RIWR_RIWC, /* iWMMXt R or C reg */
6600 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
6602 /* Optional operands. */
6603 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
6604 OP_oI31b, /* 0 .. 31 */
6605 OP_oI32b, /* 1 .. 32 */
6606 OP_oI32z, /* 0 .. 32 */
6607 OP_oIffffb, /* 0 .. 65535 */
6608 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
6610 OP_oRR, /* ARM register */
6611 OP_oRRnpc, /* ARM register, not the PC */
6612 OP_oRRnpcsp, /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
6613 OP_oRRw, /* ARM register, not r15, optional trailing ! */
6614 OP_oRND, /* Optional Neon double precision register */
6615 OP_oRNQ, /* Optional Neon quad precision register */
6616 OP_oRNDQ, /* Optional Neon double or quad precision register */
6617 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
6618 OP_oSHll, /* LSL immediate */
6619 OP_oSHar, /* ASR immediate */
6620 OP_oSHllar, /* LSL or ASR immediate */
6621 OP_oROR, /* ROR 0/8/16/24 */
6622 OP_oBARRIER_I15, /* Option argument for a barrier instruction. */
6624 /* Some pre-defined mixed (ARM/THUMB) operands. */
6625 OP_RR_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
6626 OP_RRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
6627 OP_oRRnpc_npcsp = MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),
6629 OP_FIRST_OPTIONAL = OP_oI7b
6632 /* Generic instruction operand parser. This does no encoding and no
6633 semantic validation; it merely squirrels values away in the inst
6634 structure. Returns SUCCESS or FAIL depending on whether the
6635 specified grammar matched. */
6637 parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
6639 unsigned const int *upat = pattern;
6640 char *backtrack_pos = 0;
6641 const char *backtrack_error = 0;
6642 int i, val = 0, backtrack_index = 0;
6643 enum arm_reg_type rtype;
6644 parse_operand_result result;
6645 unsigned int op_parse_code;
6647 #define po_char_or_fail(chr) \
6650 if (skip_past_char (&str, chr) == FAIL) \
6655 #define po_reg_or_fail(regtype) \
6658 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6659 & inst.operands[i].vectype); \
6662 first_error (_(reg_expected_msgs[regtype])); \
6665 inst.operands[i].reg = val; \
6666 inst.operands[i].isreg = 1; \
6667 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6668 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6669 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6670 || rtype == REG_TYPE_VFD \
6671 || rtype == REG_TYPE_NQ); \
6675 #define po_reg_or_goto(regtype, label) \
6678 val = arm_typed_reg_parse (& str, regtype, & rtype, \
6679 & inst.operands[i].vectype); \
6683 inst.operands[i].reg = val; \
6684 inst.operands[i].isreg = 1; \
6685 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
6686 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
6687 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
6688 || rtype == REG_TYPE_VFD \
6689 || rtype == REG_TYPE_NQ); \
6693 #define po_imm_or_fail(min, max, popt) \
6696 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
6698 inst.operands[i].imm = val; \
6702 #define po_scalar_or_goto(elsz, label) \
6705 val = parse_scalar (& str, elsz, & inst.operands[i].vectype); \
6708 inst.operands[i].reg = val; \
6709 inst.operands[i].isscalar = 1; \
6713 #define po_misc_or_fail(expr) \
6721 #define po_misc_or_fail_no_backtrack(expr) \
6725 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK) \
6726 backtrack_pos = 0; \
6727 if (result != PARSE_OPERAND_SUCCESS) \
6732 #define po_barrier_or_imm(str) \
6735 val = parse_barrier (&str); \
6736 if (val == FAIL && ! ISALPHA (*str)) \
6739 /* ISB can only take SY as an option. */ \
6740 || ((inst.instruction & 0xf0) == 0x60 \
6743 inst.error = _("invalid barrier type"); \
6744 backtrack_pos = 0; \
6750 skip_whitespace (str);
6752 for (i = 0; upat[i] != OP_stop; i++)
6754 op_parse_code = upat[i];
6755 if (op_parse_code >= 1<<16)
6756 op_parse_code = thumb ? (op_parse_code >> 16)
6757 : (op_parse_code & ((1<<16)-1));
6759 if (op_parse_code >= OP_FIRST_OPTIONAL)
6761 /* Remember where we are in case we need to backtrack. */
6762 gas_assert (!backtrack_pos);
6763 backtrack_pos = str;
6764 backtrack_error = inst.error;
6765 backtrack_index = i;
6768 if (i > 0 && (i > 1 || inst.operands[0].present))
6769 po_char_or_fail (',');
6771 switch (op_parse_code)
6779 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
6780 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
6781 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
6782 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
6783 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
6784 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
6786 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
6788 po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
6790 /* Also accept generic coprocessor regs for unknown registers. */
6792 po_reg_or_fail (REG_TYPE_CN);
6794 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
6795 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
6796 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
6797 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
6798 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
6799 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
6800 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
6801 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
6802 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
6803 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
6805 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
6806 case OP_RNSD: po_reg_or_fail (REG_TYPE_NSD); break;
6808 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
6809 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
6811 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
6813 /* Neon scalar. Using an element size of 8 means that some invalid
6814 scalars are accepted here, so deal with those in later code. */
6815 case OP_RNSC: po_scalar_or_goto (8, failure); break;
6819 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
6822 po_imm_or_fail (0, 0, TRUE);
6827 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
6832 po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
6835 if (parse_ifimm_zero (&str))
6836 inst.operands[i].imm = 0;
6840 = _("only floating point zero is allowed as immediate value");
6848 po_scalar_or_goto (8, try_rr);
6851 po_reg_or_fail (REG_TYPE_RN);
6857 po_scalar_or_goto (8, try_nsdq);
6860 po_reg_or_fail (REG_TYPE_NSDQ);
6866 po_scalar_or_goto (8, try_s_scalar);
6869 po_scalar_or_goto (4, try_nsd);
6872 po_reg_or_fail (REG_TYPE_NSD);
6878 po_scalar_or_goto (8, try_ndq);
6881 po_reg_or_fail (REG_TYPE_NDQ);
6887 po_scalar_or_goto (8, try_vfd);
6890 po_reg_or_fail (REG_TYPE_VFD);
6895 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
6896 not careful then bad things might happen. */
6897 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
6902 po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
6905 /* There's a possibility of getting a 64-bit immediate here, so
6906 we need special handling. */
6907 if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
6910 inst.error = _("immediate value is out of range");
6918 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
6921 po_imm_or_fail (0, 63, TRUE);
6926 po_char_or_fail ('[');
6927 po_reg_or_fail (REG_TYPE_RN);
6928 po_char_or_fail (']');
6934 po_reg_or_fail (REG_TYPE_RN);
6935 if (skip_past_char (&str, '!') == SUCCESS)
6936 inst.operands[i].writeback = 1;
6940 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
6941 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
6942 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
6943 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
6944 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
6945 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
6946 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
6947 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
6948 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
6949 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
6950 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
6951 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
6953 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
6955 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
6956 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
6958 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
6959 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
6960 case OP_oI32z: po_imm_or_fail ( 0, 32, TRUE); break;
6961 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
6963 /* Immediate variants */
6965 po_char_or_fail ('{');
6966 po_imm_or_fail (0, 255, TRUE);
6967 po_char_or_fail ('}');
6971 /* The expression parser chokes on a trailing !, so we have
6972 to find it first and zap it. */
6975 while (*s && *s != ',')
6980 inst.operands[i].writeback = 1;
6982 po_imm_or_fail (0, 31, TRUE);
6990 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
6995 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
7000 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
7002 if (inst.reloc.exp.X_op == O_symbol)
7004 val = parse_reloc (&str);
7007 inst.error = _("unrecognized relocation suffix");
7010 else if (val != BFD_RELOC_UNUSED)
7012 inst.operands[i].imm = val;
7013 inst.operands[i].hasreloc = 1;
7018 /* Operand for MOVW or MOVT. */
7020 po_misc_or_fail (parse_half (&str));
7023 /* Register or expression. */
7024 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
7025 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
7027 /* Register or immediate. */
7028 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
7029 I0: po_imm_or_fail (0, 0, FALSE); break;
7031 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
7033 if (!is_immediate_prefix (*str))
7036 val = parse_fpa_immediate (&str);
7039 /* FPA immediates are encoded as registers 8-15.
7040 parse_fpa_immediate has already applied the offset. */
7041 inst.operands[i].reg = val;
7042 inst.operands[i].isreg = 1;
7045 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
7046 I32z: po_imm_or_fail (0, 32, FALSE); break;
7048 /* Two kinds of register. */
7051 struct reg_entry *rege = arm_reg_parse_multi (&str);
7053 || (rege->type != REG_TYPE_MMXWR
7054 && rege->type != REG_TYPE_MMXWC
7055 && rege->type != REG_TYPE_MMXWCG))
7057 inst.error = _("iWMMXt data or control register expected");
7060 inst.operands[i].reg = rege->number;
7061 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7067 struct reg_entry *rege = arm_reg_parse_multi (&str);
7069 || (rege->type != REG_TYPE_MMXWC
7070 && rege->type != REG_TYPE_MMXWCG))
7072 inst.error = _("iWMMXt control register expected");
7075 inst.operands[i].reg = rege->number;
7076 inst.operands[i].isreg = 1;
7081 case OP_CPSF: val = parse_cps_flags (&str); break;
7082 case OP_ENDI: val = parse_endian_specifier (&str); break;
7083 case OP_oROR: val = parse_ror (&str); break;
7084 case OP_COND: val = parse_cond (&str); break;
7085 case OP_oBARRIER_I15:
7086 po_barrier_or_imm (str); break;
7088 if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7094 po_reg_or_goto (REG_TYPE_RNB, try_psr);
7095 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7097 inst.error = _("Banked registers are not available with this "
7103 val = parse_psr (&str, op_parse_code == OP_wPSR);
7107 po_reg_or_goto (REG_TYPE_RN, try_apsr);
7110 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7112 if (strncasecmp (str, "APSR_", 5) == 0)
7119 case 'c': found = (found & 1) ? 16 : found | 1; break;
7120 case 'n': found = (found & 2) ? 16 : found | 2; break;
7121 case 'z': found = (found & 4) ? 16 : found | 4; break;
7122 case 'v': found = (found & 8) ? 16 : found | 8; break;
7123 default: found = 16;
7127 inst.operands[i].isvec = 1;
7128 /* APSR_nzcv is encoded in instructions as if it were the REG_PC. */
7129 inst.operands[i].reg = REG_PC;
7136 po_misc_or_fail (parse_tb (&str));
7139 /* Register lists. */
7141 val = parse_reg_list (&str);
7144 inst.operands[i].writeback = 1;
7150 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
7154 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
7158 /* Allow Q registers too. */
7159 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7164 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7166 inst.operands[i].issingle = 1;
7171 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7176 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7177 &inst.operands[i].vectype);
7180 /* Addressing modes */
7182 po_misc_or_fail (parse_address (&str, i));
7186 po_misc_or_fail_no_backtrack (
7187 parse_address_group_reloc (&str, i, GROUP_LDR));
7191 po_misc_or_fail_no_backtrack (
7192 parse_address_group_reloc (&str, i, GROUP_LDRS));
7196 po_misc_or_fail_no_backtrack (
7197 parse_address_group_reloc (&str, i, GROUP_LDC));
7201 po_misc_or_fail (parse_shifter_operand (&str, i));
7205 po_misc_or_fail_no_backtrack (
7206 parse_shifter_operand_group_reloc (&str, i));
7210 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
7214 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
7218 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
7222 as_fatal (_("unhandled operand code %d"), op_parse_code);
7225 /* Various value-based sanity checks and shared operations. We
7226 do not signal immediate failures for the register constraints;
7227 this allows a syntax error to take precedence. */
7228 switch (op_parse_code)
7236 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
7237 inst.error = BAD_PC;
7242 if (inst.operands[i].isreg)
7244 if (inst.operands[i].reg == REG_PC)
7245 inst.error = BAD_PC;
7246 else if (inst.operands[i].reg == REG_SP
7247 /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
7248 relaxed since ARMv8-A. */
7249 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
7252 inst.error = BAD_SP;
7258 if (inst.operands[i].isreg
7259 && inst.operands[i].reg == REG_PC
7260 && (inst.operands[i].writeback || thumb))
7261 inst.error = BAD_PC;
7270 case OP_oBARRIER_I15:
7279 inst.operands[i].imm = val;
7286 /* If we get here, this operand was successfully parsed. */
7287 inst.operands[i].present = 1;
7291 inst.error = BAD_ARGS;
7296 /* The parse routine should already have set inst.error, but set a
7297 default here just in case. */
7299 inst.error = _("syntax error");
7303 /* Do not backtrack over a trailing optional argument that
7304 absorbed some text. We will only fail again, with the
7305 'garbage following instruction' error message, which is
7306 probably less helpful than the current one. */
7307 if (backtrack_index == i && backtrack_pos != str
7308 && upat[i+1] == OP_stop)
7311 inst.error = _("syntax error");
7315 /* Try again, skipping the optional argument at backtrack_pos. */
7316 str = backtrack_pos;
7317 inst.error = backtrack_error;
7318 inst.operands[backtrack_index].present = 0;
7319 i = backtrack_index;
7323 /* Check that we have parsed all the arguments. */
7324 if (*str != '\0' && !inst.error)
7325 inst.error = _("garbage following instruction");
7327 return inst.error ? FAIL : SUCCESS;
7330 #undef po_char_or_fail
7331 #undef po_reg_or_fail
7332 #undef po_reg_or_goto
7333 #undef po_imm_or_fail
7334 #undef po_scalar_or_fail
7335 #undef po_barrier_or_imm
7337 /* Shorthand macro for instruction encoding functions issuing errors. */
7338 #define constraint(expr, err) \
7349 /* Reject "bad registers" for Thumb-2 instructions. Many Thumb-2
7350 instructions are unpredictable if these registers are used. This
7351 is the BadReg predicate in ARM's Thumb-2 documentation.
7353 Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
7354 places, while the restriction on REG_SP was relaxed since ARMv8-A. */
7355 #define reject_bad_reg(reg) \
7357 if (reg == REG_PC) \
7359 inst.error = BAD_PC; \
7362 else if (reg == REG_SP \
7363 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)) \
7365 inst.error = BAD_SP; \
7370 /* If REG is R13 (the stack pointer), warn that its use is
7372 #define warn_deprecated_sp(reg) \
7374 if (warn_on_deprecated && reg == REG_SP) \
7375 as_tsktsk (_("use of r13 is deprecated")); \
7378 /* Functions for operand encoding. ARM, then Thumb. */
7380 #define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
7382 /* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
7384 The only binary encoding difference is the Coprocessor number. Coprocessor
7385 9 is used for half-precision calculations or conversions. The format of the
7386 instruction is the same as the equivalent Coprocessor 10 instruction that
7387 exists for Single-Precision operation. */
7390 do_scalar_fp16_v82_encode (void)
7392 if (inst.cond != COND_ALWAYS)
7393 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
7394 " the behaviour is UNPREDICTABLE"));
7395 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
7398 inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
7399 mark_feature_used (&arm_ext_fp16);
7402 /* If VAL can be encoded in the immediate field of an ARM instruction,
7403 return the encoded form. Otherwise, return FAIL. */
7406 encode_arm_immediate (unsigned int val)
7413 for (i = 2; i < 32; i += 2)
7414 if ((a = rotate_left (val, i)) <= 0xff)
7415 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
7420 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
7421 return the encoded form. Otherwise, return FAIL. */
7423 encode_thumb32_immediate (unsigned int val)
7430 for (i = 1; i <= 24; i++)
7433 if ((val & ~(0xff << i)) == 0)
7434 return ((val >> i) & 0x7f) | ((32 - i) << 7);
7438 if (val == ((a << 16) | a))
7440 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
7444 if (val == ((a << 16) | a))
7445 return 0x200 | (a >> 8);
7449 /* Encode a VFP SP or DP register number into inst.instruction. */
7452 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
7454 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
7457 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
7460 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
7463 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
7468 first_error (_("D register out of range for selected VFP version"));
7476 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
7480 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
7484 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
7488 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
7492 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
7496 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
7504 /* Encode a <shift> in an ARM-format instruction. The immediate,
7505 if any, is handled by md_apply_fix. */
7507 encode_arm_shift (int i)
7509 /* register-shifted register. */
7510 if (inst.operands[i].immisreg)
7513 for (op_index = 0; op_index <= i; ++op_index)
7515 /* Check the operand only when it's presented. In pre-UAL syntax,
7516 if the destination register is the same as the first operand, two
7517 register form of the instruction can be used. */
7518 if (inst.operands[op_index].present && inst.operands[op_index].isreg
7519 && inst.operands[op_index].reg == REG_PC)
7520 as_warn (UNPRED_REG ("r15"));
7523 if (inst.operands[i].imm == REG_PC)
7524 as_warn (UNPRED_REG ("r15"));
7527 if (inst.operands[i].shift_kind == SHIFT_RRX)
7528 inst.instruction |= SHIFT_ROR << 5;
7531 inst.instruction |= inst.operands[i].shift_kind << 5;
7532 if (inst.operands[i].immisreg)
7534 inst.instruction |= SHIFT_BY_REG;
7535 inst.instruction |= inst.operands[i].imm << 8;
7538 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7543 encode_arm_shifter_operand (int i)
7545 if (inst.operands[i].isreg)
7547 inst.instruction |= inst.operands[i].reg;
7548 encode_arm_shift (i);
7552 inst.instruction |= INST_IMMEDIATE;
7553 if (inst.reloc.type != BFD_RELOC_ARM_IMMEDIATE)
7554 inst.instruction |= inst.operands[i].imm;
7558 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
7560 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
7563 Generate an error if the operand is not a register. */
7564 constraint (!inst.operands[i].isreg,
7565 _("Instruction does not support =N addresses"));
7567 inst.instruction |= inst.operands[i].reg << 16;
7569 if (inst.operands[i].preind)
7573 inst.error = _("instruction does not accept preindexed addressing");
7576 inst.instruction |= PRE_INDEX;
7577 if (inst.operands[i].writeback)
7578 inst.instruction |= WRITE_BACK;
7581 else if (inst.operands[i].postind)
7583 gas_assert (inst.operands[i].writeback);
7585 inst.instruction |= WRITE_BACK;
7587 else /* unindexed - only for coprocessor */
7589 inst.error = _("instruction does not accept unindexed addressing");
7593 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
7594 && (((inst.instruction & 0x000f0000) >> 16)
7595 == ((inst.instruction & 0x0000f000) >> 12)))
7596 as_warn ((inst.instruction & LOAD_BIT)
7597 ? _("destination register same as write-back base")
7598 : _("source register same as write-back base"));
7601 /* inst.operands[i] was set up by parse_address. Encode it into an
7602 ARM-format mode 2 load or store instruction. If is_t is true,
7603 reject forms that cannot be used with a T instruction (i.e. not
7606 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
7608 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7610 encode_arm_addr_mode_common (i, is_t);
7612 if (inst.operands[i].immisreg)
7614 constraint ((inst.operands[i].imm == REG_PC
7615 || (is_pc && inst.operands[i].writeback)),
7617 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
7618 inst.instruction |= inst.operands[i].imm;
7619 if (!inst.operands[i].negative)
7620 inst.instruction |= INDEX_UP;
7621 if (inst.operands[i].shifted)
7623 if (inst.operands[i].shift_kind == SHIFT_RRX)
7624 inst.instruction |= SHIFT_ROR << 5;
7627 inst.instruction |= inst.operands[i].shift_kind << 5;
7628 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7632 else /* immediate offset in inst.reloc */
7634 if (is_pc && !inst.reloc.pc_rel)
7636 const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);
7638 /* If is_t is TRUE, it's called from do_ldstt. ldrt/strt
7639 cannot use PC in addressing.
7640 PC cannot be used in writeback addressing, either. */
7641 constraint ((is_t || inst.operands[i].writeback),
7644 /* Use of PC in str is deprecated for ARMv7. */
7645 if (warn_on_deprecated
7647 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
7648 as_tsktsk (_("use of PC in this instruction is deprecated"));
7651 if (inst.reloc.type == BFD_RELOC_UNUSED)
7653 /* Prefer + for zero encoded value. */
7654 if (!inst.operands[i].negative)
7655 inst.instruction |= INDEX_UP;
7656 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
7661 /* inst.operands[i] was set up by parse_address. Encode it into an
7662 ARM-format mode 3 load or store instruction. Reject forms that
7663 cannot be used with such instructions. If is_t is true, reject
7664 forms that cannot be used with a T instruction (i.e. not
7667 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
7669 if (inst.operands[i].immisreg && inst.operands[i].shifted)
7671 inst.error = _("instruction does not accept scaled register index");
7675 encode_arm_addr_mode_common (i, is_t);
7677 if (inst.operands[i].immisreg)
7679 constraint ((inst.operands[i].imm == REG_PC
7680 || (is_t && inst.operands[i].reg == REG_PC)),
7682 constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
7684 inst.instruction |= inst.operands[i].imm;
7685 if (!inst.operands[i].negative)
7686 inst.instruction |= INDEX_UP;
7688 else /* immediate offset in inst.reloc */
7690 constraint ((inst.operands[i].reg == REG_PC && !inst.reloc.pc_rel
7691 && inst.operands[i].writeback),
7693 inst.instruction |= HWOFFSET_IMM;
7694 if (inst.reloc.type == BFD_RELOC_UNUSED)
7696 /* Prefer + for zero encoded value. */
7697 if (!inst.operands[i].negative)
7698 inst.instruction |= INDEX_UP;
7700 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
7705 /* Write immediate bits [7:0] to the following locations:
7707 |28/24|23 19|18 16|15 4|3 0|
7708 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
7710 This function is used by VMOV/VMVN/VORR/VBIC. */
7713 neon_write_immbits (unsigned immbits)
7715 inst.instruction |= immbits & 0xf;
7716 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
7717 inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
7720 /* Invert low-order SIZE bits of XHI:XLO. */
7723 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
7725 unsigned immlo = xlo ? *xlo : 0;
7726 unsigned immhi = xhi ? *xhi : 0;
7731 immlo = (~immlo) & 0xff;
7735 immlo = (~immlo) & 0xffff;
7739 immhi = (~immhi) & 0xffffffff;
7743 immlo = (~immlo) & 0xffffffff;
7757 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
7761 neon_bits_same_in_bytes (unsigned imm)
7763 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
7764 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
7765 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
7766 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
7769 /* For immediate of above form, return 0bABCD. */
7772 neon_squash_bits (unsigned imm)
7774 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
7775 | ((imm & 0x01000000) >> 21);
7778 /* Compress quarter-float representation to 0b...000 abcdefgh. */
7781 neon_qfloat_bits (unsigned imm)
7783 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
7786 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
7787 the instruction. *OP is passed as the initial value of the op field, and
7788 may be set to a different value depending on the constant (i.e.
7789 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
7790 MVN). If the immediate looks like a repeated pattern then also
7791 try smaller element sizes. */
7794 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
7795 unsigned *immbits, int *op, int size,
7796 enum neon_el_type type)
7798 /* Only permit float immediates (including 0.0/-0.0) if the operand type is
7800 if (type == NT_float && !float_p)
7803 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
7805 if (size != 32 || *op == 1)
7807 *immbits = neon_qfloat_bits (immlo);
7813 if (neon_bits_same_in_bytes (immhi)
7814 && neon_bits_same_in_bytes (immlo))
7818 *immbits = (neon_squash_bits (immhi) << 4)
7819 | neon_squash_bits (immlo);
7830 if (immlo == (immlo & 0x000000ff))
7835 else if (immlo == (immlo & 0x0000ff00))
7837 *immbits = immlo >> 8;
7840 else if (immlo == (immlo & 0x00ff0000))
7842 *immbits = immlo >> 16;
7845 else if (immlo == (immlo & 0xff000000))
7847 *immbits = immlo >> 24;
7850 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
7852 *immbits = (immlo >> 8) & 0xff;
7855 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
7857 *immbits = (immlo >> 16) & 0xff;
7861 if ((immlo & 0xffff) != (immlo >> 16))
7868 if (immlo == (immlo & 0x000000ff))
7873 else if (immlo == (immlo & 0x0000ff00))
7875 *immbits = immlo >> 8;
7879 if ((immlo & 0xff) != (immlo >> 8))
7884 if (immlo == (immlo & 0x000000ff))
7886 /* Don't allow MVN with 8-bit immediate. */
7896 #if defined BFD_HOST_64_BIT
7897 /* Returns TRUE if double precision value V may be cast
7898 to single precision without loss of accuracy. */
7901 is_double_a_single (bfd_int64_t v)
7903 int exp = (int)((v >> 52) & 0x7FF);
7904 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7906 return (exp == 0 || exp == 0x7FF
7907 || (exp >= 1023 - 126 && exp <= 1023 + 127))
7908 && (mantissa & 0x1FFFFFFFl) == 0;
7911 /* Returns a double precision value casted to single precision
7912 (ignoring the least significant bits in exponent and mantissa). */
7915 double_to_single (bfd_int64_t v)
7917 int sign = (int) ((v >> 63) & 1l);
7918 int exp = (int) ((v >> 52) & 0x7FF);
7919 bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
7925 exp = exp - 1023 + 127;
7934 /* No denormalized numbers. */
7940 return (sign << 31) | (exp << 23) | mantissa;
7942 #endif /* BFD_HOST_64_BIT */
7951 static void do_vfp_nsyn_opcode (const char *);
7953 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
7954 Determine whether it can be performed with a move instruction; if
7955 it can, convert inst.instruction to that move instruction and
7956 return TRUE; if it can't, convert inst.instruction to a literal-pool
7957 load and return FALSE. If this is not a valid thing to do in the
7958 current context, set inst.error and return TRUE.
7960 inst.operands[i] describes the destination register. */
7963 move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
7966 bfd_boolean thumb_p = (t == CONST_THUMB);
7967 bfd_boolean arm_p = (t == CONST_ARM);
7970 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
7974 if ((inst.instruction & tbit) == 0)
7976 inst.error = _("invalid pseudo operation");
7980 if (inst.reloc.exp.X_op != O_constant
7981 && inst.reloc.exp.X_op != O_symbol
7982 && inst.reloc.exp.X_op != O_big)
7984 inst.error = _("constant expression expected");
7988 if (inst.reloc.exp.X_op == O_constant
7989 || inst.reloc.exp.X_op == O_big)
7991 #if defined BFD_HOST_64_BIT
7996 if (inst.reloc.exp.X_op == O_big)
7998 LITTLENUM_TYPE w[X_PRECISION];
8001 if (inst.reloc.exp.X_add_number == -1)
8003 gen_to_words (w, X_PRECISION, E_PRECISION);
8005 /* FIXME: Should we check words w[2..5] ? */
8010 #if defined BFD_HOST_64_BIT
8012 ((((((((bfd_int64_t) l[3] & LITTLENUM_MASK)
8013 << LITTLENUM_NUMBER_OF_BITS)
8014 | ((bfd_int64_t) l[2] & LITTLENUM_MASK))
8015 << LITTLENUM_NUMBER_OF_BITS)
8016 | ((bfd_int64_t) l[1] & LITTLENUM_MASK))
8017 << LITTLENUM_NUMBER_OF_BITS)
8018 | ((bfd_int64_t) l[0] & LITTLENUM_MASK));
8020 v = ((l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
8021 | (l[0] & LITTLENUM_MASK);
8025 v = inst.reloc.exp.X_add_number;
8027 if (!inst.operands[i].issingle)
8031 /* LDR should not use lead in a flag-setting instruction being
8032 chosen so we do not check whether movs can be used. */
8034 if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
8035 || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
8036 && inst.operands[i].reg != 13
8037 && inst.operands[i].reg != 15)
8039 /* Check if on thumb2 it can be done with a mov.w, mvn or
8040 movw instruction. */
8041 unsigned int newimm;
8042 bfd_boolean isNegated;
8044 newimm = encode_thumb32_immediate (v);
8045 if (newimm != (unsigned int) FAIL)
8049 newimm = encode_thumb32_immediate (~v);
8050 if (newimm != (unsigned int) FAIL)
8054 /* The number can be loaded with a mov.w or mvn
8056 if (newimm != (unsigned int) FAIL
8057 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
8059 inst.instruction = (0xf04f0000 /* MOV.W. */
8060 | (inst.operands[i].reg << 8));
8061 /* Change to MOVN. */
8062 inst.instruction |= (isNegated ? 0x200000 : 0);
8063 inst.instruction |= (newimm & 0x800) << 15;
8064 inst.instruction |= (newimm & 0x700) << 4;
8065 inst.instruction |= (newimm & 0x0ff);
8068 /* The number can be loaded with a movw instruction. */
8069 else if ((v & ~0xFFFF) == 0
8070 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
8072 int imm = v & 0xFFFF;
8074 inst.instruction = 0xf2400000; /* MOVW. */
8075 inst.instruction |= (inst.operands[i].reg << 8);
8076 inst.instruction |= (imm & 0xf000) << 4;
8077 inst.instruction |= (imm & 0x0800) << 15;
8078 inst.instruction |= (imm & 0x0700) << 4;
8079 inst.instruction |= (imm & 0x00ff);
8086 int value = encode_arm_immediate (v);
8090 /* This can be done with a mov instruction. */
8091 inst.instruction &= LITERAL_MASK;
8092 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
8093 inst.instruction |= value & 0xfff;
8097 value = encode_arm_immediate (~ v);
8100 /* This can be done with a mvn instruction. */
8101 inst.instruction &= LITERAL_MASK;
8102 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
8103 inst.instruction |= value & 0xfff;
8107 else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
8110 unsigned immbits = 0;
8111 unsigned immlo = inst.operands[1].imm;
8112 unsigned immhi = inst.operands[1].regisimm
8113 ? inst.operands[1].reg
8114 : inst.reloc.exp.X_unsigned
8116 : ((bfd_int64_t)((int) immlo)) >> 32;
8117 int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
8118 &op, 64, NT_invtype);
8122 neon_invert_size (&immlo, &immhi, 64);
8124 cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
8125 &op, 64, NT_invtype);
8130 inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
8136 /* Fill other bits in vmov encoding for both thumb and arm. */
8138 inst.instruction |= (0x7U << 29) | (0xF << 24);
8140 inst.instruction |= (0xFU << 28) | (0x1 << 25);
8141 neon_write_immbits (immbits);
8149 /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant. */
8150 if (inst.operands[i].issingle
8151 && is_quarter_float (inst.operands[1].imm)
8152 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
8154 inst.operands[1].imm =
8155 neon_qfloat_bits (v);
8156 do_vfp_nsyn_opcode ("fconsts");
8160 /* If our host does not support a 64-bit type then we cannot perform
8161 the following optimization. This mean that there will be a
8162 discrepancy between the output produced by an assembler built for
8163 a 32-bit-only host and the output produced from a 64-bit host, but
8164 this cannot be helped. */
8165 #if defined BFD_HOST_64_BIT
8166 else if (!inst.operands[1].issingle
8167 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
8169 if (is_double_a_single (v)
8170 && is_quarter_float (double_to_single (v)))
8172 inst.operands[1].imm =
8173 neon_qfloat_bits (double_to_single (v));
8174 do_vfp_nsyn_opcode ("fconstd");
8182 if (add_to_lit_pool ((!inst.operands[i].isvec
8183 || inst.operands[i].issingle) ? 4 : 8) == FAIL)
8186 inst.operands[1].reg = REG_PC;
8187 inst.operands[1].isreg = 1;
8188 inst.operands[1].preind = 1;
8189 inst.reloc.pc_rel = 1;
8190 inst.reloc.type = (thumb_p
8191 ? BFD_RELOC_ARM_THUMB_OFFSET
8193 ? BFD_RELOC_ARM_HWLITERAL
8194 : BFD_RELOC_ARM_LITERAL));
8198 /* inst.operands[i] was set up by parse_address. Encode it into an
8199 ARM-format instruction. Reject all forms which cannot be encoded
8200 into a coprocessor load/store instruction. If wb_ok is false,
8201 reject use of writeback; if unind_ok is false, reject use of
8202 unindexed addressing. If reloc_override is not 0, use it instead
8203 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
8204 (in which case it is preserved). */
8207 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
8209 if (!inst.operands[i].isreg)
8212 if (! inst.operands[0].isvec)
8214 inst.error = _("invalid co-processor operand");
8217 if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
8221 inst.instruction |= inst.operands[i].reg << 16;
8223 gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));
8225 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
8227 gas_assert (!inst.operands[i].writeback);
8230 inst.error = _("instruction does not support unindexed addressing");
8233 inst.instruction |= inst.operands[i].imm;
8234 inst.instruction |= INDEX_UP;
8238 if (inst.operands[i].preind)
8239 inst.instruction |= PRE_INDEX;
8241 if (inst.operands[i].writeback)
8243 if (inst.operands[i].reg == REG_PC)
8245 inst.error = _("pc may not be used with write-back");
8250 inst.error = _("instruction does not support writeback");
8253 inst.instruction |= WRITE_BACK;
8257 inst.reloc.type = (bfd_reloc_code_real_type) reloc_override;
8258 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
8259 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
8260 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
8263 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
8265 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
8268 /* Prefer + for zero encoded value. */
8269 if (!inst.operands[i].negative)
8270 inst.instruction |= INDEX_UP;
8275 /* Functions for instruction encoding, sorted by sub-architecture.
8276 First some generics; their names are taken from the conventional
8277 bit positions for register arguments in ARM format instructions. */
8287 inst.instruction |= inst.operands[0].reg << 12;
8293 inst.instruction |= inst.operands[0].reg << 16;
8299 inst.instruction |= inst.operands[0].reg << 12;
8300 inst.instruction |= inst.operands[1].reg;
8306 inst.instruction |= inst.operands[0].reg;
8307 inst.instruction |= inst.operands[1].reg << 16;
8313 inst.instruction |= inst.operands[0].reg << 12;
8314 inst.instruction |= inst.operands[1].reg << 16;
8320 inst.instruction |= inst.operands[0].reg << 16;
8321 inst.instruction |= inst.operands[1].reg << 12;
8327 inst.instruction |= inst.operands[0].reg << 8;
8328 inst.instruction |= inst.operands[1].reg << 16;
8332 check_obsolete (const arm_feature_set *feature, const char *msg)
8334 if (ARM_CPU_IS_ANY (cpu_variant))
8336 as_tsktsk ("%s", msg);
8339 else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
8351 unsigned Rn = inst.operands[2].reg;
8352 /* Enforce restrictions on SWP instruction. */
8353 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
8355 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
8356 _("Rn must not overlap other operands"));
8358 /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
8360 if (!check_obsolete (&arm_ext_v8,
8361 _("swp{b} use is obsoleted for ARMv8 and later"))
8362 && warn_on_deprecated
8363 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
8364 as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
8367 inst.instruction |= inst.operands[0].reg << 12;
8368 inst.instruction |= inst.operands[1].reg;
8369 inst.instruction |= Rn << 16;
8375 inst.instruction |= inst.operands[0].reg << 12;
8376 inst.instruction |= inst.operands[1].reg << 16;
8377 inst.instruction |= inst.operands[2].reg;
8383 constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
8384 constraint (((inst.reloc.exp.X_op != O_constant
8385 && inst.reloc.exp.X_op != O_illegal)
8386 || inst.reloc.exp.X_add_number != 0),
8388 inst.instruction |= inst.operands[0].reg;
8389 inst.instruction |= inst.operands[1].reg << 12;
8390 inst.instruction |= inst.operands[2].reg << 16;
8396 inst.instruction |= inst.operands[0].imm;
8402 inst.instruction |= inst.operands[0].reg << 12;
8403 encode_arm_cp_address (1, TRUE, TRUE, 0);
8406 /* ARM instructions, in alphabetical order by function name (except
8407 that wrapper functions appear immediately after the function they
8410 /* This is a pseudo-op of the form "adr rd, label" to be converted
8411 into a relative address of the form "add rd, pc, #label-.-8". */
8416 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8418 /* Frag hacking will turn this into a sub instruction if the offset turns
8419 out to be negative. */
8420 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
8421 inst.reloc.pc_rel = 1;
8422 inst.reloc.exp.X_add_number -= 8;
8424 if (support_interwork
8425 && inst.reloc.exp.X_op == O_symbol
8426 && inst.reloc.exp.X_add_symbol != NULL
8427 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8428 && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8429 inst.reloc.exp.X_add_number |= 1;
8432 /* This is a pseudo-op of the form "adrl rd, label" to be converted
8433 into a relative address of the form:
8434 add rd, pc, #low(label-.-8)"
8435 add rd, rd, #high(label-.-8)" */
8440 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
8442 /* Frag hacking will turn this into a sub instruction if the offset turns
8443 out to be negative. */
8444 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
8445 inst.reloc.pc_rel = 1;
8446 inst.size = INSN_SIZE * 2;
8447 inst.reloc.exp.X_add_number -= 8;
8449 if (support_interwork
8450 && inst.reloc.exp.X_op == O_symbol
8451 && inst.reloc.exp.X_add_symbol != NULL
8452 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8453 && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8454 inst.reloc.exp.X_add_number |= 1;
8460 constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
8461 && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
8463 if (!inst.operands[1].present)
8464 inst.operands[1].reg = inst.operands[0].reg;
8465 inst.instruction |= inst.operands[0].reg << 12;
8466 inst.instruction |= inst.operands[1].reg << 16;
8467 encode_arm_shifter_operand (2);
8473 if (inst.operands[0].present)
8474 inst.instruction |= inst.operands[0].imm;
8476 inst.instruction |= 0xf;
8482 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8483 constraint (msb > 32, _("bit-field extends past end of register"));
8484 /* The instruction encoding stores the LSB and MSB,
8485 not the LSB and width. */
8486 inst.instruction |= inst.operands[0].reg << 12;
8487 inst.instruction |= inst.operands[1].imm << 7;
8488 inst.instruction |= (msb - 1) << 16;
8496 /* #0 in second position is alternative syntax for bfc, which is
8497 the same instruction but with REG_PC in the Rm field. */
8498 if (!inst.operands[1].isreg)
8499 inst.operands[1].reg = REG_PC;
8501 msb = inst.operands[2].imm + inst.operands[3].imm;
8502 constraint (msb > 32, _("bit-field extends past end of register"));
8503 /* The instruction encoding stores the LSB and MSB,
8504 not the LSB and width. */
8505 inst.instruction |= inst.operands[0].reg << 12;
8506 inst.instruction |= inst.operands[1].reg;
8507 inst.instruction |= inst.operands[2].imm << 7;
8508 inst.instruction |= (msb - 1) << 16;
8514 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8515 _("bit-field extends past end of register"));
8516 inst.instruction |= inst.operands[0].reg << 12;
8517 inst.instruction |= inst.operands[1].reg;
8518 inst.instruction |= inst.operands[2].imm << 7;
8519 inst.instruction |= (inst.operands[3].imm - 1) << 16;
8522 /* ARM V5 breakpoint instruction (argument parse)
8523 BKPT <16 bit unsigned immediate>
8524 Instruction is not conditional.
8525 The bit pattern given in insns[] has the COND_ALWAYS condition,
8526 and it is an error if the caller tried to override that. */
8531 /* Top 12 of 16 bits to bits 19:8. */
8532 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
8534 /* Bottom 4 of 16 bits to bits 3:0. */
8535 inst.instruction |= inst.operands[0].imm & 0xf;
8539 encode_branch (int default_reloc)
8541 if (inst.operands[0].hasreloc)
8543 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
8544 && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
8545 _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
8546 inst.reloc.type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
8547 ? BFD_RELOC_ARM_PLT32
8548 : thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
8551 inst.reloc.type = (bfd_reloc_code_real_type) default_reloc;
8552 inst.reloc.pc_rel = 1;
8559 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8560 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
8563 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
8570 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8572 if (inst.cond == COND_ALWAYS)
8573 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
8575 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
8579 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
8582 /* ARM V5 branch-link-exchange instruction (argument parse)
8583 BLX <target_addr> ie BLX(1)
8584 BLX{<condition>} <Rm> ie BLX(2)
8585 Unfortunately, there are two different opcodes for this mnemonic.
8586 So, the insns[].value is not used, and the code here zaps values
8587 into inst.instruction.
8588 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
8593 if (inst.operands[0].isreg)
8595 /* Arg is a register; the opcode provided by insns[] is correct.
8596 It is not illegal to do "blx pc", just useless. */
8597 if (inst.operands[0].reg == REG_PC)
8598 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
8600 inst.instruction |= inst.operands[0].reg;
8604 /* Arg is an address; this instruction cannot be executed
8605 conditionally, and the opcode must be adjusted.
8606 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
8607 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead. */
8608 constraint (inst.cond != COND_ALWAYS, BAD_COND);
8609 inst.instruction = 0xfa000000;
8610 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
8617 bfd_boolean want_reloc;
8619 if (inst.operands[0].reg == REG_PC)
8620 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
8622 inst.instruction |= inst.operands[0].reg;
8623 /* Output R_ARM_V4BX relocations if is an EABI object that looks like
8624 it is for ARMv4t or earlier. */
8625 want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
8626 if (!ARM_FEATURE_ZERO (selected_object_arch)
8627 && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
8631 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
8636 inst.reloc.type = BFD_RELOC_ARM_V4BX;
8640 /* ARM v5TEJ. Jump to Jazelle code. */
8645 if (inst.operands[0].reg == REG_PC)
8646 as_tsktsk (_("use of r15 in bxj is not really useful"));
8648 inst.instruction |= inst.operands[0].reg;
8651 /* Co-processor data operation:
8652 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
8653 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
8657 inst.instruction |= inst.operands[0].reg << 8;
8658 inst.instruction |= inst.operands[1].imm << 20;
8659 inst.instruction |= inst.operands[2].reg << 12;
8660 inst.instruction |= inst.operands[3].reg << 16;
8661 inst.instruction |= inst.operands[4].reg;
8662 inst.instruction |= inst.operands[5].imm << 5;
8668 inst.instruction |= inst.operands[0].reg << 16;
8669 encode_arm_shifter_operand (1);
8672 /* Transfer between coprocessor and ARM registers.
8673 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
8678 No special properties. */
8680 struct deprecated_coproc_regs_s
8687 arm_feature_set deprecated;
8688 arm_feature_set obsoleted;
8689 const char *dep_msg;
8690 const char *obs_msg;
8693 #define DEPR_ACCESS_V8 \
8694 N_("This coprocessor register access is deprecated in ARMv8")
8696 /* Table of all deprecated coprocessor registers. */
8697 static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
8699 {15, 0, 7, 10, 5, /* CP15DMB. */
8700 ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8701 DEPR_ACCESS_V8, NULL},
8702 {15, 0, 7, 10, 4, /* CP15DSB. */
8703 ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8704 DEPR_ACCESS_V8, NULL},
8705 {15, 0, 7, 5, 4, /* CP15ISB. */
8706 ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8707 DEPR_ACCESS_V8, NULL},
8708 {14, 6, 1, 0, 0, /* TEEHBR. */
8709 ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8710 DEPR_ACCESS_V8, NULL},
8711 {14, 6, 0, 0, 0, /* TEECR. */
8712 ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
8713 DEPR_ACCESS_V8, NULL},
8716 #undef DEPR_ACCESS_V8
8718 static const size_t deprecated_coproc_reg_count =
8719 sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
8727 Rd = inst.operands[2].reg;
8730 if (inst.instruction == 0xee000010
8731 || inst.instruction == 0xfe000010)
8733 reject_bad_reg (Rd);
8734 else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
8736 constraint (Rd == REG_SP, BAD_SP);
8741 if (inst.instruction == 0xe000010)
8742 constraint (Rd == REG_PC, BAD_PC);
8745 for (i = 0; i < deprecated_coproc_reg_count; ++i)
8747 const struct deprecated_coproc_regs_s *r =
8748 deprecated_coproc_regs + i;
8750 if (inst.operands[0].reg == r->cp
8751 && inst.operands[1].imm == r->opc1
8752 && inst.operands[3].reg == r->crn
8753 && inst.operands[4].reg == r->crm
8754 && inst.operands[5].imm == r->opc2)
8756 if (! ARM_CPU_IS_ANY (cpu_variant)
8757 && warn_on_deprecated
8758 && ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
8759 as_tsktsk ("%s", r->dep_msg);
8763 inst.instruction |= inst.operands[0].reg << 8;
8764 inst.instruction |= inst.operands[1].imm << 21;
8765 inst.instruction |= Rd << 12;
8766 inst.instruction |= inst.operands[3].reg << 16;
8767 inst.instruction |= inst.operands[4].reg;
8768 inst.instruction |= inst.operands[5].imm << 5;
8771 /* Transfer between coprocessor register and pair of ARM registers.
8772 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
8777 Two XScale instructions are special cases of these:
8779 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
8780 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
8782 Result unpredictable if Rd or Rn is R15. */
8789 Rd = inst.operands[2].reg;
8790 Rn = inst.operands[3].reg;
8794 reject_bad_reg (Rd);
8795 reject_bad_reg (Rn);
8799 constraint (Rd == REG_PC, BAD_PC);
8800 constraint (Rn == REG_PC, BAD_PC);
8803 /* Only check the MRRC{2} variants. */
8804 if ((inst.instruction & 0x0FF00000) == 0x0C500000)
8806 /* If Rd == Rn, error that the operation is
8807 unpredictable (example MRRC p3,#1,r1,r1,c4). */
8808 constraint (Rd == Rn, BAD_OVERLAP);
8811 inst.instruction |= inst.operands[0].reg << 8;
8812 inst.instruction |= inst.operands[1].imm << 4;
8813 inst.instruction |= Rd << 12;
8814 inst.instruction |= Rn << 16;
8815 inst.instruction |= inst.operands[4].reg;
8821 inst.instruction |= inst.operands[0].imm << 6;
8822 if (inst.operands[1].present)
8824 inst.instruction |= CPSI_MMOD;
8825 inst.instruction |= inst.operands[1].imm;
8832 inst.instruction |= inst.operands[0].imm;
8838 unsigned Rd, Rn, Rm;
8840 Rd = inst.operands[0].reg;
8841 Rn = (inst.operands[1].present
8842 ? inst.operands[1].reg : Rd);
8843 Rm = inst.operands[2].reg;
8845 constraint ((Rd == REG_PC), BAD_PC);
8846 constraint ((Rn == REG_PC), BAD_PC);
8847 constraint ((Rm == REG_PC), BAD_PC);
8849 inst.instruction |= Rd << 16;
8850 inst.instruction |= Rn << 0;
8851 inst.instruction |= Rm << 8;
8857 /* There is no IT instruction in ARM mode. We
8858 process it to do the validation as if in
8859 thumb mode, just in case the code gets
8860 assembled for thumb using the unified syntax. */
8865 set_it_insn_type (IT_INSN);
8866 now_it.mask = (inst.instruction & 0xf) | 0x10;
8867 now_it.cc = inst.operands[0].imm;
8871 /* If there is only one register in the register list,
8872 then return its register number. Otherwise return -1. */
8874 only_one_reg_in_list (int range)
8876 int i = ffs (range) - 1;
8877 return (i > 15 || range != (1 << i)) ? -1 : i;
8881 encode_ldmstm(int from_push_pop_mnem)
8883 int base_reg = inst.operands[0].reg;
8884 int range = inst.operands[1].imm;
8887 inst.instruction |= base_reg << 16;
8888 inst.instruction |= range;
8890 if (inst.operands[1].writeback)
8891 inst.instruction |= LDM_TYPE_2_OR_3;
8893 if (inst.operands[0].writeback)
8895 inst.instruction |= WRITE_BACK;
8896 /* Check for unpredictable uses of writeback. */
8897 if (inst.instruction & LOAD_BIT)
8899 /* Not allowed in LDM type 2. */
8900 if ((inst.instruction & LDM_TYPE_2_OR_3)
8901 && ((range & (1 << REG_PC)) == 0))
8902 as_warn (_("writeback of base register is UNPREDICTABLE"));
8903 /* Only allowed if base reg not in list for other types. */
8904 else if (range & (1 << base_reg))
8905 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
8909 /* Not allowed for type 2. */
8910 if (inst.instruction & LDM_TYPE_2_OR_3)
8911 as_warn (_("writeback of base register is UNPREDICTABLE"));
8912 /* Only allowed if base reg not in list, or first in list. */
8913 else if ((range & (1 << base_reg))
8914 && (range & ((1 << base_reg) - 1)))
8915 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
8919 /* If PUSH/POP has only one register, then use the A2 encoding. */
8920 one_reg = only_one_reg_in_list (range);
8921 if (from_push_pop_mnem && one_reg >= 0)
8923 int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;
8925 if (is_push && one_reg == 13 /* SP */)
8926 /* PR 22483: The A2 encoding cannot be used when
8927 pushing the stack pointer as this is UNPREDICTABLE. */
8930 inst.instruction &= A_COND_MASK;
8931 inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
8932 inst.instruction |= one_reg << 12;
8939 encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
8942 /* ARMv5TE load-consecutive (argument parse)
8951 constraint (inst.operands[0].reg % 2 != 0,
8952 _("first transfer register must be even"));
8953 constraint (inst.operands[1].present
8954 && inst.operands[1].reg != inst.operands[0].reg + 1,
8955 _("can only transfer two consecutive registers"));
8956 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
8957 constraint (!inst.operands[2].isreg, _("'[' expected"));
8959 if (!inst.operands[1].present)
8960 inst.operands[1].reg = inst.operands[0].reg + 1;
8962 /* encode_arm_addr_mode_3 will diagnose overlap between the base
8963 register and the first register written; we have to diagnose
8964 overlap between the base and the second register written here. */
8966 if (inst.operands[2].reg == inst.operands[1].reg
8967 && (inst.operands[2].writeback || inst.operands[2].postind))
8968 as_warn (_("base register written back, and overlaps "
8969 "second transfer register"));
8971 if (!(inst.instruction & V4_STR_BIT))
8973 /* For an index-register load, the index register must not overlap the
8974 destination (even if not write-back). */
8975 if (inst.operands[2].immisreg
8976 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
8977 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
8978 as_warn (_("index register overlaps transfer register"));
8980 inst.instruction |= inst.operands[0].reg << 12;
8981 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
8987 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8988 || inst.operands[1].postind || inst.operands[1].writeback
8989 || inst.operands[1].immisreg || inst.operands[1].shifted
8990 || inst.operands[1].negative
8991 /* This can arise if the programmer has written
8993 or if they have mistakenly used a register name as the last
8996 It is very difficult to distinguish between these two cases
8997 because "rX" might actually be a label. ie the register
8998 name has been occluded by a symbol of the same name. So we
8999 just generate a general 'bad addressing mode' type error
9000 message and leave it up to the programmer to discover the
9001 true cause and fix their mistake. */
9002 || (inst.operands[1].reg == REG_PC),
9005 constraint (inst.reloc.exp.X_op != O_constant
9006 || inst.reloc.exp.X_add_number != 0,
9007 _("offset must be zero in ARM encoding"));
9009 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
9011 inst.instruction |= inst.operands[0].reg << 12;
9012 inst.instruction |= inst.operands[1].reg << 16;
9013 inst.reloc.type = BFD_RELOC_UNUSED;
9019 constraint (inst.operands[0].reg % 2 != 0,
9020 _("even register required"));
9021 constraint (inst.operands[1].present
9022 && inst.operands[1].reg != inst.operands[0].reg + 1,
9023 _("can only load two consecutive registers"));
9024 /* If op 1 were present and equal to PC, this function wouldn't
9025 have been called in the first place. */
9026 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
9028 inst.instruction |= inst.operands[0].reg << 12;
9029 inst.instruction |= inst.operands[2].reg << 16;
9032 /* In both ARM and thumb state 'ldr pc, #imm' with an immediate
9033 which is not a multiple of four is UNPREDICTABLE. */
9035 check_ldr_r15_aligned (void)
9037 constraint (!(inst.operands[1].immisreg)
9038 && (inst.operands[0].reg == REG_PC
9039 && inst.operands[1].reg == REG_PC
9040 && (inst.reloc.exp.X_add_number & 0x3)),
9041 _("ldr to register 15 must be 4-byte aligned"));
9047 inst.instruction |= inst.operands[0].reg << 12;
9048 if (!inst.operands[1].isreg)
9049 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
9051 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
9052 check_ldr_r15_aligned ();
9058 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9060 if (inst.operands[1].preind)
9062 constraint (inst.reloc.exp.X_op != O_constant
9063 || inst.reloc.exp.X_add_number != 0,
9064 _("this instruction requires a post-indexed address"));
9066 inst.operands[1].preind = 0;
9067 inst.operands[1].postind = 1;
9068 inst.operands[1].writeback = 1;
9070 inst.instruction |= inst.operands[0].reg << 12;
9071 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
9074 /* Halfword and signed-byte load/store operations. */
9079 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9080 inst.instruction |= inst.operands[0].reg << 12;
9081 if (!inst.operands[1].isreg)
9082 if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
9084 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
9090 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
9092 if (inst.operands[1].preind)
9094 constraint (inst.reloc.exp.X_op != O_constant
9095 || inst.reloc.exp.X_add_number != 0,
9096 _("this instruction requires a post-indexed address"));
9098 inst.operands[1].preind = 0;
9099 inst.operands[1].postind = 1;
9100 inst.operands[1].writeback = 1;
9102 inst.instruction |= inst.operands[0].reg << 12;
9103 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
9106 /* Co-processor register load/store.
9107 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
9111 inst.instruction |= inst.operands[0].reg << 8;
9112 inst.instruction |= inst.operands[1].reg << 12;
9113 encode_arm_cp_address (2, TRUE, TRUE, 0);
9119 /* This restriction does not apply to mls (nor to mla in v6 or later). */
9120 if (inst.operands[0].reg == inst.operands[1].reg
9121 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
9122 && !(inst.instruction & 0x00400000))
9123 as_tsktsk (_("Rd and Rm should be different in mla"));
9125 inst.instruction |= inst.operands[0].reg << 16;
9126 inst.instruction |= inst.operands[1].reg;
9127 inst.instruction |= inst.operands[2].reg << 8;
9128 inst.instruction |= inst.operands[3].reg << 12;
9134 constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9135 && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
9137 inst.instruction |= inst.operands[0].reg << 12;
9138 encode_arm_shifter_operand (1);
9141 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
9148 top = (inst.instruction & 0x00400000) != 0;
9149 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
9150 _(":lower16: not allowed in this instruction"));
9151 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
9152 _(":upper16: not allowed in this instruction"));
9153 inst.instruction |= inst.operands[0].reg << 12;
9154 if (inst.reloc.type == BFD_RELOC_UNUSED)
9156 imm = inst.reloc.exp.X_add_number;
9157 /* The value is in two pieces: 0:11, 16:19. */
9158 inst.instruction |= (imm & 0x00000fff);
9159 inst.instruction |= (imm & 0x0000f000) << 4;
9164 do_vfp_nsyn_mrs (void)
9166 if (inst.operands[0].isvec)
9168 if (inst.operands[1].reg != 1)
9169 first_error (_("operand 1 must be FPSCR"));
9170 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
9171 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
9172 do_vfp_nsyn_opcode ("fmstat");
9174 else if (inst.operands[1].isvec)
9175 do_vfp_nsyn_opcode ("fmrx");
9183 do_vfp_nsyn_msr (void)
9185 if (inst.operands[0].isvec)
9186 do_vfp_nsyn_opcode ("fmxr");
9196 unsigned Rt = inst.operands[0].reg;
9198 if (thumb_mode && Rt == REG_SP)
9200 inst.error = BAD_SP;
9204 /* MVFR2 is only valid at ARMv8-A. */
9205 if (inst.operands[1].reg == 5)
9206 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
9209 /* APSR_ sets isvec. All other refs to PC are illegal. */
9210 if (!inst.operands[0].isvec && Rt == REG_PC)
9212 inst.error = BAD_PC;
9216 /* If we get through parsing the register name, we just insert the number
9217 generated into the instruction without further validation. */
9218 inst.instruction |= (inst.operands[1].reg << 16);
9219 inst.instruction |= (Rt << 12);
9225 unsigned Rt = inst.operands[1].reg;
9228 reject_bad_reg (Rt);
9229 else if (Rt == REG_PC)
9231 inst.error = BAD_PC;
9235 /* MVFR2 is only valid for ARMv8-A. */
9236 if (inst.operands[0].reg == 5)
9237 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
9240 /* If we get through parsing the register name, we just insert the number
9241 generated into the instruction without further validation. */
9242 inst.instruction |= (inst.operands[0].reg << 16);
9243 inst.instruction |= (Rt << 12);
9251 if (do_vfp_nsyn_mrs () == SUCCESS)
9254 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9255 inst.instruction |= inst.operands[0].reg << 12;
9257 if (inst.operands[1].isreg)
9259 br = inst.operands[1].reg;
9260 if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
9261 as_bad (_("bad register for mrs"));
9265 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9266 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
9268 _("'APSR', 'CPSR' or 'SPSR' expected"));
9269 br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
9272 inst.instruction |= br;
9275 /* Two possible forms:
9276 "{C|S}PSR_<field>, Rm",
9277 "{C|S}PSR_f, #expression". */
9282 if (do_vfp_nsyn_msr () == SUCCESS)
9285 inst.instruction |= inst.operands[0].imm;
9286 if (inst.operands[1].isreg)
9287 inst.instruction |= inst.operands[1].reg;
9290 inst.instruction |= INST_IMMEDIATE;
9291 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
9292 inst.reloc.pc_rel = 0;
9299 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
9301 if (!inst.operands[2].present)
9302 inst.operands[2].reg = inst.operands[0].reg;
9303 inst.instruction |= inst.operands[0].reg << 16;
9304 inst.instruction |= inst.operands[1].reg;
9305 inst.instruction |= inst.operands[2].reg << 8;
9307 if (inst.operands[0].reg == inst.operands[1].reg
9308 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9309 as_tsktsk (_("Rd and Rm should be different in mul"));
9312 /* Long Multiply Parser
9313 UMULL RdLo, RdHi, Rm, Rs
9314 SMULL RdLo, RdHi, Rm, Rs
9315 UMLAL RdLo, RdHi, Rm, Rs
9316 SMLAL RdLo, RdHi, Rm, Rs. */
9321 inst.instruction |= inst.operands[0].reg << 12;
9322 inst.instruction |= inst.operands[1].reg << 16;
9323 inst.instruction |= inst.operands[2].reg;
9324 inst.instruction |= inst.operands[3].reg << 8;
9326 /* rdhi and rdlo must be different. */
9327 if (inst.operands[0].reg == inst.operands[1].reg)
9328 as_tsktsk (_("rdhi and rdlo must be different"));
9330 /* rdhi, rdlo and rm must all be different before armv6. */
9331 if ((inst.operands[0].reg == inst.operands[2].reg
9332 || inst.operands[1].reg == inst.operands[2].reg)
9333 && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
9334 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
9340 if (inst.operands[0].present
9341 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
9343 /* Architectural NOP hints are CPSR sets with no bits selected. */
9344 inst.instruction &= 0xf0000000;
9345 inst.instruction |= 0x0320f000;
9346 if (inst.operands[0].present)
9347 inst.instruction |= inst.operands[0].imm;
9351 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
9352 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
9353 Condition defaults to COND_ALWAYS.
9354 Error if Rd, Rn or Rm are R15. */
9359 inst.instruction |= inst.operands[0].reg << 12;
9360 inst.instruction |= inst.operands[1].reg << 16;
9361 inst.instruction |= inst.operands[2].reg;
9362 if (inst.operands[3].present)
9363 encode_arm_shift (3);
9366 /* ARM V6 PKHTB (Argument Parse). */
9371 if (!inst.operands[3].present)
9373 /* If the shift specifier is omitted, turn the instruction
9374 into pkhbt rd, rm, rn. */
9375 inst.instruction &= 0xfff00010;
9376 inst.instruction |= inst.operands[0].reg << 12;
9377 inst.instruction |= inst.operands[1].reg;
9378 inst.instruction |= inst.operands[2].reg << 16;
9382 inst.instruction |= inst.operands[0].reg << 12;
9383 inst.instruction |= inst.operands[1].reg << 16;
9384 inst.instruction |= inst.operands[2].reg;
9385 encode_arm_shift (3);
9389 /* ARMv5TE: Preload-Cache
9390 MP Extensions: Preload for write
9394 Syntactically, like LDR with B=1, W=0, L=1. */
9399 constraint (!inst.operands[0].isreg,
9400 _("'[' expected after PLD mnemonic"));
9401 constraint (inst.operands[0].postind,
9402 _("post-indexed expression used in preload instruction"));
9403 constraint (inst.operands[0].writeback,
9404 _("writeback used in preload instruction"));
9405 constraint (!inst.operands[0].preind,
9406 _("unindexed addressing used in preload instruction"));
9407 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
9410 /* ARMv7: PLI <addr_mode> */
9414 constraint (!inst.operands[0].isreg,
9415 _("'[' expected after PLI mnemonic"));
9416 constraint (inst.operands[0].postind,
9417 _("post-indexed expression used in preload instruction"));
9418 constraint (inst.operands[0].writeback,
9419 _("writeback used in preload instruction"));
9420 constraint (!inst.operands[0].preind,
9421 _("unindexed addressing used in preload instruction"));
9422 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
9423 inst.instruction &= ~PRE_INDEX;
9429 constraint (inst.operands[0].writeback,
9430 _("push/pop do not support {reglist}^"));
9431 inst.operands[1] = inst.operands[0];
9432 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
9433 inst.operands[0].isreg = 1;
9434 inst.operands[0].writeback = 1;
9435 inst.operands[0].reg = REG_SP;
9436 encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
9439 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
9440 word at the specified address and the following word
9442 Unconditionally executed.
9443 Error if Rn is R15. */
9448 inst.instruction |= inst.operands[0].reg << 16;
9449 if (inst.operands[0].writeback)
9450 inst.instruction |= WRITE_BACK;
9453 /* ARM V6 ssat (argument parse). */
9458 inst.instruction |= inst.operands[0].reg << 12;
9459 inst.instruction |= (inst.operands[1].imm - 1) << 16;
9460 inst.instruction |= inst.operands[2].reg;
9462 if (inst.operands[3].present)
9463 encode_arm_shift (3);
9466 /* ARM V6 usat (argument parse). */
9471 inst.instruction |= inst.operands[0].reg << 12;
9472 inst.instruction |= inst.operands[1].imm << 16;
9473 inst.instruction |= inst.operands[2].reg;
9475 if (inst.operands[3].present)
9476 encode_arm_shift (3);
9479 /* ARM V6 ssat16 (argument parse). */
9484 inst.instruction |= inst.operands[0].reg << 12;
9485 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
9486 inst.instruction |= inst.operands[2].reg;
9492 inst.instruction |= inst.operands[0].reg << 12;
9493 inst.instruction |= inst.operands[1].imm << 16;
9494 inst.instruction |= inst.operands[2].reg;
9497 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
9498 preserving the other bits.
9500 setend <endian_specifier>, where <endian_specifier> is either
9506 if (warn_on_deprecated
9507 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
9508 as_tsktsk (_("setend use is deprecated for ARMv8"));
9510 if (inst.operands[0].imm)
9511 inst.instruction |= 0x200;
9517 unsigned int Rm = (inst.operands[1].present
9518 ? inst.operands[1].reg
9519 : inst.operands[0].reg);
9521 inst.instruction |= inst.operands[0].reg << 12;
9522 inst.instruction |= Rm;
9523 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
9525 inst.instruction |= inst.operands[2].reg << 8;
9526 inst.instruction |= SHIFT_BY_REG;
9527 /* PR 12854: Error on extraneous shifts. */
9528 constraint (inst.operands[2].shifted,
9529 _("extraneous shift as part of operand to shift insn"));
9532 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
9538 inst.reloc.type = BFD_RELOC_ARM_SMC;
9539 inst.reloc.pc_rel = 0;
9545 inst.reloc.type = BFD_RELOC_ARM_HVC;
9546 inst.reloc.pc_rel = 0;
9552 inst.reloc.type = BFD_RELOC_ARM_SWI;
9553 inst.reloc.pc_rel = 0;
9559 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
9560 _("selected processor does not support SETPAN instruction"));
9562 inst.instruction |= ((inst.operands[0].imm & 1) << 9);
9568 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
9569 _("selected processor does not support SETPAN instruction"));
9571 inst.instruction |= (inst.operands[0].imm << 3);
9574 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
9575 SMLAxy{cond} Rd,Rm,Rs,Rn
9576 SMLAWy{cond} Rd,Rm,Rs,Rn
9577 Error if any register is R15. */
9582 inst.instruction |= inst.operands[0].reg << 16;
9583 inst.instruction |= inst.operands[1].reg;
9584 inst.instruction |= inst.operands[2].reg << 8;
9585 inst.instruction |= inst.operands[3].reg << 12;
9588 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
9589 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
9590 Error if any register is R15.
9591 Warning if Rdlo == Rdhi. */
9596 inst.instruction |= inst.operands[0].reg << 12;
9597 inst.instruction |= inst.operands[1].reg << 16;
9598 inst.instruction |= inst.operands[2].reg;
9599 inst.instruction |= inst.operands[3].reg << 8;
9601 if (inst.operands[0].reg == inst.operands[1].reg)
9602 as_tsktsk (_("rdhi and rdlo must be different"));
9605 /* ARM V5E (El Segundo) signed-multiply (argument parse)
9606 SMULxy{cond} Rd,Rm,Rs
9607 Error if any register is R15. */
9612 inst.instruction |= inst.operands[0].reg << 16;
9613 inst.instruction |= inst.operands[1].reg;
9614 inst.instruction |= inst.operands[2].reg << 8;
9617 /* ARM V6 srs (argument parse). The variable fields in the encoding are
9618 the same for both ARM and Thumb-2. */
9625 if (inst.operands[0].present)
9627 reg = inst.operands[0].reg;
9628 constraint (reg != REG_SP, _("SRS base register must be r13"));
9633 inst.instruction |= reg << 16;
9634 inst.instruction |= inst.operands[1].imm;
9635 if (inst.operands[0].writeback || inst.operands[1].writeback)
9636 inst.instruction |= WRITE_BACK;
9639 /* ARM V6 strex (argument parse). */
9644 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9645 || inst.operands[2].postind || inst.operands[2].writeback
9646 || inst.operands[2].immisreg || inst.operands[2].shifted
9647 || inst.operands[2].negative
9648 /* See comment in do_ldrex(). */
9649 || (inst.operands[2].reg == REG_PC),
9652 constraint (inst.operands[0].reg == inst.operands[1].reg
9653 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9655 constraint (inst.reloc.exp.X_op != O_constant
9656 || inst.reloc.exp.X_add_number != 0,
9657 _("offset must be zero in ARM encoding"));
9659 inst.instruction |= inst.operands[0].reg << 12;
9660 inst.instruction |= inst.operands[1].reg;
9661 inst.instruction |= inst.operands[2].reg << 16;
9662 inst.reloc.type = BFD_RELOC_UNUSED;
9668 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9669 || inst.operands[2].postind || inst.operands[2].writeback
9670 || inst.operands[2].immisreg || inst.operands[2].shifted
9671 || inst.operands[2].negative,
9674 constraint (inst.operands[0].reg == inst.operands[1].reg
9675 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9683 constraint (inst.operands[1].reg % 2 != 0,
9684 _("even register required"));
9685 constraint (inst.operands[2].present
9686 && inst.operands[2].reg != inst.operands[1].reg + 1,
9687 _("can only store two consecutive registers"));
9688 /* If op 2 were present and equal to PC, this function wouldn't
9689 have been called in the first place. */
9690 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
9692 constraint (inst.operands[0].reg == inst.operands[1].reg
9693 || inst.operands[0].reg == inst.operands[1].reg + 1
9694 || inst.operands[0].reg == inst.operands[3].reg,
9697 inst.instruction |= inst.operands[0].reg << 12;
9698 inst.instruction |= inst.operands[1].reg;
9699 inst.instruction |= inst.operands[3].reg << 16;
9706 constraint (inst.operands[0].reg == inst.operands[1].reg
9707 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9715 constraint (inst.operands[0].reg == inst.operands[1].reg
9716 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
9721 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
9722 extends it to 32-bits, and adds the result to a value in another
9723 register. You can specify a rotation by 0, 8, 16, or 24 bits
9724 before extracting the 16-bit value.
9725 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
9726 Condition defaults to COND_ALWAYS.
9727 Error if any register uses R15. */
9732 inst.instruction |= inst.operands[0].reg << 12;
9733 inst.instruction |= inst.operands[1].reg << 16;
9734 inst.instruction |= inst.operands[2].reg;
9735 inst.instruction |= inst.operands[3].imm << 10;
9740 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
9741 Condition defaults to COND_ALWAYS.
9742 Error if any register uses R15. */
9747 inst.instruction |= inst.operands[0].reg << 12;
9748 inst.instruction |= inst.operands[1].reg;
9749 inst.instruction |= inst.operands[2].imm << 10;
9752 /* VFP instructions. In a logical order: SP variant first, monad
9753 before dyad, arithmetic then move then load/store. */
9756 do_vfp_sp_monadic (void)
9758 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9759 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9763 do_vfp_sp_dyadic (void)
9765 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9766 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9767 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9771 do_vfp_sp_compare_z (void)
9773 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9777 do_vfp_dp_sp_cvt (void)
9779 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9780 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
9784 do_vfp_sp_dp_cvt (void)
9786 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9787 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9791 do_vfp_reg_from_sp (void)
9793 inst.instruction |= inst.operands[0].reg << 12;
9794 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
9798 do_vfp_reg2_from_sp2 (void)
9800 constraint (inst.operands[2].imm != 2,
9801 _("only two consecutive VFP SP registers allowed here"));
9802 inst.instruction |= inst.operands[0].reg << 12;
9803 inst.instruction |= inst.operands[1].reg << 16;
9804 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
9808 do_vfp_sp_from_reg (void)
9810 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
9811 inst.instruction |= inst.operands[1].reg << 12;
9815 do_vfp_sp2_from_reg2 (void)
9817 constraint (inst.operands[0].imm != 2,
9818 _("only two consecutive VFP SP registers allowed here"));
9819 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
9820 inst.instruction |= inst.operands[1].reg << 12;
9821 inst.instruction |= inst.operands[2].reg << 16;
9825 do_vfp_sp_ldst (void)
9827 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9828 encode_arm_cp_address (1, FALSE, TRUE, 0);
9832 do_vfp_dp_ldst (void)
9834 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9835 encode_arm_cp_address (1, FALSE, TRUE, 0);
9840 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
9842 if (inst.operands[0].writeback)
9843 inst.instruction |= WRITE_BACK;
9845 constraint (ldstm_type != VFP_LDSTMIA,
9846 _("this addressing mode requires base-register writeback"));
9847 inst.instruction |= inst.operands[0].reg << 16;
9848 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
9849 inst.instruction |= inst.operands[1].imm;
9853 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
9857 if (inst.operands[0].writeback)
9858 inst.instruction |= WRITE_BACK;
9860 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
9861 _("this addressing mode requires base-register writeback"));
9863 inst.instruction |= inst.operands[0].reg << 16;
9864 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9866 count = inst.operands[1].imm << 1;
9867 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
9870 inst.instruction |= count;
9874 do_vfp_sp_ldstmia (void)
9876 vfp_sp_ldstm (VFP_LDSTMIA);
9880 do_vfp_sp_ldstmdb (void)
9882 vfp_sp_ldstm (VFP_LDSTMDB);
9886 do_vfp_dp_ldstmia (void)
9888 vfp_dp_ldstm (VFP_LDSTMIA);
9892 do_vfp_dp_ldstmdb (void)
9894 vfp_dp_ldstm (VFP_LDSTMDB);
9898 do_vfp_xp_ldstmia (void)
9900 vfp_dp_ldstm (VFP_LDSTMIAX);
9904 do_vfp_xp_ldstmdb (void)
9906 vfp_dp_ldstm (VFP_LDSTMDBX);
9910 do_vfp_dp_rd_rm (void)
9912 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9913 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
9917 do_vfp_dp_rn_rd (void)
9919 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
9920 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9924 do_vfp_dp_rd_rn (void)
9926 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9927 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9931 do_vfp_dp_rd_rn_rm (void)
9933 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9934 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
9935 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
9941 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9945 do_vfp_dp_rm_rd_rn (void)
9947 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
9948 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
9949 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
9952 /* VFPv3 instructions. */
9954 do_vfp_sp_const (void)
9956 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
9957 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9958 inst.instruction |= (inst.operands[1].imm & 0x0f);
9962 do_vfp_dp_const (void)
9964 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
9965 inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
9966 inst.instruction |= (inst.operands[1].imm & 0x0f);
9970 vfp_conv (int srcsize)
9972 int immbits = srcsize - inst.operands[1].imm;
9974 if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
9976 /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
9977 i.e. immbits must be in range 0 - 16. */
9978 inst.error = _("immediate value out of range, expected range [0, 16]");
9981 else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
9983 /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
9984 i.e. immbits must be in range 0 - 31. */
9985 inst.error = _("immediate value out of range, expected range [1, 32]");
9989 inst.instruction |= (immbits & 1) << 5;
9990 inst.instruction |= (immbits >> 1);
9994 do_vfp_sp_conv_16 (void)
9996 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10001 do_vfp_dp_conv_16 (void)
10003 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10008 do_vfp_sp_conv_32 (void)
10010 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10015 do_vfp_dp_conv_32 (void)
10017 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10021 /* FPA instructions. Also in a logical order. */
10026 inst.instruction |= inst.operands[0].reg << 16;
10027 inst.instruction |= inst.operands[1].reg;
10031 do_fpa_ldmstm (void)
10033 inst.instruction |= inst.operands[0].reg << 12;
10034 switch (inst.operands[1].imm)
10036 case 1: inst.instruction |= CP_T_X; break;
10037 case 2: inst.instruction |= CP_T_Y; break;
10038 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
10043 if (inst.instruction & (PRE_INDEX | INDEX_UP))
10045 /* The instruction specified "ea" or "fd", so we can only accept
10046 [Rn]{!}. The instruction does not really support stacking or
10047 unstacking, so we have to emulate these by setting appropriate
10048 bits and offsets. */
10049 constraint (inst.reloc.exp.X_op != O_constant
10050 || inst.reloc.exp.X_add_number != 0,
10051 _("this instruction does not support indexing"));
10053 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
10054 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
10056 if (!(inst.instruction & INDEX_UP))
10057 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
10059 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
10061 inst.operands[2].preind = 0;
10062 inst.operands[2].postind = 1;
10066 encode_arm_cp_address (2, TRUE, TRUE, 0);
10069 /* iWMMXt instructions: strictly in alphabetical order. */
10072 do_iwmmxt_tandorc (void)
10074 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
10078 do_iwmmxt_textrc (void)
10080 inst.instruction |= inst.operands[0].reg << 12;
10081 inst.instruction |= inst.operands[1].imm;
10085 do_iwmmxt_textrm (void)
10087 inst.instruction |= inst.operands[0].reg << 12;
10088 inst.instruction |= inst.operands[1].reg << 16;
10089 inst.instruction |= inst.operands[2].imm;
10093 do_iwmmxt_tinsr (void)
10095 inst.instruction |= inst.operands[0].reg << 16;
10096 inst.instruction |= inst.operands[1].reg << 12;
10097 inst.instruction |= inst.operands[2].imm;
10101 do_iwmmxt_tmia (void)
10103 inst.instruction |= inst.operands[0].reg << 5;
10104 inst.instruction |= inst.operands[1].reg;
10105 inst.instruction |= inst.operands[2].reg << 12;
10109 do_iwmmxt_waligni (void)
10111 inst.instruction |= inst.operands[0].reg << 12;
10112 inst.instruction |= inst.operands[1].reg << 16;
10113 inst.instruction |= inst.operands[2].reg;
10114 inst.instruction |= inst.operands[3].imm << 20;
10118 do_iwmmxt_wmerge (void)
10120 inst.instruction |= inst.operands[0].reg << 12;
10121 inst.instruction |= inst.operands[1].reg << 16;
10122 inst.instruction |= inst.operands[2].reg;
10123 inst.instruction |= inst.operands[3].imm << 21;
10127 do_iwmmxt_wmov (void)
10129 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
10130 inst.instruction |= inst.operands[0].reg << 12;
10131 inst.instruction |= inst.operands[1].reg << 16;
10132 inst.instruction |= inst.operands[1].reg;
10136 do_iwmmxt_wldstbh (void)
10139 inst.instruction |= inst.operands[0].reg << 12;
10141 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
10143 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
10144 encode_arm_cp_address (1, TRUE, FALSE, reloc);
10148 do_iwmmxt_wldstw (void)
10150 /* RIWR_RIWC clears .isreg for a control register. */
10151 if (!inst.operands[0].isreg)
10153 constraint (inst.cond != COND_ALWAYS, BAD_COND);
10154 inst.instruction |= 0xf0000000;
10157 inst.instruction |= inst.operands[0].reg << 12;
10158 encode_arm_cp_address (1, TRUE, TRUE, 0);
10162 do_iwmmxt_wldstd (void)
10164 inst.instruction |= inst.operands[0].reg << 12;
10165 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
10166 && inst.operands[1].immisreg)
10168 inst.instruction &= ~0x1a000ff;
10169 inst.instruction |= (0xfU << 28);
10170 if (inst.operands[1].preind)
10171 inst.instruction |= PRE_INDEX;
10172 if (!inst.operands[1].negative)
10173 inst.instruction |= INDEX_UP;
10174 if (inst.operands[1].writeback)
10175 inst.instruction |= WRITE_BACK;
10176 inst.instruction |= inst.operands[1].reg << 16;
10177 inst.instruction |= inst.reloc.exp.X_add_number << 4;
10178 inst.instruction |= inst.operands[1].imm;
10181 encode_arm_cp_address (1, TRUE, FALSE, 0);
10185 do_iwmmxt_wshufh (void)
10187 inst.instruction |= inst.operands[0].reg << 12;
10188 inst.instruction |= inst.operands[1].reg << 16;
10189 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
10190 inst.instruction |= (inst.operands[2].imm & 0x0f);
10194 do_iwmmxt_wzero (void)
10196 /* WZERO reg is an alias for WANDN reg, reg, reg. */
10197 inst.instruction |= inst.operands[0].reg;
10198 inst.instruction |= inst.operands[0].reg << 12;
10199 inst.instruction |= inst.operands[0].reg << 16;
10203 do_iwmmxt_wrwrwr_or_imm5 (void)
10205 if (inst.operands[2].isreg)
10208 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
10209 _("immediate operand requires iWMMXt2"));
10211 if (inst.operands[2].imm == 0)
10213 switch ((inst.instruction >> 20) & 0xf)
10219 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
10220 inst.operands[2].imm = 16;
10221 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
10227 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
10228 inst.operands[2].imm = 32;
10229 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
10236 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
10238 wrn = (inst.instruction >> 16) & 0xf;
10239 inst.instruction &= 0xff0fff0f;
10240 inst.instruction |= wrn;
10241 /* Bail out here; the instruction is now assembled. */
10246 /* Map 32 -> 0, etc. */
10247 inst.operands[2].imm &= 0x1f;
10248 inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
10252 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
10253 operations first, then control, shift, and load/store. */
10255 /* Insns like "foo X,Y,Z". */
10258 do_mav_triple (void)
10260 inst.instruction |= inst.operands[0].reg << 16;
10261 inst.instruction |= inst.operands[1].reg;
10262 inst.instruction |= inst.operands[2].reg << 12;
10265 /* Insns like "foo W,X,Y,Z".
10266 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
10271 inst.instruction |= inst.operands[0].reg << 5;
10272 inst.instruction |= inst.operands[1].reg << 12;
10273 inst.instruction |= inst.operands[2].reg << 16;
10274 inst.instruction |= inst.operands[3].reg;
10277 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
10279 do_mav_dspsc (void)
10281 inst.instruction |= inst.operands[1].reg << 12;
10284 /* Maverick shift immediate instructions.
10285 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
10286 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
10289 do_mav_shift (void)
10291 int imm = inst.operands[2].imm;
10293 inst.instruction |= inst.operands[0].reg << 12;
10294 inst.instruction |= inst.operands[1].reg << 16;
10296 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
10297 Bits 5-7 of the insn should have bits 4-6 of the immediate.
10298 Bit 4 should be 0. */
10299 imm = (imm & 0xf) | ((imm & 0x70) << 1);
10301 inst.instruction |= imm;
10304 /* XScale instructions. Also sorted arithmetic before move. */
10306 /* Xscale multiply-accumulate (argument parse)
10309 MIAxycc acc0,Rm,Rs. */
10314 inst.instruction |= inst.operands[1].reg;
10315 inst.instruction |= inst.operands[2].reg << 12;
10318 /* Xscale move-accumulator-register (argument parse)
10320 MARcc acc0,RdLo,RdHi. */
10325 inst.instruction |= inst.operands[1].reg << 12;
10326 inst.instruction |= inst.operands[2].reg << 16;
10329 /* Xscale move-register-accumulator (argument parse)
10331 MRAcc RdLo,RdHi,acc0. */
10336 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
10337 inst.instruction |= inst.operands[0].reg << 12;
10338 inst.instruction |= inst.operands[1].reg << 16;
10341 /* Encoding functions relevant only to Thumb. */
10343 /* inst.operands[i] is a shifted-register operand; encode
10344 it into inst.instruction in the format used by Thumb32. */
10347 encode_thumb32_shifted_operand (int i)
10349 unsigned int value = inst.reloc.exp.X_add_number;
10350 unsigned int shift = inst.operands[i].shift_kind;
10352 constraint (inst.operands[i].immisreg,
10353 _("shift by register not allowed in thumb mode"));
10354 inst.instruction |= inst.operands[i].reg;
10355 if (shift == SHIFT_RRX)
10356 inst.instruction |= SHIFT_ROR << 4;
10359 constraint (inst.reloc.exp.X_op != O_constant,
10360 _("expression too complex"));
10362 constraint (value > 32
10363 || (value == 32 && (shift == SHIFT_LSL
10364 || shift == SHIFT_ROR)),
10365 _("shift expression is too large"));
10369 else if (value == 32)
10372 inst.instruction |= shift << 4;
10373 inst.instruction |= (value & 0x1c) << 10;
10374 inst.instruction |= (value & 0x03) << 6;
10379 /* inst.operands[i] was set up by parse_address. Encode it into a
10380 Thumb32 format load or store instruction. Reject forms that cannot
10381 be used with such instructions. If is_t is true, reject forms that
10382 cannot be used with a T instruction; if is_d is true, reject forms
10383 that cannot be used with a D instruction. If it is a store insn,
10384 reject PC in Rn. */
10387 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
10389 const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
10391 constraint (!inst.operands[i].isreg,
10392 _("Instruction does not support =N addresses"));
10394 inst.instruction |= inst.operands[i].reg << 16;
10395 if (inst.operands[i].immisreg)
10397 constraint (is_pc, BAD_PC_ADDRESSING);
10398 constraint (is_t || is_d, _("cannot use register index with this instruction"));
10399 constraint (inst.operands[i].negative,
10400 _("Thumb does not support negative register indexing"));
10401 constraint (inst.operands[i].postind,
10402 _("Thumb does not support register post-indexing"));
10403 constraint (inst.operands[i].writeback,
10404 _("Thumb does not support register indexing with writeback"));
10405 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
10406 _("Thumb supports only LSL in shifted register indexing"));
10408 inst.instruction |= inst.operands[i].imm;
10409 if (inst.operands[i].shifted)
10411 constraint (inst.reloc.exp.X_op != O_constant,
10412 _("expression too complex"));
10413 constraint (inst.reloc.exp.X_add_number < 0
10414 || inst.reloc.exp.X_add_number > 3,
10415 _("shift out of range"));
10416 inst.instruction |= inst.reloc.exp.X_add_number << 4;
10418 inst.reloc.type = BFD_RELOC_UNUSED;
10420 else if (inst.operands[i].preind)
10422 constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
10423 constraint (is_t && inst.operands[i].writeback,
10424 _("cannot use writeback with this instruction"));
10425 constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
10426 BAD_PC_ADDRESSING);
10430 inst.instruction |= 0x01000000;
10431 if (inst.operands[i].writeback)
10432 inst.instruction |= 0x00200000;
10436 inst.instruction |= 0x00000c00;
10437 if (inst.operands[i].writeback)
10438 inst.instruction |= 0x00000100;
10440 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
10442 else if (inst.operands[i].postind)
10444 gas_assert (inst.operands[i].writeback);
10445 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
10446 constraint (is_t, _("cannot use post-indexing with this instruction"));
10449 inst.instruction |= 0x00200000;
10451 inst.instruction |= 0x00000900;
10452 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
10454 else /* unindexed - only for coprocessor */
10455 inst.error = _("instruction does not accept unindexed addressing");
10458 /* Table of Thumb instructions which exist in both 16- and 32-bit
10459 encodings (the latter only in post-V6T2 cores). The index is the
10460 value used in the insns table below. When there is more than one
10461 possible 16-bit encoding for the instruction, this table always
10463 Also contains several pseudo-instructions used during relaxation. */
10464 #define T16_32_TAB \
10465 X(_adc, 4140, eb400000), \
10466 X(_adcs, 4140, eb500000), \
10467 X(_add, 1c00, eb000000), \
10468 X(_adds, 1c00, eb100000), \
10469 X(_addi, 0000, f1000000), \
10470 X(_addis, 0000, f1100000), \
10471 X(_add_pc,000f, f20f0000), \
10472 X(_add_sp,000d, f10d0000), \
10473 X(_adr, 000f, f20f0000), \
10474 X(_and, 4000, ea000000), \
10475 X(_ands, 4000, ea100000), \
10476 X(_asr, 1000, fa40f000), \
10477 X(_asrs, 1000, fa50f000), \
10478 X(_b, e000, f000b000), \
10479 X(_bcond, d000, f0008000), \
10480 X(_bic, 4380, ea200000), \
10481 X(_bics, 4380, ea300000), \
10482 X(_cmn, 42c0, eb100f00), \
10483 X(_cmp, 2800, ebb00f00), \
10484 X(_cpsie, b660, f3af8400), \
10485 X(_cpsid, b670, f3af8600), \
10486 X(_cpy, 4600, ea4f0000), \
10487 X(_dec_sp,80dd, f1ad0d00), \
10488 X(_eor, 4040, ea800000), \
10489 X(_eors, 4040, ea900000), \
10490 X(_inc_sp,00dd, f10d0d00), \
10491 X(_ldmia, c800, e8900000), \
10492 X(_ldr, 6800, f8500000), \
10493 X(_ldrb, 7800, f8100000), \
10494 X(_ldrh, 8800, f8300000), \
10495 X(_ldrsb, 5600, f9100000), \
10496 X(_ldrsh, 5e00, f9300000), \
10497 X(_ldr_pc,4800, f85f0000), \
10498 X(_ldr_pc2,4800, f85f0000), \
10499 X(_ldr_sp,9800, f85d0000), \
10500 X(_lsl, 0000, fa00f000), \
10501 X(_lsls, 0000, fa10f000), \
10502 X(_lsr, 0800, fa20f000), \
10503 X(_lsrs, 0800, fa30f000), \
10504 X(_mov, 2000, ea4f0000), \
10505 X(_movs, 2000, ea5f0000), \
10506 X(_mul, 4340, fb00f000), \
10507 X(_muls, 4340, ffffffff), /* no 32b muls */ \
10508 X(_mvn, 43c0, ea6f0000), \
10509 X(_mvns, 43c0, ea7f0000), \
10510 X(_neg, 4240, f1c00000), /* rsb #0 */ \
10511 X(_negs, 4240, f1d00000), /* rsbs #0 */ \
10512 X(_orr, 4300, ea400000), \
10513 X(_orrs, 4300, ea500000), \
10514 X(_pop, bc00, e8bd0000), /* ldmia sp!,... */ \
10515 X(_push, b400, e92d0000), /* stmdb sp!,... */ \
10516 X(_rev, ba00, fa90f080), \
10517 X(_rev16, ba40, fa90f090), \
10518 X(_revsh, bac0, fa90f0b0), \
10519 X(_ror, 41c0, fa60f000), \
10520 X(_rors, 41c0, fa70f000), \
10521 X(_sbc, 4180, eb600000), \
10522 X(_sbcs, 4180, eb700000), \
10523 X(_stmia, c000, e8800000), \
10524 X(_str, 6000, f8400000), \
10525 X(_strb, 7000, f8000000), \
10526 X(_strh, 8000, f8200000), \
10527 X(_str_sp,9000, f84d0000), \
10528 X(_sub, 1e00, eba00000), \
10529 X(_subs, 1e00, ebb00000), \
10530 X(_subi, 8000, f1a00000), \
10531 X(_subis, 8000, f1b00000), \
10532 X(_sxtb, b240, fa4ff080), \
10533 X(_sxth, b200, fa0ff080), \
10534 X(_tst, 4200, ea100f00), \
10535 X(_uxtb, b2c0, fa5ff080), \
10536 X(_uxth, b280, fa1ff080), \
10537 X(_nop, bf00, f3af8000), \
10538 X(_yield, bf10, f3af8001), \
10539 X(_wfe, bf20, f3af8002), \
10540 X(_wfi, bf30, f3af8003), \
10541 X(_sev, bf40, f3af8004), \
10542 X(_sevl, bf50, f3af8005), \
10543 X(_udf, de00, f7f0a000)
10545 /* To catch errors in encoding functions, the codes are all offset by
10546 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
10547 as 16-bit instructions. */
10548 #define X(a,b,c) T_MNEM##a
10549 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
10552 #define X(a,b,c) 0x##b
10553 static const unsigned short thumb_op16[] = { T16_32_TAB };
10554 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
10557 #define X(a,b,c) 0x##c
10558 static const unsigned int thumb_op32[] = { T16_32_TAB };
10559 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
10560 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
10564 /* Thumb instruction encoders, in alphabetical order. */
10566 /* ADDW or SUBW. */
10569 do_t_add_sub_w (void)
10573 Rd = inst.operands[0].reg;
10574 Rn = inst.operands[1].reg;
10576 /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
10577 is the SP-{plus,minus}-immediate form of the instruction. */
10579 constraint (Rd == REG_PC, BAD_PC);
10581 reject_bad_reg (Rd);
10583 inst.instruction |= (Rn << 16) | (Rd << 8);
10584 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10587 /* Parse an add or subtract instruction. We get here with inst.instruction
10588 equaling any of THUMB_OPCODE_add, adds, sub, or subs. */
10591 do_t_add_sub (void)
10595 Rd = inst.operands[0].reg;
10596 Rs = (inst.operands[1].present
10597 ? inst.operands[1].reg /* Rd, Rs, foo */
10598 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10601 set_it_insn_type_last ();
10603 if (unified_syntax)
10606 bfd_boolean narrow;
10609 flags = (inst.instruction == T_MNEM_adds
10610 || inst.instruction == T_MNEM_subs);
10612 narrow = !in_it_block ();
10614 narrow = in_it_block ();
10615 if (!inst.operands[2].isreg)
10619 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
10620 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
10622 add = (inst.instruction == T_MNEM_add
10623 || inst.instruction == T_MNEM_adds);
10625 if (inst.size_req != 4)
10627 /* Attempt to use a narrow opcode, with relaxation if
10629 if (Rd == REG_SP && Rs == REG_SP && !flags)
10630 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
10631 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
10632 opcode = T_MNEM_add_sp;
10633 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
10634 opcode = T_MNEM_add_pc;
10635 else if (Rd <= 7 && Rs <= 7 && narrow)
10638 opcode = add ? T_MNEM_addis : T_MNEM_subis;
10640 opcode = add ? T_MNEM_addi : T_MNEM_subi;
10644 inst.instruction = THUMB_OP16(opcode);
10645 inst.instruction |= (Rd << 4) | Rs;
10646 if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10647 || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
10649 if (inst.size_req == 2)
10650 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10652 inst.relax = opcode;
10656 constraint (inst.size_req == 2, BAD_HIREG);
10658 if (inst.size_req == 4
10659 || (inst.size_req != 2 && !opcode))
10661 constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
10662 && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
10663 THUMB1_RELOC_ONLY);
10666 constraint (add, BAD_PC);
10667 constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
10668 _("only SUBS PC, LR, #const allowed"));
10669 constraint (inst.reloc.exp.X_op != O_constant,
10670 _("expression too complex"));
10671 constraint (inst.reloc.exp.X_add_number < 0
10672 || inst.reloc.exp.X_add_number > 0xff,
10673 _("immediate value out of range"));
10674 inst.instruction = T2_SUBS_PC_LR
10675 | inst.reloc.exp.X_add_number;
10676 inst.reloc.type = BFD_RELOC_UNUSED;
10679 else if (Rs == REG_PC)
10681 /* Always use addw/subw. */
10682 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
10683 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
10687 inst.instruction = THUMB_OP32 (inst.instruction);
10688 inst.instruction = (inst.instruction & 0xe1ffffff)
10691 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10693 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
10695 inst.instruction |= Rd << 8;
10696 inst.instruction |= Rs << 16;
10701 unsigned int value = inst.reloc.exp.X_add_number;
10702 unsigned int shift = inst.operands[2].shift_kind;
10704 Rn = inst.operands[2].reg;
10705 /* See if we can do this with a 16-bit instruction. */
10706 if (!inst.operands[2].shifted && inst.size_req != 4)
10708 if (Rd > 7 || Rs > 7 || Rn > 7)
10713 inst.instruction = ((inst.instruction == T_MNEM_adds
10714 || inst.instruction == T_MNEM_add)
10716 : T_OPCODE_SUB_R3);
10717 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
10721 if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
10723 /* Thumb-1 cores (except v6-M) require at least one high
10724 register in a narrow non flag setting add. */
10725 if (Rd > 7 || Rn > 7
10726 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
10727 || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
10734 inst.instruction = T_OPCODE_ADD_HI;
10735 inst.instruction |= (Rd & 8) << 4;
10736 inst.instruction |= (Rd & 7);
10737 inst.instruction |= Rn << 3;
10743 constraint (Rd == REG_PC, BAD_PC);
10744 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
10745 constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
10746 constraint (Rs == REG_PC, BAD_PC);
10747 reject_bad_reg (Rn);
10749 /* If we get here, it can't be done in 16 bits. */
10750 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
10751 _("shift must be constant"));
10752 inst.instruction = THUMB_OP32 (inst.instruction);
10753 inst.instruction |= Rd << 8;
10754 inst.instruction |= Rs << 16;
10755 constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
10756 _("shift value over 3 not allowed in thumb mode"));
10757 constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
10758 _("only LSL shift allowed in thumb mode"));
10759 encode_thumb32_shifted_operand (2);
10764 constraint (inst.instruction == T_MNEM_adds
10765 || inst.instruction == T_MNEM_subs,
10768 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
10770 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
10771 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
10774 inst.instruction = (inst.instruction == T_MNEM_add
10775 ? 0x0000 : 0x8000);
10776 inst.instruction |= (Rd << 4) | Rs;
10777 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10781 Rn = inst.operands[2].reg;
10782 constraint (inst.operands[2].shifted, _("unshifted register required"));
10784 /* We now have Rd, Rs, and Rn set to registers. */
10785 if (Rd > 7 || Rs > 7 || Rn > 7)
10787 /* Can't do this for SUB. */
10788 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
10789 inst.instruction = T_OPCODE_ADD_HI;
10790 inst.instruction |= (Rd & 8) << 4;
10791 inst.instruction |= (Rd & 7);
10793 inst.instruction |= Rn << 3;
10795 inst.instruction |= Rs << 3;
10797 constraint (1, _("dest must overlap one source register"));
10801 inst.instruction = (inst.instruction == T_MNEM_add
10802 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
10803 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
10813 Rd = inst.operands[0].reg;
10814 reject_bad_reg (Rd);
10816 if (unified_syntax && inst.size_req == 0 && Rd <= 7)
10818 /* Defer to section relaxation. */
10819 inst.relax = inst.instruction;
10820 inst.instruction = THUMB_OP16 (inst.instruction);
10821 inst.instruction |= Rd << 4;
10823 else if (unified_syntax && inst.size_req != 2)
10825 /* Generate a 32-bit opcode. */
10826 inst.instruction = THUMB_OP32 (inst.instruction);
10827 inst.instruction |= Rd << 8;
10828 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
10829 inst.reloc.pc_rel = 1;
10833 /* Generate a 16-bit opcode. */
10834 inst.instruction = THUMB_OP16 (inst.instruction);
10835 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
10836 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
10837 inst.reloc.pc_rel = 1;
10838 inst.instruction |= Rd << 4;
10841 if (inst.reloc.exp.X_op == O_symbol
10842 && inst.reloc.exp.X_add_symbol != NULL
10843 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
10844 && THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
10845 inst.reloc.exp.X_add_number += 1;
10848 /* Arithmetic instructions for which there is just one 16-bit
10849 instruction encoding, and it allows only two low registers.
10850 For maximal compatibility with ARM syntax, we allow three register
10851 operands even when Thumb-32 instructions are not available, as long
10852 as the first two are identical. For instance, both "sbc r0,r1" and
10853 "sbc r0,r0,r1" are allowed. */
10859 Rd = inst.operands[0].reg;
10860 Rs = (inst.operands[1].present
10861 ? inst.operands[1].reg /* Rd, Rs, foo */
10862 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10863 Rn = inst.operands[2].reg;
10865 reject_bad_reg (Rd);
10866 reject_bad_reg (Rs);
10867 if (inst.operands[2].isreg)
10868 reject_bad_reg (Rn);
10870 if (unified_syntax)
10872 if (!inst.operands[2].isreg)
10874 /* For an immediate, we always generate a 32-bit opcode;
10875 section relaxation will shrink it later if possible. */
10876 inst.instruction = THUMB_OP32 (inst.instruction);
10877 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10878 inst.instruction |= Rd << 8;
10879 inst.instruction |= Rs << 16;
10880 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10884 bfd_boolean narrow;
10886 /* See if we can do this with a 16-bit instruction. */
10887 if (THUMB_SETS_FLAGS (inst.instruction))
10888 narrow = !in_it_block ();
10890 narrow = in_it_block ();
10892 if (Rd > 7 || Rn > 7 || Rs > 7)
10894 if (inst.operands[2].shifted)
10896 if (inst.size_req == 4)
10902 inst.instruction = THUMB_OP16 (inst.instruction);
10903 inst.instruction |= Rd;
10904 inst.instruction |= Rn << 3;
10908 /* If we get here, it can't be done in 16 bits. */
10909 constraint (inst.operands[2].shifted
10910 && inst.operands[2].immisreg,
10911 _("shift must be constant"));
10912 inst.instruction = THUMB_OP32 (inst.instruction);
10913 inst.instruction |= Rd << 8;
10914 inst.instruction |= Rs << 16;
10915 encode_thumb32_shifted_operand (2);
10920 /* On its face this is a lie - the instruction does set the
10921 flags. However, the only supported mnemonic in this mode
10922 says it doesn't. */
10923 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
10925 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
10926 _("unshifted register required"));
10927 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
10928 constraint (Rd != Rs,
10929 _("dest and source1 must be the same register"));
10931 inst.instruction = THUMB_OP16 (inst.instruction);
10932 inst.instruction |= Rd;
10933 inst.instruction |= Rn << 3;
10937 /* Similarly, but for instructions where the arithmetic operation is
10938 commutative, so we can allow either of them to be different from
10939 the destination operand in a 16-bit instruction. For instance, all
10940 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
10947 Rd = inst.operands[0].reg;
10948 Rs = (inst.operands[1].present
10949 ? inst.operands[1].reg /* Rd, Rs, foo */
10950 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
10951 Rn = inst.operands[2].reg;
10953 reject_bad_reg (Rd);
10954 reject_bad_reg (Rs);
10955 if (inst.operands[2].isreg)
10956 reject_bad_reg (Rn);
10958 if (unified_syntax)
10960 if (!inst.operands[2].isreg)
10962 /* For an immediate, we always generate a 32-bit opcode;
10963 section relaxation will shrink it later if possible. */
10964 inst.instruction = THUMB_OP32 (inst.instruction);
10965 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
10966 inst.instruction |= Rd << 8;
10967 inst.instruction |= Rs << 16;
10968 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
10972 bfd_boolean narrow;
10974 /* See if we can do this with a 16-bit instruction. */
10975 if (THUMB_SETS_FLAGS (inst.instruction))
10976 narrow = !in_it_block ();
10978 narrow = in_it_block ();
10980 if (Rd > 7 || Rn > 7 || Rs > 7)
10982 if (inst.operands[2].shifted)
10984 if (inst.size_req == 4)
10991 inst.instruction = THUMB_OP16 (inst.instruction);
10992 inst.instruction |= Rd;
10993 inst.instruction |= Rn << 3;
10998 inst.instruction = THUMB_OP16 (inst.instruction);
10999 inst.instruction |= Rd;
11000 inst.instruction |= Rs << 3;
11005 /* If we get here, it can't be done in 16 bits. */
11006 constraint (inst.operands[2].shifted
11007 && inst.operands[2].immisreg,
11008 _("shift must be constant"));
11009 inst.instruction = THUMB_OP32 (inst.instruction);
11010 inst.instruction |= Rd << 8;
11011 inst.instruction |= Rs << 16;
11012 encode_thumb32_shifted_operand (2);
11017 /* On its face this is a lie - the instruction does set the
11018 flags. However, the only supported mnemonic in this mode
11019 says it doesn't. */
11020 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11022 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
11023 _("unshifted register required"));
11024 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
11026 inst.instruction = THUMB_OP16 (inst.instruction);
11027 inst.instruction |= Rd;
11030 inst.instruction |= Rn << 3;
11032 inst.instruction |= Rs << 3;
11034 constraint (1, _("dest must overlap one source register"));
11042 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
11043 constraint (msb > 32, _("bit-field extends past end of register"));
11044 /* The instruction encoding stores the LSB and MSB,
11045 not the LSB and width. */
11046 Rd = inst.operands[0].reg;
11047 reject_bad_reg (Rd);
11048 inst.instruction |= Rd << 8;
11049 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
11050 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
11051 inst.instruction |= msb - 1;
11060 Rd = inst.operands[0].reg;
11061 reject_bad_reg (Rd);
11063 /* #0 in second position is alternative syntax for bfc, which is
11064 the same instruction but with REG_PC in the Rm field. */
11065 if (!inst.operands[1].isreg)
11069 Rn = inst.operands[1].reg;
11070 reject_bad_reg (Rn);
11073 msb = inst.operands[2].imm + inst.operands[3].imm;
11074 constraint (msb > 32, _("bit-field extends past end of register"));
11075 /* The instruction encoding stores the LSB and MSB,
11076 not the LSB and width. */
11077 inst.instruction |= Rd << 8;
11078 inst.instruction |= Rn << 16;
11079 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11080 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11081 inst.instruction |= msb - 1;
11089 Rd = inst.operands[0].reg;
11090 Rn = inst.operands[1].reg;
11092 reject_bad_reg (Rd);
11093 reject_bad_reg (Rn);
11095 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
11096 _("bit-field extends past end of register"));
11097 inst.instruction |= Rd << 8;
11098 inst.instruction |= Rn << 16;
11099 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
11100 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
11101 inst.instruction |= inst.operands[3].imm - 1;
11104 /* ARM V5 Thumb BLX (argument parse)
11105 BLX <target_addr> which is BLX(1)
11106 BLX <Rm> which is BLX(2)
11107 Unfortunately, there are two different opcodes for this mnemonic.
11108 So, the insns[].value is not used, and the code here zaps values
11109 into inst.instruction.
11111 ??? How to take advantage of the additional two bits of displacement
11112 available in Thumb32 mode? Need new relocation? */
11117 set_it_insn_type_last ();
11119 if (inst.operands[0].isreg)
11121 constraint (inst.operands[0].reg == REG_PC, BAD_PC);
11122 /* We have a register, so this is BLX(2). */
11123 inst.instruction |= inst.operands[0].reg << 3;
11127 /* No register. This must be BLX(1). */
11128 inst.instruction = 0xf000e800;
11129 encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
11138 bfd_reloc_code_real_type reloc;
11141 set_it_insn_type (IF_INSIDE_IT_LAST_INSN);
11143 if (in_it_block ())
11145 /* Conditional branches inside IT blocks are encoded as unconditional
11147 cond = COND_ALWAYS;
11152 if (cond != COND_ALWAYS)
11153 opcode = T_MNEM_bcond;
11155 opcode = inst.instruction;
11158 && (inst.size_req == 4
11159 || (inst.size_req != 2
11160 && (inst.operands[0].hasreloc
11161 || inst.reloc.exp.X_op == O_constant))))
11163 inst.instruction = THUMB_OP32(opcode);
11164 if (cond == COND_ALWAYS)
11165 reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
11168 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
11169 _("selected architecture does not support "
11170 "wide conditional branch instruction"));
11172 gas_assert (cond != 0xF);
11173 inst.instruction |= cond << 22;
11174 reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
11179 inst.instruction = THUMB_OP16(opcode);
11180 if (cond == COND_ALWAYS)
11181 reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
11184 inst.instruction |= cond << 8;
11185 reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
11187 /* Allow section relaxation. */
11188 if (unified_syntax && inst.size_req != 2)
11189 inst.relax = opcode;
11191 inst.reloc.type = reloc;
11192 inst.reloc.pc_rel = 1;
11195 /* Actually do the work for Thumb state bkpt and hlt. The only difference
11196 between the two is the maximum immediate allowed - which is passed in
11199 do_t_bkpt_hlt1 (int range)
11201 constraint (inst.cond != COND_ALWAYS,
11202 _("instruction is always unconditional"));
11203 if (inst.operands[0].present)
11205 constraint (inst.operands[0].imm > range,
11206 _("immediate value out of range"));
11207 inst.instruction |= inst.operands[0].imm;
11210 set_it_insn_type (NEUTRAL_IT_INSN);
11216 do_t_bkpt_hlt1 (63);
11222 do_t_bkpt_hlt1 (255);
11226 do_t_branch23 (void)
11228 set_it_insn_type_last ();
11229 encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);
11231 /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
11232 this file. We used to simply ignore the PLT reloc type here --
11233 the branch encoding is now needed to deal with TLSCALL relocs.
11234 So if we see a PLT reloc now, put it back to how it used to be to
11235 keep the preexisting behaviour. */
11236 if (inst.reloc.type == BFD_RELOC_ARM_PLT32)
11237 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
11239 #if defined(OBJ_COFF)
11240 /* If the destination of the branch is a defined symbol which does not have
11241 the THUMB_FUNC attribute, then we must be calling a function which has
11242 the (interfacearm) attribute. We look for the Thumb entry point to that
11243 function and change the branch to refer to that function instead. */
11244 if ( inst.reloc.exp.X_op == O_symbol
11245 && inst.reloc.exp.X_add_symbol != NULL
11246 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
11247 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
11248 inst.reloc.exp.X_add_symbol =
11249 find_real_start (inst.reloc.exp.X_add_symbol);
11256 set_it_insn_type_last ();
11257 inst.instruction |= inst.operands[0].reg << 3;
11258 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
11259 should cause the alignment to be checked once it is known. This is
11260 because BX PC only works if the instruction is word aligned. */
11268 set_it_insn_type_last ();
11269 Rm = inst.operands[0].reg;
11270 reject_bad_reg (Rm);
11271 inst.instruction |= Rm << 16;
11280 Rd = inst.operands[0].reg;
11281 Rm = inst.operands[1].reg;
11283 reject_bad_reg (Rd);
11284 reject_bad_reg (Rm);
11286 inst.instruction |= Rd << 8;
11287 inst.instruction |= Rm << 16;
11288 inst.instruction |= Rm;
11294 set_it_insn_type (OUTSIDE_IT_INSN);
11300 set_it_insn_type (OUTSIDE_IT_INSN);
11301 inst.instruction |= inst.operands[0].imm;
11307 set_it_insn_type (OUTSIDE_IT_INSN);
11309 && (inst.operands[1].present || inst.size_req == 4)
11310 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
11312 unsigned int imod = (inst.instruction & 0x0030) >> 4;
11313 inst.instruction = 0xf3af8000;
11314 inst.instruction |= imod << 9;
11315 inst.instruction |= inst.operands[0].imm << 5;
11316 if (inst.operands[1].present)
11317 inst.instruction |= 0x100 | inst.operands[1].imm;
11321 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
11322 && (inst.operands[0].imm & 4),
11323 _("selected processor does not support 'A' form "
11324 "of this instruction"));
11325 constraint (inst.operands[1].present || inst.size_req == 4,
11326 _("Thumb does not support the 2-argument "
11327 "form of this instruction"));
11328 inst.instruction |= inst.operands[0].imm;
11332 /* THUMB CPY instruction (argument parse). */
11337 if (inst.size_req == 4)
11339 inst.instruction = THUMB_OP32 (T_MNEM_mov);
11340 inst.instruction |= inst.operands[0].reg << 8;
11341 inst.instruction |= inst.operands[1].reg;
11345 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
11346 inst.instruction |= (inst.operands[0].reg & 0x7);
11347 inst.instruction |= inst.operands[1].reg << 3;
11354 set_it_insn_type (OUTSIDE_IT_INSN);
11355 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11356 inst.instruction |= inst.operands[0].reg;
11357 inst.reloc.pc_rel = 1;
11358 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
11364 inst.instruction |= inst.operands[0].imm;
11370 unsigned Rd, Rn, Rm;
11372 Rd = inst.operands[0].reg;
11373 Rn = (inst.operands[1].present
11374 ? inst.operands[1].reg : Rd);
11375 Rm = inst.operands[2].reg;
11377 reject_bad_reg (Rd);
11378 reject_bad_reg (Rn);
11379 reject_bad_reg (Rm);
11381 inst.instruction |= Rd << 8;
11382 inst.instruction |= Rn << 16;
11383 inst.instruction |= Rm;
11389 if (unified_syntax && inst.size_req == 4)
11390 inst.instruction = THUMB_OP32 (inst.instruction);
11392 inst.instruction = THUMB_OP16 (inst.instruction);
11398 unsigned int cond = inst.operands[0].imm;
11400 set_it_insn_type (IT_INSN);
11401 now_it.mask = (inst.instruction & 0xf) | 0x10;
11403 now_it.warn_deprecated = FALSE;
11405 /* If the condition is a negative condition, invert the mask. */
11406 if ((cond & 0x1) == 0x0)
11408 unsigned int mask = inst.instruction & 0x000f;
11410 if ((mask & 0x7) == 0)
11412 /* No conversion needed. */
11413 now_it.block_length = 1;
11415 else if ((mask & 0x3) == 0)
11418 now_it.block_length = 2;
11420 else if ((mask & 0x1) == 0)
11423 now_it.block_length = 3;
11428 now_it.block_length = 4;
11431 inst.instruction &= 0xfff0;
11432 inst.instruction |= mask;
11435 inst.instruction |= cond << 4;
11438 /* Helper function used for both push/pop and ldm/stm. */
11440 encode_thumb2_ldmstm (int base, unsigned mask, bfd_boolean writeback)
11444 load = (inst.instruction & (1 << 20)) != 0;
11446 if (mask & (1 << 13))
11447 inst.error = _("SP not allowed in register list");
11449 if ((mask & (1 << base)) != 0
11451 inst.error = _("having the base register in the register list when "
11452 "using write back is UNPREDICTABLE");
11456 if (mask & (1 << 15))
11458 if (mask & (1 << 14))
11459 inst.error = _("LR and PC should not both be in register list");
11461 set_it_insn_type_last ();
11466 if (mask & (1 << 15))
11467 inst.error = _("PC not allowed in register list");
11470 if ((mask & (mask - 1)) == 0)
11472 /* Single register transfers implemented as str/ldr. */
11475 if (inst.instruction & (1 << 23))
11476 inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
11478 inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
11482 if (inst.instruction & (1 << 23))
11483 inst.instruction = 0x00800000; /* ia -> [base] */
11485 inst.instruction = 0x00000c04; /* db -> [base, #-4] */
11488 inst.instruction |= 0xf8400000;
11490 inst.instruction |= 0x00100000;
11492 mask = ffs (mask) - 1;
11495 else if (writeback)
11496 inst.instruction |= WRITE_BACK;
11498 inst.instruction |= mask;
11499 inst.instruction |= base << 16;
11505 /* This really doesn't seem worth it. */
11506 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
11507 _("expression too complex"));
11508 constraint (inst.operands[1].writeback,
11509 _("Thumb load/store multiple does not support {reglist}^"));
11511 if (unified_syntax)
11513 bfd_boolean narrow;
11517 /* See if we can use a 16-bit instruction. */
11518 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
11519 && inst.size_req != 4
11520 && !(inst.operands[1].imm & ~0xff))
11522 mask = 1 << inst.operands[0].reg;
11524 if (inst.operands[0].reg <= 7)
11526 if (inst.instruction == T_MNEM_stmia
11527 ? inst.operands[0].writeback
11528 : (inst.operands[0].writeback
11529 == !(inst.operands[1].imm & mask)))
11531 if (inst.instruction == T_MNEM_stmia
11532 && (inst.operands[1].imm & mask)
11533 && (inst.operands[1].imm & (mask - 1)))
11534 as_warn (_("value stored for r%d is UNKNOWN"),
11535 inst.operands[0].reg);
11537 inst.instruction = THUMB_OP16 (inst.instruction);
11538 inst.instruction |= inst.operands[0].reg << 8;
11539 inst.instruction |= inst.operands[1].imm;
11542 else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
11544 /* This means 1 register in reg list one of 3 situations:
11545 1. Instruction is stmia, but without writeback.
11546 2. lmdia without writeback, but with Rn not in
11548 3. ldmia with writeback, but with Rn in reglist.
11549 Case 3 is UNPREDICTABLE behaviour, so we handle
11550 case 1 and 2 which can be converted into a 16-bit
11551 str or ldr. The SP cases are handled below. */
11552 unsigned long opcode;
11553 /* First, record an error for Case 3. */
11554 if (inst.operands[1].imm & mask
11555 && inst.operands[0].writeback)
11557 _("having the base register in the register list when "
11558 "using write back is UNPREDICTABLE");
11560 opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
11562 inst.instruction = THUMB_OP16 (opcode);
11563 inst.instruction |= inst.operands[0].reg << 3;
11564 inst.instruction |= (ffs (inst.operands[1].imm)-1);
11568 else if (inst.operands[0] .reg == REG_SP)
11570 if (inst.operands[0].writeback)
11573 THUMB_OP16 (inst.instruction == T_MNEM_stmia
11574 ? T_MNEM_push : T_MNEM_pop);
11575 inst.instruction |= inst.operands[1].imm;
11578 else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
11581 THUMB_OP16 (inst.instruction == T_MNEM_stmia
11582 ? T_MNEM_str_sp : T_MNEM_ldr_sp);
11583 inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
11591 if (inst.instruction < 0xffff)
11592 inst.instruction = THUMB_OP32 (inst.instruction);
11594 encode_thumb2_ldmstm (inst.operands[0].reg, inst.operands[1].imm,
11595 inst.operands[0].writeback);
11600 constraint (inst.operands[0].reg > 7
11601 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
11602 constraint (inst.instruction != T_MNEM_ldmia
11603 && inst.instruction != T_MNEM_stmia,
11604 _("Thumb-2 instruction only valid in unified syntax"));
11605 if (inst.instruction == T_MNEM_stmia)
11607 if (!inst.operands[0].writeback)
11608 as_warn (_("this instruction will write back the base register"));
11609 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
11610 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
11611 as_warn (_("value stored for r%d is UNKNOWN"),
11612 inst.operands[0].reg);
11616 if (!inst.operands[0].writeback
11617 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
11618 as_warn (_("this instruction will write back the base register"));
11619 else if (inst.operands[0].writeback
11620 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
11621 as_warn (_("this instruction will not write back the base register"));
11624 inst.instruction = THUMB_OP16 (inst.instruction);
11625 inst.instruction |= inst.operands[0].reg << 8;
11626 inst.instruction |= inst.operands[1].imm;
11633 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11634 || inst.operands[1].postind || inst.operands[1].writeback
11635 || inst.operands[1].immisreg || inst.operands[1].shifted
11636 || inst.operands[1].negative,
11639 constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
11641 inst.instruction |= inst.operands[0].reg << 12;
11642 inst.instruction |= inst.operands[1].reg << 16;
11643 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
11649 if (!inst.operands[1].present)
11651 constraint (inst.operands[0].reg == REG_LR,
11652 _("r14 not allowed as first register "
11653 "when second register is omitted"));
11654 inst.operands[1].reg = inst.operands[0].reg + 1;
11656 constraint (inst.operands[0].reg == inst.operands[1].reg,
11659 inst.instruction |= inst.operands[0].reg << 12;
11660 inst.instruction |= inst.operands[1].reg << 8;
11661 inst.instruction |= inst.operands[2].reg << 16;
11667 unsigned long opcode;
11670 if (inst.operands[0].isreg
11671 && !inst.operands[0].preind
11672 && inst.operands[0].reg == REG_PC)
11673 set_it_insn_type_last ();
11675 opcode = inst.instruction;
11676 if (unified_syntax)
11678 if (!inst.operands[1].isreg)
11680 if (opcode <= 0xffff)
11681 inst.instruction = THUMB_OP32 (opcode);
11682 if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
11685 if (inst.operands[1].isreg
11686 && !inst.operands[1].writeback
11687 && !inst.operands[1].shifted && !inst.operands[1].postind
11688 && !inst.operands[1].negative && inst.operands[0].reg <= 7
11689 && opcode <= 0xffff
11690 && inst.size_req != 4)
11692 /* Insn may have a 16-bit form. */
11693 Rn = inst.operands[1].reg;
11694 if (inst.operands[1].immisreg)
11696 inst.instruction = THUMB_OP16 (opcode);
11698 if (Rn <= 7 && inst.operands[1].imm <= 7)
11700 else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
11701 reject_bad_reg (inst.operands[1].imm);
11703 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
11704 && opcode != T_MNEM_ldrsb)
11705 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
11706 || (Rn == REG_SP && opcode == T_MNEM_str))
11713 if (inst.reloc.pc_rel)
11714 opcode = T_MNEM_ldr_pc2;
11716 opcode = T_MNEM_ldr_pc;
11720 if (opcode == T_MNEM_ldr)
11721 opcode = T_MNEM_ldr_sp;
11723 opcode = T_MNEM_str_sp;
11725 inst.instruction = inst.operands[0].reg << 8;
11729 inst.instruction = inst.operands[0].reg;
11730 inst.instruction |= inst.operands[1].reg << 3;
11732 inst.instruction |= THUMB_OP16 (opcode);
11733 if (inst.size_req == 2)
11734 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11736 inst.relax = opcode;
11740 /* Definitely a 32-bit variant. */
11742 /* Warning for Erratum 752419. */
11743 if (opcode == T_MNEM_ldr
11744 && inst.operands[0].reg == REG_SP
11745 && inst.operands[1].writeback == 1
11746 && !inst.operands[1].immisreg)
11748 if (no_cpu_selected ()
11749 || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
11750 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
11751 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
11752 as_warn (_("This instruction may be unpredictable "
11753 "if executed on M-profile cores "
11754 "with interrupts enabled."));
11757 /* Do some validations regarding addressing modes. */
11758 if (inst.operands[1].immisreg)
11759 reject_bad_reg (inst.operands[1].imm);
11761 constraint (inst.operands[1].writeback == 1
11762 && inst.operands[0].reg == inst.operands[1].reg,
11765 inst.instruction = THUMB_OP32 (opcode);
11766 inst.instruction |= inst.operands[0].reg << 12;
11767 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
11768 check_ldr_r15_aligned ();
11772 constraint (inst.operands[0].reg > 7, BAD_HIREG);
11774 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
11776 /* Only [Rn,Rm] is acceptable. */
11777 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
11778 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
11779 || inst.operands[1].postind || inst.operands[1].shifted
11780 || inst.operands[1].negative,
11781 _("Thumb does not support this addressing mode"));
11782 inst.instruction = THUMB_OP16 (inst.instruction);
11786 inst.instruction = THUMB_OP16 (inst.instruction);
11787 if (!inst.operands[1].isreg)
11788 if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
11791 constraint (!inst.operands[1].preind
11792 || inst.operands[1].shifted
11793 || inst.operands[1].writeback,
11794 _("Thumb does not support this addressing mode"));
11795 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
11797 constraint (inst.instruction & 0x0600,
11798 _("byte or halfword not valid for base register"));
11799 constraint (inst.operands[1].reg == REG_PC
11800 && !(inst.instruction & THUMB_LOAD_BIT),
11801 _("r15 based store not allowed"));
11802 constraint (inst.operands[1].immisreg,
11803 _("invalid base register for register offset"));
11805 if (inst.operands[1].reg == REG_PC)
11806 inst.instruction = T_OPCODE_LDR_PC;
11807 else if (inst.instruction & THUMB_LOAD_BIT)
11808 inst.instruction = T_OPCODE_LDR_SP;
11810 inst.instruction = T_OPCODE_STR_SP;
11812 inst.instruction |= inst.operands[0].reg << 8;
11813 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11817 constraint (inst.operands[1].reg > 7, BAD_HIREG);
11818 if (!inst.operands[1].immisreg)
11820 /* Immediate offset. */
11821 inst.instruction |= inst.operands[0].reg;
11822 inst.instruction |= inst.operands[1].reg << 3;
11823 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
11827 /* Register offset. */
11828 constraint (inst.operands[1].imm > 7, BAD_HIREG);
11829 constraint (inst.operands[1].negative,
11830 _("Thumb does not support this addressing mode"));
11833 switch (inst.instruction)
11835 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
11836 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
11837 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
11838 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
11839 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
11840 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
11841 case 0x5600 /* ldrsb */:
11842 case 0x5e00 /* ldrsh */: break;
11846 inst.instruction |= inst.operands[0].reg;
11847 inst.instruction |= inst.operands[1].reg << 3;
11848 inst.instruction |= inst.operands[1].imm << 6;
11854 if (!inst.operands[1].present)
11856 inst.operands[1].reg = inst.operands[0].reg + 1;
11857 constraint (inst.operands[0].reg == REG_LR,
11858 _("r14 not allowed here"));
11859 constraint (inst.operands[0].reg == REG_R12,
11860 _("r12 not allowed here"));
11863 if (inst.operands[2].writeback
11864 && (inst.operands[0].reg == inst.operands[2].reg
11865 || inst.operands[1].reg == inst.operands[2].reg))
11866 as_warn (_("base register written back, and overlaps "
11867 "one of transfer registers"));
11869 inst.instruction |= inst.operands[0].reg << 12;
11870 inst.instruction |= inst.operands[1].reg << 8;
11871 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
11877 inst.instruction |= inst.operands[0].reg << 12;
11878 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
11884 unsigned Rd, Rn, Rm, Ra;
11886 Rd = inst.operands[0].reg;
11887 Rn = inst.operands[1].reg;
11888 Rm = inst.operands[2].reg;
11889 Ra = inst.operands[3].reg;
11891 reject_bad_reg (Rd);
11892 reject_bad_reg (Rn);
11893 reject_bad_reg (Rm);
11894 reject_bad_reg (Ra);
11896 inst.instruction |= Rd << 8;
11897 inst.instruction |= Rn << 16;
11898 inst.instruction |= Rm;
11899 inst.instruction |= Ra << 12;
11905 unsigned RdLo, RdHi, Rn, Rm;
11907 RdLo = inst.operands[0].reg;
11908 RdHi = inst.operands[1].reg;
11909 Rn = inst.operands[2].reg;
11910 Rm = inst.operands[3].reg;
11912 reject_bad_reg (RdLo);
11913 reject_bad_reg (RdHi);
11914 reject_bad_reg (Rn);
11915 reject_bad_reg (Rm);
11917 inst.instruction |= RdLo << 12;
11918 inst.instruction |= RdHi << 8;
11919 inst.instruction |= Rn << 16;
11920 inst.instruction |= Rm;
11924 do_t_mov_cmp (void)
11928 Rn = inst.operands[0].reg;
11929 Rm = inst.operands[1].reg;
11932 set_it_insn_type_last ();
11934 if (unified_syntax)
11936 int r0off = (inst.instruction == T_MNEM_mov
11937 || inst.instruction == T_MNEM_movs) ? 8 : 16;
11938 unsigned long opcode;
11939 bfd_boolean narrow;
11940 bfd_boolean low_regs;
11942 low_regs = (Rn <= 7 && Rm <= 7);
11943 opcode = inst.instruction;
11944 if (in_it_block ())
11945 narrow = opcode != T_MNEM_movs;
11947 narrow = opcode != T_MNEM_movs || low_regs;
11948 if (inst.size_req == 4
11949 || inst.operands[1].shifted)
11952 /* MOVS PC, LR is encoded as SUBS PC, LR, #0. */
11953 if (opcode == T_MNEM_movs && inst.operands[1].isreg
11954 && !inst.operands[1].shifted
11958 inst.instruction = T2_SUBS_PC_LR;
11962 if (opcode == T_MNEM_cmp)
11964 constraint (Rn == REG_PC, BAD_PC);
11967 /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
11969 warn_deprecated_sp (Rm);
11970 /* R15 was documented as a valid choice for Rm in ARMv6,
11971 but as UNPREDICTABLE in ARMv7. ARM's proprietary
11972 tools reject R15, so we do too. */
11973 constraint (Rm == REG_PC, BAD_PC);
11976 reject_bad_reg (Rm);
11978 else if (opcode == T_MNEM_mov
11979 || opcode == T_MNEM_movs)
11981 if (inst.operands[1].isreg)
11983 if (opcode == T_MNEM_movs)
11985 reject_bad_reg (Rn);
11986 reject_bad_reg (Rm);
11990 /* This is mov.n. */
11991 if ((Rn == REG_SP || Rn == REG_PC)
11992 && (Rm == REG_SP || Rm == REG_PC))
11994 as_tsktsk (_("Use of r%u as a source register is "
11995 "deprecated when r%u is the destination "
11996 "register."), Rm, Rn);
12001 /* This is mov.w. */
12002 constraint (Rn == REG_PC, BAD_PC);
12003 constraint (Rm == REG_PC, BAD_PC);
12004 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12005 constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
12009 reject_bad_reg (Rn);
12012 if (!inst.operands[1].isreg)
12014 /* Immediate operand. */
12015 if (!in_it_block () && opcode == T_MNEM_mov)
12017 if (low_regs && narrow)
12019 inst.instruction = THUMB_OP16 (opcode);
12020 inst.instruction |= Rn << 8;
12021 if (inst.reloc.type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
12022 || inst.reloc.type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
12024 if (inst.size_req == 2)
12025 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
12027 inst.relax = opcode;
12032 constraint (inst.reloc.type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
12033 && inst.reloc.type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
12034 THUMB1_RELOC_ONLY);
12036 inst.instruction = THUMB_OP32 (inst.instruction);
12037 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12038 inst.instruction |= Rn << r0off;
12039 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12042 else if (inst.operands[1].shifted && inst.operands[1].immisreg
12043 && (inst.instruction == T_MNEM_mov
12044 || inst.instruction == T_MNEM_movs))
12046 /* Register shifts are encoded as separate shift instructions. */
12047 bfd_boolean flags = (inst.instruction == T_MNEM_movs);
12049 if (in_it_block ())
12054 if (inst.size_req == 4)
12057 if (!low_regs || inst.operands[1].imm > 7)
12063 switch (inst.operands[1].shift_kind)
12066 opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
12069 opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
12072 opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
12075 opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
12081 inst.instruction = opcode;
12084 inst.instruction |= Rn;
12085 inst.instruction |= inst.operands[1].imm << 3;
12090 inst.instruction |= CONDS_BIT;
12092 inst.instruction |= Rn << 8;
12093 inst.instruction |= Rm << 16;
12094 inst.instruction |= inst.operands[1].imm;
12099 /* Some mov with immediate shift have narrow variants.
12100 Register shifts are handled above. */
12101 if (low_regs && inst.operands[1].shifted
12102 && (inst.instruction == T_MNEM_mov
12103 || inst.instruction == T_MNEM_movs))
12105 if (in_it_block ())
12106 narrow = (inst.instruction == T_MNEM_mov);
12108 narrow = (inst.instruction == T_MNEM_movs);
12113 switch (inst.operands[1].shift_kind)
12115 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
12116 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
12117 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
12118 default: narrow = FALSE; break;
12124 inst.instruction |= Rn;
12125 inst.instruction |= Rm << 3;
12126 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12130 inst.instruction = THUMB_OP32 (inst.instruction);
12131 inst.instruction |= Rn << r0off;
12132 encode_thumb32_shifted_operand (1);
12136 switch (inst.instruction)
12139 /* In v4t or v5t a move of two lowregs produces unpredictable
12140 results. Don't allow this. */
12143 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
12144 "MOV Rd, Rs with two low registers is not "
12145 "permitted on this architecture");
12146 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12150 inst.instruction = T_OPCODE_MOV_HR;
12151 inst.instruction |= (Rn & 0x8) << 4;
12152 inst.instruction |= (Rn & 0x7);
12153 inst.instruction |= Rm << 3;
12157 /* We know we have low registers at this point.
12158 Generate LSLS Rd, Rs, #0. */
12159 inst.instruction = T_OPCODE_LSL_I;
12160 inst.instruction |= Rn;
12161 inst.instruction |= Rm << 3;
12167 inst.instruction = T_OPCODE_CMP_LR;
12168 inst.instruction |= Rn;
12169 inst.instruction |= Rm << 3;
12173 inst.instruction = T_OPCODE_CMP_HR;
12174 inst.instruction |= (Rn & 0x8) << 4;
12175 inst.instruction |= (Rn & 0x7);
12176 inst.instruction |= Rm << 3;
12183 inst.instruction = THUMB_OP16 (inst.instruction);
12185 /* PR 10443: Do not silently ignore shifted operands. */
12186 constraint (inst.operands[1].shifted,
12187 _("shifts in CMP/MOV instructions are only supported in unified syntax"));
12189 if (inst.operands[1].isreg)
12191 if (Rn < 8 && Rm < 8)
12193 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
12194 since a MOV instruction produces unpredictable results. */
12195 if (inst.instruction == T_OPCODE_MOV_I8)
12196 inst.instruction = T_OPCODE_ADD_I3;
12198 inst.instruction = T_OPCODE_CMP_LR;
12200 inst.instruction |= Rn;
12201 inst.instruction |= Rm << 3;
12205 if (inst.instruction == T_OPCODE_MOV_I8)
12206 inst.instruction = T_OPCODE_MOV_HR;
12208 inst.instruction = T_OPCODE_CMP_HR;
12214 constraint (Rn > 7,
12215 _("only lo regs allowed with immediate"));
12216 inst.instruction |= Rn << 8;
12217 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
12228 top = (inst.instruction & 0x00800000) != 0;
12229 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
12231 constraint (top, _(":lower16: not allowed in this instruction"));
12232 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
12234 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
12236 constraint (!top, _(":upper16: not allowed in this instruction"));
12237 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
12240 Rd = inst.operands[0].reg;
12241 reject_bad_reg (Rd);
12243 inst.instruction |= Rd << 8;
12244 if (inst.reloc.type == BFD_RELOC_UNUSED)
12246 imm = inst.reloc.exp.X_add_number;
12247 inst.instruction |= (imm & 0xf000) << 4;
12248 inst.instruction |= (imm & 0x0800) << 15;
12249 inst.instruction |= (imm & 0x0700) << 4;
12250 inst.instruction |= (imm & 0x00ff);
12255 do_t_mvn_tst (void)
12259 Rn = inst.operands[0].reg;
12260 Rm = inst.operands[1].reg;
12262 if (inst.instruction == T_MNEM_cmp
12263 || inst.instruction == T_MNEM_cmn)
12264 constraint (Rn == REG_PC, BAD_PC);
12266 reject_bad_reg (Rn);
12267 reject_bad_reg (Rm);
12269 if (unified_syntax)
12271 int r0off = (inst.instruction == T_MNEM_mvn
12272 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
12273 bfd_boolean narrow;
12275 if (inst.size_req == 4
12276 || inst.instruction > 0xffff
12277 || inst.operands[1].shifted
12278 || Rn > 7 || Rm > 7)
12280 else if (inst.instruction == T_MNEM_cmn
12281 || inst.instruction == T_MNEM_tst)
12283 else if (THUMB_SETS_FLAGS (inst.instruction))
12284 narrow = !in_it_block ();
12286 narrow = in_it_block ();
12288 if (!inst.operands[1].isreg)
12290 /* For an immediate, we always generate a 32-bit opcode;
12291 section relaxation will shrink it later if possible. */
12292 if (inst.instruction < 0xffff)
12293 inst.instruction = THUMB_OP32 (inst.instruction);
12294 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12295 inst.instruction |= Rn << r0off;
12296 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12300 /* See if we can do this with a 16-bit instruction. */
12303 inst.instruction = THUMB_OP16 (inst.instruction);
12304 inst.instruction |= Rn;
12305 inst.instruction |= Rm << 3;
12309 constraint (inst.operands[1].shifted
12310 && inst.operands[1].immisreg,
12311 _("shift must be constant"));
12312 if (inst.instruction < 0xffff)
12313 inst.instruction = THUMB_OP32 (inst.instruction);
12314 inst.instruction |= Rn << r0off;
12315 encode_thumb32_shifted_operand (1);
12321 constraint (inst.instruction > 0xffff
12322 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
12323 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
12324 _("unshifted register required"));
12325 constraint (Rn > 7 || Rm > 7,
12328 inst.instruction = THUMB_OP16 (inst.instruction);
12329 inst.instruction |= Rn;
12330 inst.instruction |= Rm << 3;
12339 if (do_vfp_nsyn_mrs () == SUCCESS)
12342 Rd = inst.operands[0].reg;
12343 reject_bad_reg (Rd);
12344 inst.instruction |= Rd << 8;
12346 if (inst.operands[1].isreg)
12348 unsigned br = inst.operands[1].reg;
12349 if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
12350 as_bad (_("bad register for mrs"));
12352 inst.instruction |= br & (0xf << 16);
12353 inst.instruction |= (br & 0x300) >> 4;
12354 inst.instruction |= (br & SPSR_BIT) >> 2;
12358 int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
12360 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
12362 /* PR gas/12698: The constraint is only applied for m_profile.
12363 If the user has specified -march=all, we want to ignore it as
12364 we are building for any CPU type, including non-m variants. */
12365 bfd_boolean m_profile =
12366 !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
12367 constraint ((flags != 0) && m_profile, _("selected processor does "
12368 "not support requested special purpose register"));
12371 /* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
12373 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
12374 _("'APSR', 'CPSR' or 'SPSR' expected"));
12376 inst.instruction |= (flags & SPSR_BIT) >> 2;
12377 inst.instruction |= inst.operands[1].imm & 0xff;
12378 inst.instruction |= 0xf0000;
12388 if (do_vfp_nsyn_msr () == SUCCESS)
12391 constraint (!inst.operands[1].isreg,
12392 _("Thumb encoding does not support an immediate here"));
12394 if (inst.operands[0].isreg)
12395 flags = (int)(inst.operands[0].reg);
12397 flags = inst.operands[0].imm;
12399 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
12401 int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
12403 /* PR gas/12698: The constraint is only applied for m_profile.
12404 If the user has specified -march=all, we want to ignore it as
12405 we are building for any CPU type, including non-m variants. */
12406 bfd_boolean m_profile =
12407 !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
12408 constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
12409 && (bits & ~(PSR_s | PSR_f)) != 0)
12410 || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
12411 && bits != PSR_f)) && m_profile,
12412 _("selected processor does not support requested special "
12413 "purpose register"));
12416 constraint ((flags & 0xff) != 0, _("selected processor does not support "
12417 "requested special purpose register"));
12419 Rn = inst.operands[1].reg;
12420 reject_bad_reg (Rn);
12422 inst.instruction |= (flags & SPSR_BIT) >> 2;
12423 inst.instruction |= (flags & 0xf0000) >> 8;
12424 inst.instruction |= (flags & 0x300) >> 4;
12425 inst.instruction |= (flags & 0xff);
12426 inst.instruction |= Rn << 16;
12432 bfd_boolean narrow;
12433 unsigned Rd, Rn, Rm;
12435 if (!inst.operands[2].present)
12436 inst.operands[2].reg = inst.operands[0].reg;
12438 Rd = inst.operands[0].reg;
12439 Rn = inst.operands[1].reg;
12440 Rm = inst.operands[2].reg;
12442 if (unified_syntax)
12444 if (inst.size_req == 4
12450 else if (inst.instruction == T_MNEM_muls)
12451 narrow = !in_it_block ();
12453 narrow = in_it_block ();
12457 constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
12458 constraint (Rn > 7 || Rm > 7,
12465 /* 16-bit MULS/Conditional MUL. */
12466 inst.instruction = THUMB_OP16 (inst.instruction);
12467 inst.instruction |= Rd;
12470 inst.instruction |= Rm << 3;
12472 inst.instruction |= Rn << 3;
12474 constraint (1, _("dest must overlap one source register"));
12478 constraint (inst.instruction != T_MNEM_mul,
12479 _("Thumb-2 MUL must not set flags"));
12481 inst.instruction = THUMB_OP32 (inst.instruction);
12482 inst.instruction |= Rd << 8;
12483 inst.instruction |= Rn << 16;
12484 inst.instruction |= Rm << 0;
12486 reject_bad_reg (Rd);
12487 reject_bad_reg (Rn);
12488 reject_bad_reg (Rm);
12495 unsigned RdLo, RdHi, Rn, Rm;
12497 RdLo = inst.operands[0].reg;
12498 RdHi = inst.operands[1].reg;
12499 Rn = inst.operands[2].reg;
12500 Rm = inst.operands[3].reg;
12502 reject_bad_reg (RdLo);
12503 reject_bad_reg (RdHi);
12504 reject_bad_reg (Rn);
12505 reject_bad_reg (Rm);
12507 inst.instruction |= RdLo << 12;
12508 inst.instruction |= RdHi << 8;
12509 inst.instruction |= Rn << 16;
12510 inst.instruction |= Rm;
12513 as_tsktsk (_("rdhi and rdlo must be different"));
12519 set_it_insn_type (NEUTRAL_IT_INSN);
12521 if (unified_syntax)
12523 if (inst.size_req == 4 || inst.operands[0].imm > 15)
12525 inst.instruction = THUMB_OP32 (inst.instruction);
12526 inst.instruction |= inst.operands[0].imm;
12530 /* PR9722: Check for Thumb2 availability before
12531 generating a thumb2 nop instruction. */
12532 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
12534 inst.instruction = THUMB_OP16 (inst.instruction);
12535 inst.instruction |= inst.operands[0].imm << 4;
12538 inst.instruction = 0x46c0;
12543 constraint (inst.operands[0].present,
12544 _("Thumb does not support NOP with hints"));
12545 inst.instruction = 0x46c0;
12552 if (unified_syntax)
12554 bfd_boolean narrow;
12556 if (THUMB_SETS_FLAGS (inst.instruction))
12557 narrow = !in_it_block ();
12559 narrow = in_it_block ();
12560 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12562 if (inst.size_req == 4)
12567 inst.instruction = THUMB_OP32 (inst.instruction);
12568 inst.instruction |= inst.operands[0].reg << 8;
12569 inst.instruction |= inst.operands[1].reg << 16;
12573 inst.instruction = THUMB_OP16 (inst.instruction);
12574 inst.instruction |= inst.operands[0].reg;
12575 inst.instruction |= inst.operands[1].reg << 3;
12580 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
12582 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12584 inst.instruction = THUMB_OP16 (inst.instruction);
12585 inst.instruction |= inst.operands[0].reg;
12586 inst.instruction |= inst.operands[1].reg << 3;
12595 Rd = inst.operands[0].reg;
12596 Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
12598 reject_bad_reg (Rd);
12599 /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN. */
12600 reject_bad_reg (Rn);
12602 inst.instruction |= Rd << 8;
12603 inst.instruction |= Rn << 16;
12605 if (!inst.operands[2].isreg)
12607 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12608 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12614 Rm = inst.operands[2].reg;
12615 reject_bad_reg (Rm);
12617 constraint (inst.operands[2].shifted
12618 && inst.operands[2].immisreg,
12619 _("shift must be constant"));
12620 encode_thumb32_shifted_operand (2);
12627 unsigned Rd, Rn, Rm;
12629 Rd = inst.operands[0].reg;
12630 Rn = inst.operands[1].reg;
12631 Rm = inst.operands[2].reg;
12633 reject_bad_reg (Rd);
12634 reject_bad_reg (Rn);
12635 reject_bad_reg (Rm);
12637 inst.instruction |= Rd << 8;
12638 inst.instruction |= Rn << 16;
12639 inst.instruction |= Rm;
12640 if (inst.operands[3].present)
12642 unsigned int val = inst.reloc.exp.X_add_number;
12643 constraint (inst.reloc.exp.X_op != O_constant,
12644 _("expression too complex"));
12645 inst.instruction |= (val & 0x1c) << 10;
12646 inst.instruction |= (val & 0x03) << 6;
12653 if (!inst.operands[3].present)
12657 inst.instruction &= ~0x00000020;
12659 /* PR 10168. Swap the Rm and Rn registers. */
12660 Rtmp = inst.operands[1].reg;
12661 inst.operands[1].reg = inst.operands[2].reg;
12662 inst.operands[2].reg = Rtmp;
12670 if (inst.operands[0].immisreg)
12671 reject_bad_reg (inst.operands[0].imm);
12673 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
12677 do_t_push_pop (void)
12681 constraint (inst.operands[0].writeback,
12682 _("push/pop do not support {reglist}^"));
12683 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
12684 _("expression too complex"));
12686 mask = inst.operands[0].imm;
12687 if (inst.size_req != 4 && (mask & ~0xff) == 0)
12688 inst.instruction = THUMB_OP16 (inst.instruction) | mask;
12689 else if (inst.size_req != 4
12690 && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
12691 ? REG_LR : REG_PC)))
12693 inst.instruction = THUMB_OP16 (inst.instruction);
12694 inst.instruction |= THUMB_PP_PC_LR;
12695 inst.instruction |= mask & 0xff;
12697 else if (unified_syntax)
12699 inst.instruction = THUMB_OP32 (inst.instruction);
12700 encode_thumb2_ldmstm (13, mask, TRUE);
12704 inst.error = _("invalid register list to push/pop instruction");
12714 Rd = inst.operands[0].reg;
12715 Rm = inst.operands[1].reg;
12717 reject_bad_reg (Rd);
12718 reject_bad_reg (Rm);
12720 inst.instruction |= Rd << 8;
12721 inst.instruction |= Rm << 16;
12722 inst.instruction |= Rm;
12730 Rd = inst.operands[0].reg;
12731 Rm = inst.operands[1].reg;
12733 reject_bad_reg (Rd);
12734 reject_bad_reg (Rm);
12736 if (Rd <= 7 && Rm <= 7
12737 && inst.size_req != 4)
12739 inst.instruction = THUMB_OP16 (inst.instruction);
12740 inst.instruction |= Rd;
12741 inst.instruction |= Rm << 3;
12743 else if (unified_syntax)
12745 inst.instruction = THUMB_OP32 (inst.instruction);
12746 inst.instruction |= Rd << 8;
12747 inst.instruction |= Rm << 16;
12748 inst.instruction |= Rm;
12751 inst.error = BAD_HIREG;
12759 Rd = inst.operands[0].reg;
12760 Rm = inst.operands[1].reg;
12762 reject_bad_reg (Rd);
12763 reject_bad_reg (Rm);
12765 inst.instruction |= Rd << 8;
12766 inst.instruction |= Rm;
12774 Rd = inst.operands[0].reg;
12775 Rs = (inst.operands[1].present
12776 ? inst.operands[1].reg /* Rd, Rs, foo */
12777 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
12779 reject_bad_reg (Rd);
12780 reject_bad_reg (Rs);
12781 if (inst.operands[2].isreg)
12782 reject_bad_reg (inst.operands[2].reg);
12784 inst.instruction |= Rd << 8;
12785 inst.instruction |= Rs << 16;
12786 if (!inst.operands[2].isreg)
12788 bfd_boolean narrow;
12790 if ((inst.instruction & 0x00100000) != 0)
12791 narrow = !in_it_block ();
12793 narrow = in_it_block ();
12795 if (Rd > 7 || Rs > 7)
12798 if (inst.size_req == 4 || !unified_syntax)
12801 if (inst.reloc.exp.X_op != O_constant
12802 || inst.reloc.exp.X_add_number != 0)
12805 /* Turn rsb #0 into 16-bit neg. We should probably do this via
12806 relaxation, but it doesn't seem worth the hassle. */
12809 inst.reloc.type = BFD_RELOC_UNUSED;
12810 inst.instruction = THUMB_OP16 (T_MNEM_negs);
12811 inst.instruction |= Rs << 3;
12812 inst.instruction |= Rd;
12816 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
12817 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
12821 encode_thumb32_shifted_operand (2);
12827 if (warn_on_deprecated
12828 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
12829 as_tsktsk (_("setend use is deprecated for ARMv8"));
12831 set_it_insn_type (OUTSIDE_IT_INSN);
12832 if (inst.operands[0].imm)
12833 inst.instruction |= 0x8;
12839 if (!inst.operands[1].present)
12840 inst.operands[1].reg = inst.operands[0].reg;
12842 if (unified_syntax)
12844 bfd_boolean narrow;
12847 switch (inst.instruction)
12850 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
12852 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
12854 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
12856 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
12860 if (THUMB_SETS_FLAGS (inst.instruction))
12861 narrow = !in_it_block ();
12863 narrow = in_it_block ();
12864 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
12866 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
12868 if (inst.operands[2].isreg
12869 && (inst.operands[1].reg != inst.operands[0].reg
12870 || inst.operands[2].reg > 7))
12872 if (inst.size_req == 4)
12875 reject_bad_reg (inst.operands[0].reg);
12876 reject_bad_reg (inst.operands[1].reg);
12880 if (inst.operands[2].isreg)
12882 reject_bad_reg (inst.operands[2].reg);
12883 inst.instruction = THUMB_OP32 (inst.instruction);
12884 inst.instruction |= inst.operands[0].reg << 8;
12885 inst.instruction |= inst.operands[1].reg << 16;
12886 inst.instruction |= inst.operands[2].reg;
12888 /* PR 12854: Error on extraneous shifts. */
12889 constraint (inst.operands[2].shifted,
12890 _("extraneous shift as part of operand to shift insn"));
12894 inst.operands[1].shifted = 1;
12895 inst.operands[1].shift_kind = shift_kind;
12896 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
12897 ? T_MNEM_movs : T_MNEM_mov);
12898 inst.instruction |= inst.operands[0].reg << 8;
12899 encode_thumb32_shifted_operand (1);
12900 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
12901 inst.reloc.type = BFD_RELOC_UNUSED;
12906 if (inst.operands[2].isreg)
12908 switch (shift_kind)
12910 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
12911 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
12912 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
12913 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
12917 inst.instruction |= inst.operands[0].reg;
12918 inst.instruction |= inst.operands[2].reg << 3;
12920 /* PR 12854: Error on extraneous shifts. */
12921 constraint (inst.operands[2].shifted,
12922 _("extraneous shift as part of operand to shift insn"));
12926 switch (shift_kind)
12928 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
12929 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
12930 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
12933 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12934 inst.instruction |= inst.operands[0].reg;
12935 inst.instruction |= inst.operands[1].reg << 3;
12941 constraint (inst.operands[0].reg > 7
12942 || inst.operands[1].reg > 7, BAD_HIREG);
12943 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
12945 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
12947 constraint (inst.operands[2].reg > 7, BAD_HIREG);
12948 constraint (inst.operands[0].reg != inst.operands[1].reg,
12949 _("source1 and dest must be same register"));
12951 switch (inst.instruction)
12953 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
12954 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
12955 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
12956 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
12960 inst.instruction |= inst.operands[0].reg;
12961 inst.instruction |= inst.operands[2].reg << 3;
12963 /* PR 12854: Error on extraneous shifts. */
12964 constraint (inst.operands[2].shifted,
12965 _("extraneous shift as part of operand to shift insn"));
12969 switch (inst.instruction)
12971 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
12972 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
12973 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
12974 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
12977 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
12978 inst.instruction |= inst.operands[0].reg;
12979 inst.instruction |= inst.operands[1].reg << 3;
12987 unsigned Rd, Rn, Rm;
12989 Rd = inst.operands[0].reg;
12990 Rn = inst.operands[1].reg;
12991 Rm = inst.operands[2].reg;
12993 reject_bad_reg (Rd);
12994 reject_bad_reg (Rn);
12995 reject_bad_reg (Rm);
12997 inst.instruction |= Rd << 8;
12998 inst.instruction |= Rn << 16;
12999 inst.instruction |= Rm;
13005 unsigned Rd, Rn, Rm;
13007 Rd = inst.operands[0].reg;
13008 Rm = inst.operands[1].reg;
13009 Rn = inst.operands[2].reg;
13011 reject_bad_reg (Rd);
13012 reject_bad_reg (Rn);
13013 reject_bad_reg (Rm);
13015 inst.instruction |= Rd << 8;
13016 inst.instruction |= Rn << 16;
13017 inst.instruction |= Rm;
13023 unsigned int value = inst.reloc.exp.X_add_number;
13024 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
13025 _("SMC is not permitted on this architecture"));
13026 constraint (inst.reloc.exp.X_op != O_constant,
13027 _("expression too complex"));
13028 inst.reloc.type = BFD_RELOC_UNUSED;
13029 inst.instruction |= (value & 0xf000) >> 12;
13030 inst.instruction |= (value & 0x0ff0);
13031 inst.instruction |= (value & 0x000f) << 16;
13032 /* PR gas/15623: SMC instructions must be last in an IT block. */
13033 set_it_insn_type_last ();
13039 unsigned int value = inst.reloc.exp.X_add_number;
13041 inst.reloc.type = BFD_RELOC_UNUSED;
13042 inst.instruction |= (value & 0x0fff);
13043 inst.instruction |= (value & 0xf000) << 4;
13047 do_t_ssat_usat (int bias)
13051 Rd = inst.operands[0].reg;
13052 Rn = inst.operands[2].reg;
13054 reject_bad_reg (Rd);
13055 reject_bad_reg (Rn);
13057 inst.instruction |= Rd << 8;
13058 inst.instruction |= inst.operands[1].imm - bias;
13059 inst.instruction |= Rn << 16;
13061 if (inst.operands[3].present)
13063 offsetT shift_amount = inst.reloc.exp.X_add_number;
13065 inst.reloc.type = BFD_RELOC_UNUSED;
13067 constraint (inst.reloc.exp.X_op != O_constant,
13068 _("expression too complex"));
13070 if (shift_amount != 0)
13072 constraint (shift_amount > 31,
13073 _("shift expression is too large"));
13075 if (inst.operands[3].shift_kind == SHIFT_ASR)
13076 inst.instruction |= 0x00200000; /* sh bit. */
13078 inst.instruction |= (shift_amount & 0x1c) << 10;
13079 inst.instruction |= (shift_amount & 0x03) << 6;
13087 do_t_ssat_usat (1);
13095 Rd = inst.operands[0].reg;
13096 Rn = inst.operands[2].reg;
13098 reject_bad_reg (Rd);
13099 reject_bad_reg (Rn);
13101 inst.instruction |= Rd << 8;
13102 inst.instruction |= inst.operands[1].imm - 1;
13103 inst.instruction |= Rn << 16;
13109 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
13110 || inst.operands[2].postind || inst.operands[2].writeback
13111 || inst.operands[2].immisreg || inst.operands[2].shifted
13112 || inst.operands[2].negative,
13115 constraint (inst.operands[2].reg == REG_PC, BAD_PC);
13117 inst.instruction |= inst.operands[0].reg << 8;
13118 inst.instruction |= inst.operands[1].reg << 12;
13119 inst.instruction |= inst.operands[2].reg << 16;
13120 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
13126 if (!inst.operands[2].present)
13127 inst.operands[2].reg = inst.operands[1].reg + 1;
13129 constraint (inst.operands[0].reg == inst.operands[1].reg
13130 || inst.operands[0].reg == inst.operands[2].reg
13131 || inst.operands[0].reg == inst.operands[3].reg,
13134 inst.instruction |= inst.operands[0].reg;
13135 inst.instruction |= inst.operands[1].reg << 12;
13136 inst.instruction |= inst.operands[2].reg << 8;
13137 inst.instruction |= inst.operands[3].reg << 16;
13143 unsigned Rd, Rn, Rm;
13145 Rd = inst.operands[0].reg;
13146 Rn = inst.operands[1].reg;
13147 Rm = inst.operands[2].reg;
13149 reject_bad_reg (Rd);
13150 reject_bad_reg (Rn);
13151 reject_bad_reg (Rm);
13153 inst.instruction |= Rd << 8;
13154 inst.instruction |= Rn << 16;
13155 inst.instruction |= Rm;
13156 inst.instruction |= inst.operands[3].imm << 4;
13164 Rd = inst.operands[0].reg;
13165 Rm = inst.operands[1].reg;
13167 reject_bad_reg (Rd);
13168 reject_bad_reg (Rm);
13170 if (inst.instruction <= 0xffff
13171 && inst.size_req != 4
13172 && Rd <= 7 && Rm <= 7
13173 && (!inst.operands[2].present || inst.operands[2].imm == 0))
13175 inst.instruction = THUMB_OP16 (inst.instruction);
13176 inst.instruction |= Rd;
13177 inst.instruction |= Rm << 3;
13179 else if (unified_syntax)
13181 if (inst.instruction <= 0xffff)
13182 inst.instruction = THUMB_OP32 (inst.instruction);
13183 inst.instruction |= Rd << 8;
13184 inst.instruction |= Rm;
13185 inst.instruction |= inst.operands[2].imm << 4;
13189 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
13190 _("Thumb encoding does not support rotation"));
13191 constraint (1, BAD_HIREG);
13198 inst.reloc.type = BFD_RELOC_ARM_SWI;
13207 half = (inst.instruction & 0x10) != 0;
13208 set_it_insn_type_last ();
13209 constraint (inst.operands[0].immisreg,
13210 _("instruction requires register index"));
13212 Rn = inst.operands[0].reg;
13213 Rm = inst.operands[0].imm;
13215 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
13216 constraint (Rn == REG_SP, BAD_SP);
13217 reject_bad_reg (Rm);
13219 constraint (!half && inst.operands[0].shifted,
13220 _("instruction does not allow shifted index"));
13221 inst.instruction |= (Rn << 16) | Rm;
13227 if (!inst.operands[0].present)
13228 inst.operands[0].imm = 0;
13230 if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
13232 constraint (inst.size_req == 2,
13233 _("immediate value out of range"));
13234 inst.instruction = THUMB_OP32 (inst.instruction);
13235 inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
13236 inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
13240 inst.instruction = THUMB_OP16 (inst.instruction);
13241 inst.instruction |= inst.operands[0].imm;
13244 set_it_insn_type (NEUTRAL_IT_INSN);
13251 do_t_ssat_usat (0);
13259 Rd = inst.operands[0].reg;
13260 Rn = inst.operands[2].reg;
13262 reject_bad_reg (Rd);
13263 reject_bad_reg (Rn);
13265 inst.instruction |= Rd << 8;
13266 inst.instruction |= inst.operands[1].imm;
13267 inst.instruction |= Rn << 16;
13270 /* Neon instruction encoder helpers. */
13272 /* Encodings for the different types for various Neon opcodes. */
13274 /* An "invalid" code for the following tables. */
13277 struct neon_tab_entry
13280 unsigned float_or_poly;
13281 unsigned scalar_or_imm;
13284 /* Map overloaded Neon opcodes to their respective encodings. */
13285 #define NEON_ENC_TAB \
13286 X(vabd, 0x0000700, 0x1200d00, N_INV), \
13287 X(vmax, 0x0000600, 0x0000f00, N_INV), \
13288 X(vmin, 0x0000610, 0x0200f00, N_INV), \
13289 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
13290 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
13291 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
13292 X(vadd, 0x0000800, 0x0000d00, N_INV), \
13293 X(vsub, 0x1000800, 0x0200d00, N_INV), \
13294 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
13295 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
13296 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
13297 /* Register variants of the following two instructions are encoded as
13298 vcge / vcgt with the operands reversed. */ \
13299 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
13300 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
13301 X(vfma, N_INV, 0x0000c10, N_INV), \
13302 X(vfms, N_INV, 0x0200c10, N_INV), \
13303 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
13304 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
13305 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
13306 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
13307 X(vmlal, 0x0800800, N_INV, 0x0800240), \
13308 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
13309 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
13310 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
13311 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
13312 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
13313 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
13314 X(vqrdmlah, 0x3000b10, N_INV, 0x0800e40), \
13315 X(vqrdmlsh, 0x3000c10, N_INV, 0x0800f40), \
13316 X(vshl, 0x0000400, N_INV, 0x0800510), \
13317 X(vqshl, 0x0000410, N_INV, 0x0800710), \
13318 X(vand, 0x0000110, N_INV, 0x0800030), \
13319 X(vbic, 0x0100110, N_INV, 0x0800030), \
13320 X(veor, 0x1000110, N_INV, N_INV), \
13321 X(vorn, 0x0300110, N_INV, 0x0800010), \
13322 X(vorr, 0x0200110, N_INV, 0x0800010), \
13323 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
13324 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
13325 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
13326 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
13327 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
13328 X(vst1, 0x0000000, 0x0800000, N_INV), \
13329 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
13330 X(vst2, 0x0000100, 0x0800100, N_INV), \
13331 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
13332 X(vst3, 0x0000200, 0x0800200, N_INV), \
13333 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
13334 X(vst4, 0x0000300, 0x0800300, N_INV), \
13335 X(vmovn, 0x1b20200, N_INV, N_INV), \
13336 X(vtrn, 0x1b20080, N_INV, N_INV), \
13337 X(vqmovn, 0x1b20200, N_INV, N_INV), \
13338 X(vqmovun, 0x1b20240, N_INV, N_INV), \
13339 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
13340 X(vnmla, 0xe100a40, 0xe100b40, N_INV), \
13341 X(vnmls, 0xe100a00, 0xe100b00, N_INV), \
13342 X(vfnma, 0xe900a40, 0xe900b40, N_INV), \
13343 X(vfnms, 0xe900a00, 0xe900b00, N_INV), \
13344 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
13345 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
13346 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
13347 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV), \
13348 X(vseleq, 0xe000a00, N_INV, N_INV), \
13349 X(vselvs, 0xe100a00, N_INV, N_INV), \
13350 X(vselge, 0xe200a00, N_INV, N_INV), \
13351 X(vselgt, 0xe300a00, N_INV, N_INV), \
13352 X(vmaxnm, 0xe800a00, 0x3000f10, N_INV), \
13353 X(vminnm, 0xe800a40, 0x3200f10, N_INV), \
13354 X(vcvta, 0xebc0a40, 0x3bb0000, N_INV), \
13355 X(vrintr, 0xeb60a40, 0x3ba0400, N_INV), \
13356 X(vrinta, 0xeb80a40, 0x3ba0400, N_INV), \
13357 X(aes, 0x3b00300, N_INV, N_INV), \
13358 X(sha3op, 0x2000c00, N_INV, N_INV), \
13359 X(sha1h, 0x3b902c0, N_INV, N_INV), \
13360 X(sha2op, 0x3ba0380, N_INV, N_INV)
13364 #define X(OPC,I,F,S) N_MNEM_##OPC
13369 static const struct neon_tab_entry neon_enc_tab[] =
13371 #define X(OPC,I,F,S) { (I), (F), (S) }
13376 /* Do not use these macros; instead, use NEON_ENCODE defined below. */
13377 #define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13378 #define NEON_ENC_ARMREG_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13379 #define NEON_ENC_POLY_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13380 #define NEON_ENC_FLOAT_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13381 #define NEON_ENC_SCALAR_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13382 #define NEON_ENC_IMMED_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13383 #define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
13384 #define NEON_ENC_LANE_(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
13385 #define NEON_ENC_DUP_(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
13386 #define NEON_ENC_SINGLE_(X) \
13387 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
13388 #define NEON_ENC_DOUBLE_(X) \
13389 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
13390 #define NEON_ENC_FPV8_(X) \
13391 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
13393 #define NEON_ENCODE(type, inst) \
13396 inst.instruction = NEON_ENC_##type##_ (inst.instruction); \
13397 inst.is_neon = 1; \
13401 #define check_neon_suffixes \
13404 if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon) \
13406 as_bad (_("invalid neon suffix for non neon instruction")); \
13412 /* Define shapes for instruction operands. The following mnemonic characters
13413 are used in this table:
13415 F - VFP S<n> register
13416 D - Neon D<n> register
13417 Q - Neon Q<n> register
13421 L - D<n> register list
13423 This table is used to generate various data:
13424 - enumerations of the form NS_DDR to be used as arguments to
13426 - a table classifying shapes into single, double, quad, mixed.
13427 - a table used to drive neon_select_shape. */
13429 #define NEON_SHAPE_DEF \
13430 X(3, (D, D, D), DOUBLE), \
13431 X(3, (Q, Q, Q), QUAD), \
13432 X(3, (D, D, I), DOUBLE), \
13433 X(3, (Q, Q, I), QUAD), \
13434 X(3, (D, D, S), DOUBLE), \
13435 X(3, (Q, Q, S), QUAD), \
13436 X(2, (D, D), DOUBLE), \
13437 X(2, (Q, Q), QUAD), \
13438 X(2, (D, S), DOUBLE), \
13439 X(2, (Q, S), QUAD), \
13440 X(2, (D, R), DOUBLE), \
13441 X(2, (Q, R), QUAD), \
13442 X(2, (D, I), DOUBLE), \
13443 X(2, (Q, I), QUAD), \
13444 X(3, (D, L, D), DOUBLE), \
13445 X(2, (D, Q), MIXED), \
13446 X(2, (Q, D), MIXED), \
13447 X(3, (D, Q, I), MIXED), \
13448 X(3, (Q, D, I), MIXED), \
13449 X(3, (Q, D, D), MIXED), \
13450 X(3, (D, Q, Q), MIXED), \
13451 X(3, (Q, Q, D), MIXED), \
13452 X(3, (Q, D, S), MIXED), \
13453 X(3, (D, Q, S), MIXED), \
13454 X(4, (D, D, D, I), DOUBLE), \
13455 X(4, (Q, Q, Q, I), QUAD), \
13456 X(4, (D, D, S, I), DOUBLE), \
13457 X(4, (Q, Q, S, I), QUAD), \
13458 X(2, (F, F), SINGLE), \
13459 X(3, (F, F, F), SINGLE), \
13460 X(2, (F, I), SINGLE), \
13461 X(2, (F, D), MIXED), \
13462 X(2, (D, F), MIXED), \
13463 X(3, (F, F, I), MIXED), \
13464 X(4, (R, R, F, F), SINGLE), \
13465 X(4, (F, F, R, R), SINGLE), \
13466 X(3, (D, R, R), DOUBLE), \
13467 X(3, (R, R, D), DOUBLE), \
13468 X(2, (S, R), SINGLE), \
13469 X(2, (R, S), SINGLE), \
13470 X(2, (F, R), SINGLE), \
13471 X(2, (R, F), SINGLE), \
13472 /* Half float shape supported so far. */\
13473 X (2, (H, D), MIXED), \
13474 X (2, (D, H), MIXED), \
13475 X (2, (H, F), MIXED), \
13476 X (2, (F, H), MIXED), \
13477 X (2, (H, H), HALF), \
13478 X (2, (H, R), HALF), \
13479 X (2, (R, H), HALF), \
13480 X (2, (H, I), HALF), \
13481 X (3, (H, H, H), HALF), \
13482 X (3, (H, F, I), MIXED), \
13483 X (3, (F, H, I), MIXED), \
13484 X (3, (D, H, H), MIXED), \
13485 X (3, (D, H, S), MIXED)
13487 #define S2(A,B) NS_##A##B
13488 #define S3(A,B,C) NS_##A##B##C
13489 #define S4(A,B,C,D) NS_##A##B##C##D
13491 #define X(N, L, C) S##N L
13504 enum neon_shape_class
13513 #define X(N, L, C) SC_##C
13515 static enum neon_shape_class neon_shape_class[] =
13534 /* Register widths of above. */
13535 static unsigned neon_shape_el_size[] =
13547 struct neon_shape_info
13550 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
13553 #define S2(A,B) { SE_##A, SE_##B }
13554 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
13555 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
13557 #define X(N, L, C) { N, S##N L }
13559 static struct neon_shape_info neon_shape_tab[] =
13569 /* Bit masks used in type checking given instructions.
13570 'N_EQK' means the type must be the same as (or based on in some way) the key
13571 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
13572 set, various other bits can be set as well in order to modify the meaning of
13573 the type constraint. */
13575 enum neon_type_mask
13599 N_KEY = 0x1000000, /* Key element (main type specifier). */
13600 N_EQK = 0x2000000, /* Given operand has the same type & size as the key. */
13601 N_VFP = 0x4000000, /* VFP mode: operand size must match register width. */
13602 N_UNT = 0x8000000, /* Must be explicitly untyped. */
13603 N_DBL = 0x0000001, /* If N_EQK, this operand is twice the size. */
13604 N_HLF = 0x0000002, /* If N_EQK, this operand is half the size. */
13605 N_SGN = 0x0000004, /* If N_EQK, this operand is forced to be signed. */
13606 N_UNS = 0x0000008, /* If N_EQK, this operand is forced to be unsigned. */
13607 N_INT = 0x0000010, /* If N_EQK, this operand is forced to be integer. */
13608 N_FLT = 0x0000020, /* If N_EQK, this operand is forced to be float. */
13609 N_SIZ = 0x0000040, /* If N_EQK, this operand is forced to be size-only. */
13611 N_MAX_NONSPECIAL = N_P64
13614 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
13616 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
13617 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
13618 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
13619 #define N_S_32 (N_S8 | N_S16 | N_S32)
13620 #define N_F_16_32 (N_F16 | N_F32)
13621 #define N_SUF_32 (N_SU_32 | N_F_16_32)
13622 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
13623 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
13624 #define N_F_ALL (N_F16 | N_F32 | N_F64)
13626 /* Pass this as the first type argument to neon_check_type to ignore types
13628 #define N_IGNORE_TYPE (N_KEY | N_EQK)
13630 /* Select a "shape" for the current instruction (describing register types or
13631 sizes) from a list of alternatives. Return NS_NULL if the current instruction
13632 doesn't fit. For non-polymorphic shapes, checking is usually done as a
13633 function of operand parsing, so this function doesn't need to be called.
13634 Shapes should be listed in order of decreasing length. */
13636 static enum neon_shape
13637 neon_select_shape (enum neon_shape shape, ...)
13640 enum neon_shape first_shape = shape;
13642 /* Fix missing optional operands. FIXME: we don't know at this point how
13643 many arguments we should have, so this makes the assumption that we have
13644 > 1. This is true of all current Neon opcodes, I think, but may not be
13645 true in the future. */
13646 if (!inst.operands[1].present)
13647 inst.operands[1] = inst.operands[0];
13649 va_start (ap, shape);
13651 for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
13656 for (j = 0; j < neon_shape_tab[shape].els; j++)
13658 if (!inst.operands[j].present)
13664 switch (neon_shape_tab[shape].el[j])
13666 /* If a .f16, .16, .u16, .s16 type specifier is given over
13667 a VFP single precision register operand, it's essentially
13668 means only half of the register is used.
13670 If the type specifier is given after the mnemonics, the
13671 information is stored in inst.vectype. If the type specifier
13672 is given after register operand, the information is stored
13673 in inst.operands[].vectype.
13675 When there is only one type specifier, and all the register
13676 operands are the same type of hardware register, the type
13677 specifier applies to all register operands.
13679 If no type specifier is given, the shape is inferred from
13680 operand information.
13683 vadd.f16 s0, s1, s2: NS_HHH
13684 vabs.f16 s0, s1: NS_HH
13685 vmov.f16 s0, r1: NS_HR
13686 vmov.f16 r0, s1: NS_RH
13687 vcvt.f16 r0, s1: NS_RH
13688 vcvt.f16.s32 s2, s2, #29: NS_HFI
13689 vcvt.f16.s32 s2, s2: NS_HF
13692 if (!(inst.operands[j].isreg
13693 && inst.operands[j].isvec
13694 && inst.operands[j].issingle
13695 && !inst.operands[j].isquad
13696 && ((inst.vectype.elems == 1
13697 && inst.vectype.el[0].size == 16)
13698 || (inst.vectype.elems > 1
13699 && inst.vectype.el[j].size == 16)
13700 || (inst.vectype.elems == 0
13701 && inst.operands[j].vectype.type != NT_invtype
13702 && inst.operands[j].vectype.size == 16))))
13707 if (!(inst.operands[j].isreg
13708 && inst.operands[j].isvec
13709 && inst.operands[j].issingle
13710 && !inst.operands[j].isquad
13711 && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
13712 || (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
13713 || (inst.vectype.elems == 0
13714 && (inst.operands[j].vectype.size == 32
13715 || inst.operands[j].vectype.type == NT_invtype)))))
13720 if (!(inst.operands[j].isreg
13721 && inst.operands[j].isvec
13722 && !inst.operands[j].isquad
13723 && !inst.operands[j].issingle))
13728 if (!(inst.operands[j].isreg
13729 && !inst.operands[j].isvec))
13734 if (!(inst.operands[j].isreg
13735 && inst.operands[j].isvec
13736 && inst.operands[j].isquad
13737 && !inst.operands[j].issingle))
13742 if (!(!inst.operands[j].isreg
13743 && !inst.operands[j].isscalar))
13748 if (!(!inst.operands[j].isreg
13749 && inst.operands[j].isscalar))
13759 if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
13760 /* We've matched all the entries in the shape table, and we don't
13761 have any left over operands which have not been matched. */
13767 if (shape == NS_NULL && first_shape != NS_NULL)
13768 first_error (_("invalid instruction shape"));
13773 /* True if SHAPE is predominantly a quadword operation (most of the time, this
13774 means the Q bit should be set). */
13777 neon_quad (enum neon_shape shape)
13779 return neon_shape_class[shape] == SC_QUAD;
13783 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
13786 /* Allow modification to be made to types which are constrained to be
13787 based on the key element, based on bits set alongside N_EQK. */
13788 if ((typebits & N_EQK) != 0)
13790 if ((typebits & N_HLF) != 0)
13792 else if ((typebits & N_DBL) != 0)
13794 if ((typebits & N_SGN) != 0)
13795 *g_type = NT_signed;
13796 else if ((typebits & N_UNS) != 0)
13797 *g_type = NT_unsigned;
13798 else if ((typebits & N_INT) != 0)
13799 *g_type = NT_integer;
13800 else if ((typebits & N_FLT) != 0)
13801 *g_type = NT_float;
13802 else if ((typebits & N_SIZ) != 0)
13803 *g_type = NT_untyped;
13807 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
13808 operand type, i.e. the single type specified in a Neon instruction when it
13809 is the only one given. */
13811 static struct neon_type_el
13812 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
13814 struct neon_type_el dest = *key;
13816 gas_assert ((thisarg & N_EQK) != 0);
13818 neon_modify_type_size (thisarg, &dest.type, &dest.size);
13823 /* Convert Neon type and size into compact bitmask representation. */
13825 static enum neon_type_mask
13826 type_chk_of_el_type (enum neon_el_type type, unsigned size)
13833 case 8: return N_8;
13834 case 16: return N_16;
13835 case 32: return N_32;
13836 case 64: return N_64;
13844 case 8: return N_I8;
13845 case 16: return N_I16;
13846 case 32: return N_I32;
13847 case 64: return N_I64;
13855 case 16: return N_F16;
13856 case 32: return N_F32;
13857 case 64: return N_F64;
13865 case 8: return N_P8;
13866 case 16: return N_P16;
13867 case 64: return N_P64;
13875 case 8: return N_S8;
13876 case 16: return N_S16;
13877 case 32: return N_S32;
13878 case 64: return N_S64;
13886 case 8: return N_U8;
13887 case 16: return N_U16;
13888 case 32: return N_U32;
13889 case 64: return N_U64;
13900 /* Convert compact Neon bitmask type representation to a type and size. Only
13901 handles the case where a single bit is set in the mask. */
13904 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
13905 enum neon_type_mask mask)
13907 if ((mask & N_EQK) != 0)
13910 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
13912 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16)) != 0)
13914 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
13916 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
13921 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
13923 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
13924 *type = NT_unsigned;
13925 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
13926 *type = NT_integer;
13927 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
13928 *type = NT_untyped;
13929 else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
13931 else if ((mask & (N_F_ALL)) != 0)
13939 /* Modify a bitmask of allowed types. This is only needed for type
13943 modify_types_allowed (unsigned allowed, unsigned mods)
13946 enum neon_el_type type;
13952 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
13954 if (el_type_of_type_chk (&type, &size,
13955 (enum neon_type_mask) (allowed & i)) == SUCCESS)
13957 neon_modify_type_size (mods, &type, &size);
13958 destmask |= type_chk_of_el_type (type, size);
13965 /* Check type and return type classification.
13966 The manual states (paraphrase): If one datatype is given, it indicates the
13968 - the second operand, if there is one
13969 - the operand, if there is no second operand
13970 - the result, if there are no operands.
13971 This isn't quite good enough though, so we use a concept of a "key" datatype
13972 which is set on a per-instruction basis, which is the one which matters when
13973 only one data type is written.
13974 Note: this function has side-effects (e.g. filling in missing operands). All
13975 Neon instructions should call it before performing bit encoding. */
13977 static struct neon_type_el
13978 neon_check_type (unsigned els, enum neon_shape ns, ...)
13981 unsigned i, pass, key_el = 0;
13982 unsigned types[NEON_MAX_TYPE_ELS];
13983 enum neon_el_type k_type = NT_invtype;
13984 unsigned k_size = -1u;
13985 struct neon_type_el badtype = {NT_invtype, -1};
13986 unsigned key_allowed = 0;
13988 /* Optional registers in Neon instructions are always (not) in operand 1.
13989 Fill in the missing operand here, if it was omitted. */
13990 if (els > 1 && !inst.operands[1].present)
13991 inst.operands[1] = inst.operands[0];
13993 /* Suck up all the varargs. */
13995 for (i = 0; i < els; i++)
13997 unsigned thisarg = va_arg (ap, unsigned);
13998 if (thisarg == N_IGNORE_TYPE)
14003 types[i] = thisarg;
14004 if ((thisarg & N_KEY) != 0)
14009 if (inst.vectype.elems > 0)
14010 for (i = 0; i < els; i++)
14011 if (inst.operands[i].vectype.type != NT_invtype)
14013 first_error (_("types specified in both the mnemonic and operands"));
14017 /* Duplicate inst.vectype elements here as necessary.
14018 FIXME: No idea if this is exactly the same as the ARM assembler,
14019 particularly when an insn takes one register and one non-register
14021 if (inst.vectype.elems == 1 && els > 1)
14024 inst.vectype.elems = els;
14025 inst.vectype.el[key_el] = inst.vectype.el[0];
14026 for (j = 0; j < els; j++)
14028 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
14031 else if (inst.vectype.elems == 0 && els > 0)
14034 /* No types were given after the mnemonic, so look for types specified
14035 after each operand. We allow some flexibility here; as long as the
14036 "key" operand has a type, we can infer the others. */
14037 for (j = 0; j < els; j++)
14038 if (inst.operands[j].vectype.type != NT_invtype)
14039 inst.vectype.el[j] = inst.operands[j].vectype;
14041 if (inst.operands[key_el].vectype.type != NT_invtype)
14043 for (j = 0; j < els; j++)
14044 if (inst.operands[j].vectype.type == NT_invtype)
14045 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
14050 first_error (_("operand types can't be inferred"));
14054 else if (inst.vectype.elems != els)
14056 first_error (_("type specifier has the wrong number of parts"));
14060 for (pass = 0; pass < 2; pass++)
14062 for (i = 0; i < els; i++)
14064 unsigned thisarg = types[i];
14065 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
14066 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
14067 enum neon_el_type g_type = inst.vectype.el[i].type;
14068 unsigned g_size = inst.vectype.el[i].size;
14070 /* Decay more-specific signed & unsigned types to sign-insensitive
14071 integer types if sign-specific variants are unavailable. */
14072 if ((g_type == NT_signed || g_type == NT_unsigned)
14073 && (types_allowed & N_SU_ALL) == 0)
14074 g_type = NT_integer;
14076 /* If only untyped args are allowed, decay any more specific types to
14077 them. Some instructions only care about signs for some element
14078 sizes, so handle that properly. */
14079 if (((types_allowed & N_UNT) == 0)
14080 && ((g_size == 8 && (types_allowed & N_8) != 0)
14081 || (g_size == 16 && (types_allowed & N_16) != 0)
14082 || (g_size == 32 && (types_allowed & N_32) != 0)
14083 || (g_size == 64 && (types_allowed & N_64) != 0)))
14084 g_type = NT_untyped;
14088 if ((thisarg & N_KEY) != 0)
14092 key_allowed = thisarg & ~N_KEY;
14094 /* Check architecture constraint on FP16 extension. */
14096 && k_type == NT_float
14097 && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
14099 inst.error = _(BAD_FP16);
14106 if ((thisarg & N_VFP) != 0)
14108 enum neon_shape_el regshape;
14109 unsigned regwidth, match;
14111 /* PR 11136: Catch the case where we are passed a shape of NS_NULL. */
14114 first_error (_("invalid instruction shape"));
14117 regshape = neon_shape_tab[ns].el[i];
14118 regwidth = neon_shape_el_size[regshape];
14120 /* In VFP mode, operands must match register widths. If we
14121 have a key operand, use its width, else use the width of
14122 the current operand. */
14128 /* FP16 will use a single precision register. */
14129 if (regwidth == 32 && match == 16)
14131 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
14135 inst.error = _(BAD_FP16);
14140 if (regwidth != match)
14142 first_error (_("operand size must match register width"));
14147 if ((thisarg & N_EQK) == 0)
14149 unsigned given_type = type_chk_of_el_type (g_type, g_size);
14151 if ((given_type & types_allowed) == 0)
14153 first_error (_("bad type in Neon instruction"));
14159 enum neon_el_type mod_k_type = k_type;
14160 unsigned mod_k_size = k_size;
14161 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
14162 if (g_type != mod_k_type || g_size != mod_k_size)
14164 first_error (_("inconsistent types in Neon instruction"));
14172 return inst.vectype.el[key_el];
14175 /* Neon-style VFP instruction forwarding. */
14177 /* Thumb VFP instructions have 0xE in the condition field. */
14180 do_vfp_cond_or_thumb (void)
14185 inst.instruction |= 0xe0000000;
14187 inst.instruction |= inst.cond << 28;
14190 /* Look up and encode a simple mnemonic, for use as a helper function for the
14191 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
14192 etc. It is assumed that operand parsing has already been done, and that the
14193 operands are in the form expected by the given opcode (this isn't necessarily
14194 the same as the form in which they were parsed, hence some massaging must
14195 take place before this function is called).
14196 Checks current arch version against that in the looked-up opcode. */
14199 do_vfp_nsyn_opcode (const char *opname)
14201 const struct asm_opcode *opcode;
14203 opcode = (const struct asm_opcode *) hash_find (arm_ops_hsh, opname);
14208 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
14209 thumb_mode ? *opcode->tvariant : *opcode->avariant),
14216 inst.instruction = opcode->tvalue;
14217 opcode->tencode ();
14221 inst.instruction = (inst.cond << 28) | opcode->avalue;
14222 opcode->aencode ();
14227 do_vfp_nsyn_add_sub (enum neon_shape rs)
14229 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
14231 if (rs == NS_FFF || rs == NS_HHH)
14234 do_vfp_nsyn_opcode ("fadds");
14236 do_vfp_nsyn_opcode ("fsubs");
14238 /* ARMv8.2 fp16 instruction. */
14240 do_scalar_fp16_v82_encode ();
14245 do_vfp_nsyn_opcode ("faddd");
14247 do_vfp_nsyn_opcode ("fsubd");
14251 /* Check operand types to see if this is a VFP instruction, and if so call
14255 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
14257 enum neon_shape rs;
14258 struct neon_type_el et;
14263 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14264 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14268 rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14269 et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14270 N_F_ALL | N_KEY | N_VFP);
14277 if (et.type != NT_invtype)
14288 do_vfp_nsyn_mla_mls (enum neon_shape rs)
14290 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
14292 if (rs == NS_FFF || rs == NS_HHH)
14295 do_vfp_nsyn_opcode ("fmacs");
14297 do_vfp_nsyn_opcode ("fnmacs");
14299 /* ARMv8.2 fp16 instruction. */
14301 do_scalar_fp16_v82_encode ();
14306 do_vfp_nsyn_opcode ("fmacd");
14308 do_vfp_nsyn_opcode ("fnmacd");
14313 do_vfp_nsyn_fma_fms (enum neon_shape rs)
14315 int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
14317 if (rs == NS_FFF || rs == NS_HHH)
14320 do_vfp_nsyn_opcode ("ffmas");
14322 do_vfp_nsyn_opcode ("ffnmas");
14324 /* ARMv8.2 fp16 instruction. */
14326 do_scalar_fp16_v82_encode ();
14331 do_vfp_nsyn_opcode ("ffmad");
14333 do_vfp_nsyn_opcode ("ffnmad");
14338 do_vfp_nsyn_mul (enum neon_shape rs)
14340 if (rs == NS_FFF || rs == NS_HHH)
14342 do_vfp_nsyn_opcode ("fmuls");
14344 /* ARMv8.2 fp16 instruction. */
14346 do_scalar_fp16_v82_encode ();
14349 do_vfp_nsyn_opcode ("fmuld");
14353 do_vfp_nsyn_abs_neg (enum neon_shape rs)
14355 int is_neg = (inst.instruction & 0x80) != 0;
14356 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
14358 if (rs == NS_FF || rs == NS_HH)
14361 do_vfp_nsyn_opcode ("fnegs");
14363 do_vfp_nsyn_opcode ("fabss");
14365 /* ARMv8.2 fp16 instruction. */
14367 do_scalar_fp16_v82_encode ();
14372 do_vfp_nsyn_opcode ("fnegd");
14374 do_vfp_nsyn_opcode ("fabsd");
14378 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
14379 insns belong to Neon, and are handled elsewhere. */
14382 do_vfp_nsyn_ldm_stm (int is_dbmode)
14384 int is_ldm = (inst.instruction & (1 << 20)) != 0;
14388 do_vfp_nsyn_opcode ("fldmdbs");
14390 do_vfp_nsyn_opcode ("fldmias");
14395 do_vfp_nsyn_opcode ("fstmdbs");
14397 do_vfp_nsyn_opcode ("fstmias");
14402 do_vfp_nsyn_sqrt (void)
14404 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14405 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14407 if (rs == NS_FF || rs == NS_HH)
14409 do_vfp_nsyn_opcode ("fsqrts");
14411 /* ARMv8.2 fp16 instruction. */
14413 do_scalar_fp16_v82_encode ();
14416 do_vfp_nsyn_opcode ("fsqrtd");
14420 do_vfp_nsyn_div (void)
14422 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14423 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14424 N_F_ALL | N_KEY | N_VFP);
14426 if (rs == NS_FFF || rs == NS_HHH)
14428 do_vfp_nsyn_opcode ("fdivs");
14430 /* ARMv8.2 fp16 instruction. */
14432 do_scalar_fp16_v82_encode ();
14435 do_vfp_nsyn_opcode ("fdivd");
14439 do_vfp_nsyn_nmul (void)
14441 enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
14442 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
14443 N_F_ALL | N_KEY | N_VFP);
14445 if (rs == NS_FFF || rs == NS_HHH)
14447 NEON_ENCODE (SINGLE, inst);
14448 do_vfp_sp_dyadic ();
14450 /* ARMv8.2 fp16 instruction. */
14452 do_scalar_fp16_v82_encode ();
14456 NEON_ENCODE (DOUBLE, inst);
14457 do_vfp_dp_rd_rn_rm ();
14459 do_vfp_cond_or_thumb ();
14464 do_vfp_nsyn_cmp (void)
14466 enum neon_shape rs;
14467 if (inst.operands[1].isreg)
14469 rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
14470 neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
14472 if (rs == NS_FF || rs == NS_HH)
14474 NEON_ENCODE (SINGLE, inst);
14475 do_vfp_sp_monadic ();
14479 NEON_ENCODE (DOUBLE, inst);
14480 do_vfp_dp_rd_rm ();
14485 rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
14486 neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);
14488 switch (inst.instruction & 0x0fffffff)
14491 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
14494 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
14500 if (rs == NS_FI || rs == NS_HI)
14502 NEON_ENCODE (SINGLE, inst);
14503 do_vfp_sp_compare_z ();
14507 NEON_ENCODE (DOUBLE, inst);
14511 do_vfp_cond_or_thumb ();
14513 /* ARMv8.2 fp16 instruction. */
14514 if (rs == NS_HI || rs == NS_HH)
14515 do_scalar_fp16_v82_encode ();
14519 nsyn_insert_sp (void)
14521 inst.operands[1] = inst.operands[0];
14522 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
14523 inst.operands[0].reg = REG_SP;
14524 inst.operands[0].isreg = 1;
14525 inst.operands[0].writeback = 1;
14526 inst.operands[0].present = 1;
14530 do_vfp_nsyn_push (void)
14534 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14535 _("register list must contain at least 1 and at most 16 "
14538 if (inst.operands[1].issingle)
14539 do_vfp_nsyn_opcode ("fstmdbs");
14541 do_vfp_nsyn_opcode ("fstmdbd");
14545 do_vfp_nsyn_pop (void)
14549 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
14550 _("register list must contain at least 1 and at most 16 "
14553 if (inst.operands[1].issingle)
14554 do_vfp_nsyn_opcode ("fldmias");
14556 do_vfp_nsyn_opcode ("fldmiad");
14559 /* Fix up Neon data-processing instructions, ORing in the correct bits for
14560 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
14563 neon_dp_fixup (struct arm_it* insn)
14565 unsigned int i = insn->instruction;
14570 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
14581 insn->instruction = i;
14584 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
14588 neon_logbits (unsigned x)
14590 return ffs (x) - 4;
14593 #define LOW4(R) ((R) & 0xf)
14594 #define HI1(R) (((R) >> 4) & 1)
14596 /* Encode insns with bit pattern:
14598 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
14599 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
14601 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
14602 different meaning for some instruction. */
14605 neon_three_same (int isquad, int ubit, int size)
14607 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14608 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14609 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
14610 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
14611 inst.instruction |= LOW4 (inst.operands[2].reg);
14612 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
14613 inst.instruction |= (isquad != 0) << 6;
14614 inst.instruction |= (ubit != 0) << 24;
14616 inst.instruction |= neon_logbits (size) << 20;
14618 neon_dp_fixup (&inst);
14621 /* Encode instructions of the form:
14623 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
14624 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
14626 Don't write size if SIZE == -1. */
14629 neon_two_same (int qbit, int ubit, int size)
14631 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14632 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14633 inst.instruction |= LOW4 (inst.operands[1].reg);
14634 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14635 inst.instruction |= (qbit != 0) << 6;
14636 inst.instruction |= (ubit != 0) << 24;
14639 inst.instruction |= neon_logbits (size) << 18;
14641 neon_dp_fixup (&inst);
14644 /* Neon instruction encoders, in approximate order of appearance. */
14647 do_neon_dyadic_i_su (void)
14649 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14650 struct neon_type_el et = neon_check_type (3, rs,
14651 N_EQK, N_EQK, N_SU_32 | N_KEY);
14652 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14656 do_neon_dyadic_i64_su (void)
14658 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14659 struct neon_type_el et = neon_check_type (3, rs,
14660 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14661 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14665 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
14668 unsigned size = et.size >> 3;
14669 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14670 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14671 inst.instruction |= LOW4 (inst.operands[1].reg);
14672 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
14673 inst.instruction |= (isquad != 0) << 6;
14674 inst.instruction |= immbits << 16;
14675 inst.instruction |= (size >> 3) << 7;
14676 inst.instruction |= (size & 0x7) << 19;
14678 inst.instruction |= (uval != 0) << 24;
14680 neon_dp_fixup (&inst);
14684 do_neon_shl_imm (void)
14686 if (!inst.operands[2].isreg)
14688 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14689 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
14690 int imm = inst.operands[2].imm;
14692 constraint (imm < 0 || (unsigned)imm >= et.size,
14693 _("immediate out of range for shift"));
14694 NEON_ENCODE (IMMED, inst);
14695 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
14699 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14700 struct neon_type_el et = neon_check_type (3, rs,
14701 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14704 /* VSHL/VQSHL 3-register variants have syntax such as:
14706 whereas other 3-register operations encoded by neon_three_same have
14709 (i.e. with Dn & Dm reversed). Swap operands[1].reg and operands[2].reg
14711 tmp = inst.operands[2].reg;
14712 inst.operands[2].reg = inst.operands[1].reg;
14713 inst.operands[1].reg = tmp;
14714 NEON_ENCODE (INTEGER, inst);
14715 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14720 do_neon_qshl_imm (void)
14722 if (!inst.operands[2].isreg)
14724 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
14725 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
14726 int imm = inst.operands[2].imm;
14728 constraint (imm < 0 || (unsigned)imm >= et.size,
14729 _("immediate out of range for shift"));
14730 NEON_ENCODE (IMMED, inst);
14731 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
14735 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14736 struct neon_type_el et = neon_check_type (3, rs,
14737 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
14740 /* See note in do_neon_shl_imm. */
14741 tmp = inst.operands[2].reg;
14742 inst.operands[2].reg = inst.operands[1].reg;
14743 inst.operands[1].reg = tmp;
14744 NEON_ENCODE (INTEGER, inst);
14745 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14750 do_neon_rshl (void)
14752 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14753 struct neon_type_el et = neon_check_type (3, rs,
14754 N_EQK, N_EQK, N_SU_ALL | N_KEY);
14757 tmp = inst.operands[2].reg;
14758 inst.operands[2].reg = inst.operands[1].reg;
14759 inst.operands[1].reg = tmp;
14760 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
14764 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
14766 /* Handle .I8 pseudo-instructions. */
14769 /* Unfortunately, this will make everything apart from zero out-of-range.
14770 FIXME is this the intended semantics? There doesn't seem much point in
14771 accepting .I8 if so. */
14772 immediate |= immediate << 8;
14778 if (immediate == (immediate & 0x000000ff))
14780 *immbits = immediate;
14783 else if (immediate == (immediate & 0x0000ff00))
14785 *immbits = immediate >> 8;
14788 else if (immediate == (immediate & 0x00ff0000))
14790 *immbits = immediate >> 16;
14793 else if (immediate == (immediate & 0xff000000))
14795 *immbits = immediate >> 24;
14798 if ((immediate & 0xffff) != (immediate >> 16))
14799 goto bad_immediate;
14800 immediate &= 0xffff;
14803 if (immediate == (immediate & 0x000000ff))
14805 *immbits = immediate;
14808 else if (immediate == (immediate & 0x0000ff00))
14810 *immbits = immediate >> 8;
14815 first_error (_("immediate value out of range"));
14820 do_neon_logic (void)
14822 if (inst.operands[2].present && inst.operands[2].isreg)
14824 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14825 neon_check_type (3, rs, N_IGNORE_TYPE);
14826 /* U bit and size field were set as part of the bitmask. */
14827 NEON_ENCODE (INTEGER, inst);
14828 neon_three_same (neon_quad (rs), 0, -1);
14832 const int three_ops_form = (inst.operands[2].present
14833 && !inst.operands[2].isreg);
14834 const int immoperand = (three_ops_form ? 2 : 1);
14835 enum neon_shape rs = (three_ops_form
14836 ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
14837 : neon_select_shape (NS_DI, NS_QI, NS_NULL));
14838 struct neon_type_el et = neon_check_type (2, rs,
14839 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
14840 enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
14844 if (et.type == NT_invtype)
14847 if (three_ops_form)
14848 constraint (inst.operands[0].reg != inst.operands[1].reg,
14849 _("first and second operands shall be the same register"));
14851 NEON_ENCODE (IMMED, inst);
14853 immbits = inst.operands[immoperand].imm;
14856 /* .i64 is a pseudo-op, so the immediate must be a repeating
14858 if (immbits != (inst.operands[immoperand].regisimm ?
14859 inst.operands[immoperand].reg : 0))
14861 /* Set immbits to an invalid constant. */
14862 immbits = 0xdeadbeef;
14869 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14873 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14877 /* Pseudo-instruction for VBIC. */
14878 neon_invert_size (&immbits, 0, et.size);
14879 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14883 /* Pseudo-instruction for VORR. */
14884 neon_invert_size (&immbits, 0, et.size);
14885 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
14895 inst.instruction |= neon_quad (rs) << 6;
14896 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
14897 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
14898 inst.instruction |= cmode << 8;
14899 neon_write_immbits (immbits);
14901 neon_dp_fixup (&inst);
14906 do_neon_bitfield (void)
14908 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14909 neon_check_type (3, rs, N_IGNORE_TYPE);
14910 neon_three_same (neon_quad (rs), 0, -1);
14914 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
14917 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
14918 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
14920 if (et.type == NT_float)
14922 NEON_ENCODE (FLOAT, inst);
14923 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
14927 NEON_ENCODE (INTEGER, inst);
14928 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
14933 do_neon_dyadic_if_su (void)
14935 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14939 do_neon_dyadic_if_su_d (void)
14941 /* This version only allow D registers, but that constraint is enforced during
14942 operand parsing so we don't need to do anything extra here. */
14943 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
14947 do_neon_dyadic_if_i_d (void)
14949 /* The "untyped" case can't happen. Do this to stop the "U" bit being
14950 affected if we specify unsigned args. */
14951 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
14954 enum vfp_or_neon_is_neon_bits
14957 NEON_CHECK_ARCH = 2,
14958 NEON_CHECK_ARCH8 = 4
14961 /* Call this function if an instruction which may have belonged to the VFP or
14962 Neon instruction sets, but turned out to be a Neon instruction (due to the
14963 operand types involved, etc.). We have to check and/or fix-up a couple of
14966 - Make sure the user hasn't attempted to make a Neon instruction
14968 - Alter the value in the condition code field if necessary.
14969 - Make sure that the arch supports Neon instructions.
14971 Which of these operations take place depends on bits from enum
14972 vfp_or_neon_is_neon_bits.
14974 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
14975 current instruction's condition is COND_ALWAYS, the condition field is
14976 changed to inst.uncond_value. This is necessary because instructions shared
14977 between VFP and Neon may be conditional for the VFP variants only, and the
14978 unconditional Neon version must have, e.g., 0xF in the condition field. */
14981 vfp_or_neon_is_neon (unsigned check)
14983 /* Conditions are always legal in Thumb mode (IT blocks). */
14984 if (!thumb_mode && (check & NEON_CHECK_CC))
14986 if (inst.cond != COND_ALWAYS)
14988 first_error (_(BAD_COND));
14991 if (inst.uncond_value != -1)
14992 inst.instruction |= inst.uncond_value << 28;
14995 if ((check & NEON_CHECK_ARCH)
14996 && !mark_feature_used (&fpu_neon_ext_v1))
14998 first_error (_(BAD_FPU));
15002 if ((check & NEON_CHECK_ARCH8)
15003 && !mark_feature_used (&fpu_neon_ext_armv8))
15005 first_error (_(BAD_FPU));
15013 do_neon_addsub_if_i (void)
15015 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
15018 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15021 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15022 affected if we specify unsigned args. */
15023 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
15026 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
15028 V<op> A,B (A is operand 0, B is operand 2)
15033 so handle that case specially. */
15036 neon_exchange_operands (void)
15038 if (inst.operands[1].present)
15040 void *scratch = xmalloc (sizeof (inst.operands[0]));
15042 /* Swap operands[1] and operands[2]. */
15043 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
15044 inst.operands[1] = inst.operands[2];
15045 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
15050 inst.operands[1] = inst.operands[2];
15051 inst.operands[2] = inst.operands[0];
15056 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
15058 if (inst.operands[2].isreg)
15061 neon_exchange_operands ();
15062 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
15066 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15067 struct neon_type_el et = neon_check_type (2, rs,
15068 N_EQK | N_SIZ, immtypes | N_KEY);
15070 NEON_ENCODE (IMMED, inst);
15071 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15072 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15073 inst.instruction |= LOW4 (inst.operands[1].reg);
15074 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15075 inst.instruction |= neon_quad (rs) << 6;
15076 inst.instruction |= (et.type == NT_float) << 10;
15077 inst.instruction |= neon_logbits (et.size) << 18;
15079 neon_dp_fixup (&inst);
15086 neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
15090 do_neon_cmp_inv (void)
15092 neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
15098 neon_compare (N_IF_32, N_IF_32, FALSE);
15101 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
15102 scalars, which are encoded in 5 bits, M : Rm.
15103 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
15104 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
15107 Dot Product instructions are similar to multiply instructions except elsize
15108 should always be 32.
15110 This function translates SCALAR, which is GAS's internal encoding of indexed
15111 scalar register, to raw encoding. There is also register and index range
15112 check based on ELSIZE. */
15115 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
15117 unsigned regno = NEON_SCALAR_REG (scalar);
15118 unsigned elno = NEON_SCALAR_INDEX (scalar);
15123 if (regno > 7 || elno > 3)
15125 return regno | (elno << 3);
15128 if (regno > 15 || elno > 1)
15130 return regno | (elno << 4);
15134 first_error (_("scalar out of range for multiply instruction"));
15140 /* Encode multiply / multiply-accumulate scalar instructions. */
15143 neon_mul_mac (struct neon_type_el et, int ubit)
15147 /* Give a more helpful error message if we have an invalid type. */
15148 if (et.type == NT_invtype)
15151 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
15152 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15153 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15154 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
15155 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
15156 inst.instruction |= LOW4 (scalar);
15157 inst.instruction |= HI1 (scalar) << 5;
15158 inst.instruction |= (et.type == NT_float) << 8;
15159 inst.instruction |= neon_logbits (et.size) << 20;
15160 inst.instruction |= (ubit != 0) << 24;
15162 neon_dp_fixup (&inst);
15166 do_neon_mac_maybe_scalar (void)
15168 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
15171 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15174 if (inst.operands[2].isscalar)
15176 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15177 struct neon_type_el et = neon_check_type (3, rs,
15178 N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
15179 NEON_ENCODE (SCALAR, inst);
15180 neon_mul_mac (et, neon_quad (rs));
15184 /* The "untyped" case can't happen. Do this to stop the "U" bit being
15185 affected if we specify unsigned args. */
15186 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15191 do_neon_fmac (void)
15193 if (try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
15196 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15199 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
15205 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15206 struct neon_type_el et = neon_check_type (3, rs,
15207 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
15208 neon_three_same (neon_quad (rs), 0, et.size);
15211 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
15212 same types as the MAC equivalents. The polynomial type for this instruction
15213 is encoded the same as the integer type. */
15218 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
15221 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15224 if (inst.operands[2].isscalar)
15225 do_neon_mac_maybe_scalar ();
15227 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
15231 do_neon_qdmulh (void)
15233 if (inst.operands[2].isscalar)
15235 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15236 struct neon_type_el et = neon_check_type (3, rs,
15237 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15238 NEON_ENCODE (SCALAR, inst);
15239 neon_mul_mac (et, neon_quad (rs));
15243 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15244 struct neon_type_el et = neon_check_type (3, rs,
15245 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15246 NEON_ENCODE (INTEGER, inst);
15247 /* The U bit (rounding) comes from bit mask. */
15248 neon_three_same (neon_quad (rs), 0, et.size);
15253 do_neon_qrdmlah (void)
15255 /* Check we're on the correct architecture. */
15256 if (!mark_feature_used (&fpu_neon_ext_armv8))
15258 _("instruction form not available on this architecture.");
15259 else if (!mark_feature_used (&fpu_neon_ext_v8_1))
15261 as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
15262 record_feature_use (&fpu_neon_ext_v8_1);
15265 if (inst.operands[2].isscalar)
15267 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
15268 struct neon_type_el et = neon_check_type (3, rs,
15269 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15270 NEON_ENCODE (SCALAR, inst);
15271 neon_mul_mac (et, neon_quad (rs));
15275 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15276 struct neon_type_el et = neon_check_type (3, rs,
15277 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
15278 NEON_ENCODE (INTEGER, inst);
15279 /* The U bit (rounding) comes from bit mask. */
15280 neon_three_same (neon_quad (rs), 0, et.size);
15285 do_neon_fcmp_absolute (void)
15287 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15288 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15289 N_F_16_32 | N_KEY);
15290 /* Size field comes from bit mask. */
15291 neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
15295 do_neon_fcmp_absolute_inv (void)
15297 neon_exchange_operands ();
15298 do_neon_fcmp_absolute ();
15302 do_neon_step (void)
15304 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
15305 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
15306 N_F_16_32 | N_KEY);
15307 neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
15311 do_neon_abs_neg (void)
15313 enum neon_shape rs;
15314 struct neon_type_el et;
15316 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
15319 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15322 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
15323 et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);
15325 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15326 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15327 inst.instruction |= LOW4 (inst.operands[1].reg);
15328 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15329 inst.instruction |= neon_quad (rs) << 6;
15330 inst.instruction |= (et.type == NT_float) << 10;
15331 inst.instruction |= neon_logbits (et.size) << 18;
15333 neon_dp_fixup (&inst);
15339 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15340 struct neon_type_el et = neon_check_type (2, rs,
15341 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15342 int imm = inst.operands[2].imm;
15343 constraint (imm < 0 || (unsigned)imm >= et.size,
15344 _("immediate out of range for insert"));
15345 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15351 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15352 struct neon_type_el et = neon_check_type (2, rs,
15353 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
15354 int imm = inst.operands[2].imm;
15355 constraint (imm < 1 || (unsigned)imm > et.size,
15356 _("immediate out of range for insert"));
15357 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
15361 do_neon_qshlu_imm (void)
15363 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
15364 struct neon_type_el et = neon_check_type (2, rs,
15365 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
15366 int imm = inst.operands[2].imm;
15367 constraint (imm < 0 || (unsigned)imm >= et.size,
15368 _("immediate out of range for shift"));
15369 /* Only encodes the 'U present' variant of the instruction.
15370 In this case, signed types have OP (bit 8) set to 0.
15371 Unsigned types have OP set to 1. */
15372 inst.instruction |= (et.type == NT_unsigned) << 8;
15373 /* The rest of the bits are the same as other immediate shifts. */
15374 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
15378 do_neon_qmovn (void)
15380 struct neon_type_el et = neon_check_type (2, NS_DQ,
15381 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15382 /* Saturating move where operands can be signed or unsigned, and the
15383 destination has the same signedness. */
15384 NEON_ENCODE (INTEGER, inst);
15385 if (et.type == NT_unsigned)
15386 inst.instruction |= 0xc0;
15388 inst.instruction |= 0x80;
15389 neon_two_same (0, 1, et.size / 2);
15393 do_neon_qmovun (void)
15395 struct neon_type_el et = neon_check_type (2, NS_DQ,
15396 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15397 /* Saturating move with unsigned results. Operands must be signed. */
15398 NEON_ENCODE (INTEGER, inst);
15399 neon_two_same (0, 1, et.size / 2);
15403 do_neon_rshift_sat_narrow (void)
15405 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15406 or unsigned. If operands are unsigned, results must also be unsigned. */
15407 struct neon_type_el et = neon_check_type (2, NS_DQI,
15408 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
15409 int imm = inst.operands[2].imm;
15410 /* This gets the bounds check, size encoding and immediate bits calculation
15414 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
15415 VQMOVN.I<size> <Dd>, <Qm>. */
15418 inst.operands[2].present = 0;
15419 inst.instruction = N_MNEM_vqmovn;
15424 constraint (imm < 1 || (unsigned)imm > et.size,
15425 _("immediate out of range"));
15426 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
15430 do_neon_rshift_sat_narrow_u (void)
15432 /* FIXME: Types for narrowing. If operands are signed, results can be signed
15433 or unsigned. If operands are unsigned, results must also be unsigned. */
15434 struct neon_type_el et = neon_check_type (2, NS_DQI,
15435 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
15436 int imm = inst.operands[2].imm;
15437 /* This gets the bounds check, size encoding and immediate bits calculation
15441 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
15442 VQMOVUN.I<size> <Dd>, <Qm>. */
15445 inst.operands[2].present = 0;
15446 inst.instruction = N_MNEM_vqmovun;
15451 constraint (imm < 1 || (unsigned)imm > et.size,
15452 _("immediate out of range"));
15453 /* FIXME: The manual is kind of unclear about what value U should have in
15454 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
15456 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
15460 do_neon_movn (void)
15462 struct neon_type_el et = neon_check_type (2, NS_DQ,
15463 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15464 NEON_ENCODE (INTEGER, inst);
15465 neon_two_same (0, 1, et.size / 2);
15469 do_neon_rshift_narrow (void)
15471 struct neon_type_el et = neon_check_type (2, NS_DQI,
15472 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
15473 int imm = inst.operands[2].imm;
15474 /* This gets the bounds check, size encoding and immediate bits calculation
15478 /* If immediate is zero then we are a pseudo-instruction for
15479 VMOVN.I<size> <Dd>, <Qm> */
15482 inst.operands[2].present = 0;
15483 inst.instruction = N_MNEM_vmovn;
15488 constraint (imm < 1 || (unsigned)imm > et.size,
15489 _("immediate out of range for narrowing operation"));
15490 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
15494 do_neon_shll (void)
15496 /* FIXME: Type checking when lengthening. */
15497 struct neon_type_el et = neon_check_type (2, NS_QDI,
15498 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
15499 unsigned imm = inst.operands[2].imm;
15501 if (imm == et.size)
15503 /* Maximum shift variant. */
15504 NEON_ENCODE (INTEGER, inst);
15505 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15506 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15507 inst.instruction |= LOW4 (inst.operands[1].reg);
15508 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15509 inst.instruction |= neon_logbits (et.size) << 18;
15511 neon_dp_fixup (&inst);
15515 /* A more-specific type check for non-max versions. */
15516 et = neon_check_type (2, NS_QDI,
15517 N_EQK | N_DBL, N_SU_32 | N_KEY);
15518 NEON_ENCODE (IMMED, inst);
15519 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
15523 /* Check the various types for the VCVT instruction, and return which version
15524 the current instruction is. */
15526 #define CVT_FLAVOUR_VAR \
15527 CVT_VAR (s32_f32, N_S32, N_F32, whole_reg, "ftosls", "ftosis", "ftosizs") \
15528 CVT_VAR (u32_f32, N_U32, N_F32, whole_reg, "ftouls", "ftouis", "ftouizs") \
15529 CVT_VAR (f32_s32, N_F32, N_S32, whole_reg, "fsltos", "fsitos", NULL) \
15530 CVT_VAR (f32_u32, N_F32, N_U32, whole_reg, "fultos", "fuitos", NULL) \
15531 /* Half-precision conversions. */ \
15532 CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
15533 CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL) \
15534 CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL) \
15535 CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL) \
15536 CVT_VAR (f32_f16, N_F32, N_F16, whole_reg, NULL, NULL, NULL) \
15537 CVT_VAR (f16_f32, N_F16, N_F32, whole_reg, NULL, NULL, NULL) \
15538 /* New VCVT instructions introduced by ARMv8.2 fp16 extension. \
15539 Compared with single/double precision variants, only the co-processor \
15540 field is different, so the encoding flow is reused here. */ \
15541 CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL) \
15542 CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL) \
15543 CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
15544 CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
15545 /* VFP instructions. */ \
15546 CVT_VAR (f32_f64, N_F32, N_F64, N_VFP, NULL, "fcvtsd", NULL) \
15547 CVT_VAR (f64_f32, N_F64, N_F32, N_VFP, NULL, "fcvtds", NULL) \
15548 CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
15549 CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
15550 CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL) \
15551 CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL) \
15552 /* VFP instructions with bitshift. */ \
15553 CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL, NULL) \
15554 CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL, NULL) \
15555 CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL, NULL) \
15556 CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL, NULL) \
15557 CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL, NULL) \
15558 CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL, NULL) \
15559 CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL, NULL) \
15560 CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL, NULL)
15562 #define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
15563 neon_cvt_flavour_##C,
15565 /* The different types of conversions we can do. */
15566 enum neon_cvt_flavour
15569 neon_cvt_flavour_invalid,
15570 neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
15575 static enum neon_cvt_flavour
15576 get_neon_cvt_flavour (enum neon_shape rs)
15578 #define CVT_VAR(C,X,Y,R,BSN,CN,ZN) \
15579 et = neon_check_type (2, rs, (R) | (X), (R) | (Y)); \
15580 if (et.type != NT_invtype) \
15582 inst.error = NULL; \
15583 return (neon_cvt_flavour_##C); \
15586 struct neon_type_el et;
15587 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
15588 || rs == NS_FF) ? N_VFP : 0;
15589 /* The instruction versions which take an immediate take one register
15590 argument, which is extended to the width of the full register. Thus the
15591 "source" and "destination" registers must have the same width. Hack that
15592 here by making the size equal to the key (wider, in this case) operand. */
15593 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
15597 return neon_cvt_flavour_invalid;
15612 /* Neon-syntax VFP conversions. */
15615 do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
15617 const char *opname = 0;
15619 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
15620 || rs == NS_FHI || rs == NS_HFI)
15622 /* Conversions with immediate bitshift. */
15623 const char *enc[] =
15625 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
15631 if (flavour < (int) ARRAY_SIZE (enc))
15633 opname = enc[flavour];
15634 constraint (inst.operands[0].reg != inst.operands[1].reg,
15635 _("operands 0 and 1 must be the same register"));
15636 inst.operands[1] = inst.operands[2];
15637 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
15642 /* Conversions without bitshift. */
15643 const char *enc[] =
15645 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
15651 if (flavour < (int) ARRAY_SIZE (enc))
15652 opname = enc[flavour];
15656 do_vfp_nsyn_opcode (opname);
15658 /* ARMv8.2 fp16 VCVT instruction. */
15659 if (flavour == neon_cvt_flavour_s32_f16
15660 || flavour == neon_cvt_flavour_u32_f16
15661 || flavour == neon_cvt_flavour_f16_u32
15662 || flavour == neon_cvt_flavour_f16_s32)
15663 do_scalar_fp16_v82_encode ();
15667 do_vfp_nsyn_cvtz (void)
15669 enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
15670 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
15671 const char *enc[] =
15673 #define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
15679 if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
15680 do_vfp_nsyn_opcode (enc[flavour]);
15684 do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
15685 enum neon_cvt_mode mode)
15690 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
15691 D register operands. */
15692 if (flavour == neon_cvt_flavour_s32_f64
15693 || flavour == neon_cvt_flavour_u32_f64)
15694 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
15697 if (flavour == neon_cvt_flavour_s32_f16
15698 || flavour == neon_cvt_flavour_u32_f16)
15699 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
15702 set_it_insn_type (OUTSIDE_IT_INSN);
15706 case neon_cvt_flavour_s32_f64:
15710 case neon_cvt_flavour_s32_f32:
15714 case neon_cvt_flavour_s32_f16:
15718 case neon_cvt_flavour_u32_f64:
15722 case neon_cvt_flavour_u32_f32:
15726 case neon_cvt_flavour_u32_f16:
15731 first_error (_("invalid instruction shape"));
15737 case neon_cvt_mode_a: rm = 0; break;
15738 case neon_cvt_mode_n: rm = 1; break;
15739 case neon_cvt_mode_p: rm = 2; break;
15740 case neon_cvt_mode_m: rm = 3; break;
15741 default: first_error (_("invalid rounding mode")); return;
15744 NEON_ENCODE (FPV8, inst);
15745 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
15746 encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
15747 inst.instruction |= sz << 8;
15749 /* ARMv8.2 fp16 VCVT instruction. */
15750 if (flavour == neon_cvt_flavour_s32_f16
15751 ||flavour == neon_cvt_flavour_u32_f16)
15752 do_scalar_fp16_v82_encode ();
15753 inst.instruction |= op << 7;
15754 inst.instruction |= rm << 16;
15755 inst.instruction |= 0xf0000000;
15756 inst.is_neon = TRUE;
15760 do_neon_cvt_1 (enum neon_cvt_mode mode)
15762 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
15763 NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
15764 NS_FH, NS_HF, NS_FHI, NS_HFI,
15766 enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
15768 if (flavour == neon_cvt_flavour_invalid)
15771 /* PR11109: Handle round-to-zero for VCVT conversions. */
15772 if (mode == neon_cvt_mode_z
15773 && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
15774 && (flavour == neon_cvt_flavour_s16_f16
15775 || flavour == neon_cvt_flavour_u16_f16
15776 || flavour == neon_cvt_flavour_s32_f32
15777 || flavour == neon_cvt_flavour_u32_f32
15778 || flavour == neon_cvt_flavour_s32_f64
15779 || flavour == neon_cvt_flavour_u32_f64)
15780 && (rs == NS_FD || rs == NS_FF))
15782 do_vfp_nsyn_cvtz ();
15786 /* ARMv8.2 fp16 VCVT conversions. */
15787 if (mode == neon_cvt_mode_z
15788 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
15789 && (flavour == neon_cvt_flavour_s32_f16
15790 || flavour == neon_cvt_flavour_u32_f16)
15793 do_vfp_nsyn_cvtz ();
15794 do_scalar_fp16_v82_encode ();
15798 /* VFP rather than Neon conversions. */
15799 if (flavour >= neon_cvt_flavour_first_fp)
15801 if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15802 do_vfp_nsyn_cvt (rs, flavour);
15804 do_vfp_nsyn_cvt_fpv8 (flavour, mode);
15815 unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
15816 0x0000100, 0x1000100, 0x0, 0x1000000};
15818 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15821 /* Fixed-point conversion with #0 immediate is encoded as an
15822 integer conversion. */
15823 if (inst.operands[2].present && inst.operands[2].imm == 0)
15825 NEON_ENCODE (IMMED, inst);
15826 if (flavour != neon_cvt_flavour_invalid)
15827 inst.instruction |= enctab[flavour];
15828 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15829 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15830 inst.instruction |= LOW4 (inst.operands[1].reg);
15831 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15832 inst.instruction |= neon_quad (rs) << 6;
15833 inst.instruction |= 1 << 21;
15834 if (flavour < neon_cvt_flavour_s16_f16)
15836 inst.instruction |= 1 << 21;
15837 immbits = 32 - inst.operands[2].imm;
15838 inst.instruction |= immbits << 16;
15842 inst.instruction |= 3 << 20;
15843 immbits = 16 - inst.operands[2].imm;
15844 inst.instruction |= immbits << 16;
15845 inst.instruction &= ~(1 << 9);
15848 neon_dp_fixup (&inst);
15854 if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
15856 NEON_ENCODE (FLOAT, inst);
15857 set_it_insn_type (OUTSIDE_IT_INSN);
15859 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
15862 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15863 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15864 inst.instruction |= LOW4 (inst.operands[1].reg);
15865 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15866 inst.instruction |= neon_quad (rs) << 6;
15867 inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
15868 || flavour == neon_cvt_flavour_u32_f32) << 7;
15869 inst.instruction |= mode << 8;
15870 if (flavour == neon_cvt_flavour_u16_f16
15871 || flavour == neon_cvt_flavour_s16_f16)
15872 /* Mask off the original size bits and reencode them. */
15873 inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));
15876 inst.instruction |= 0xfc000000;
15878 inst.instruction |= 0xf0000000;
15884 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
15885 0x100, 0x180, 0x0, 0x080};
15887 NEON_ENCODE (INTEGER, inst);
15889 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
15892 if (flavour != neon_cvt_flavour_invalid)
15893 inst.instruction |= enctab[flavour];
15895 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15896 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15897 inst.instruction |= LOW4 (inst.operands[1].reg);
15898 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15899 inst.instruction |= neon_quad (rs) << 6;
15900 if (flavour >= neon_cvt_flavour_s16_f16
15901 && flavour <= neon_cvt_flavour_f16_u16)
15902 /* Half precision. */
15903 inst.instruction |= 1 << 18;
15905 inst.instruction |= 2 << 18;
15907 neon_dp_fixup (&inst);
15912 /* Half-precision conversions for Advanced SIMD -- neon. */
15917 && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
15919 as_bad (_("operand size must match register width"));
15924 && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
15926 as_bad (_("operand size must match register width"));
15931 inst.instruction = 0x3b60600;
15933 inst.instruction = 0x3b60700;
15935 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
15936 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
15937 inst.instruction |= LOW4 (inst.operands[1].reg);
15938 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
15939 neon_dp_fixup (&inst);
15943 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
15944 if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
15945 do_vfp_nsyn_cvt (rs, flavour);
15947 do_vfp_nsyn_cvt_fpv8 (flavour, mode);
15952 do_neon_cvtr (void)
15954 do_neon_cvt_1 (neon_cvt_mode_x);
15960 do_neon_cvt_1 (neon_cvt_mode_z);
15964 do_neon_cvta (void)
15966 do_neon_cvt_1 (neon_cvt_mode_a);
15970 do_neon_cvtn (void)
15972 do_neon_cvt_1 (neon_cvt_mode_n);
15976 do_neon_cvtp (void)
15978 do_neon_cvt_1 (neon_cvt_mode_p);
15982 do_neon_cvtm (void)
15984 do_neon_cvt_1 (neon_cvt_mode_m);
15988 do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
15991 mark_feature_used (&fpu_vfp_ext_armv8);
15993 encode_arm_vfp_reg (inst.operands[0].reg,
15994 (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
15995 encode_arm_vfp_reg (inst.operands[1].reg,
15996 (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
15997 inst.instruction |= to ? 0x10000 : 0;
15998 inst.instruction |= t ? 0x80 : 0;
15999 inst.instruction |= is_double ? 0x100 : 0;
16000 do_vfp_cond_or_thumb ();
16004 do_neon_cvttb_1 (bfd_boolean t)
16006 enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
16007 NS_DF, NS_DH, NS_NULL);
16011 else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
16014 do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
16016 else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
16019 do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
16021 else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
16023 /* The VCVTB and VCVTT instructions with D-register operands
16024 don't work for SP only targets. */
16025 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16029 do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
16031 else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
16033 /* The VCVTB and VCVTT instructions with D-register operands
16034 don't work for SP only targets. */
16035 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16039 do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
16046 do_neon_cvtb (void)
16048 do_neon_cvttb_1 (FALSE);
16053 do_neon_cvtt (void)
16055 do_neon_cvttb_1 (TRUE);
16059 neon_move_immediate (void)
16061 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
16062 struct neon_type_el et = neon_check_type (2, rs,
16063 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
16064 unsigned immlo, immhi = 0, immbits;
16065 int op, cmode, float_p;
16067 constraint (et.type == NT_invtype,
16068 _("operand size must be specified for immediate VMOV"));
16070 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
16071 op = (inst.instruction & (1 << 5)) != 0;
16073 immlo = inst.operands[1].imm;
16074 if (inst.operands[1].regisimm)
16075 immhi = inst.operands[1].reg;
16077 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
16078 _("immediate has bits set outside the operand size"));
16080 float_p = inst.operands[1].immisfloat;
16082 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
16083 et.size, et.type)) == FAIL)
16085 /* Invert relevant bits only. */
16086 neon_invert_size (&immlo, &immhi, et.size);
16087 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
16088 with one or the other; those cases are caught by
16089 neon_cmode_for_move_imm. */
16091 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
16092 &op, et.size, et.type)) == FAIL)
16094 first_error (_("immediate out of range"));
16099 inst.instruction &= ~(1 << 5);
16100 inst.instruction |= op << 5;
16102 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16103 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16104 inst.instruction |= neon_quad (rs) << 6;
16105 inst.instruction |= cmode << 8;
16107 neon_write_immbits (immbits);
16113 if (inst.operands[1].isreg)
16115 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16117 NEON_ENCODE (INTEGER, inst);
16118 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16119 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16120 inst.instruction |= LOW4 (inst.operands[1].reg);
16121 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16122 inst.instruction |= neon_quad (rs) << 6;
16126 NEON_ENCODE (IMMED, inst);
16127 neon_move_immediate ();
16130 neon_dp_fixup (&inst);
16133 /* Encode instructions of form:
16135 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
16136 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm | */
16139 neon_mixed_length (struct neon_type_el et, unsigned size)
16141 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16142 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16143 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16144 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16145 inst.instruction |= LOW4 (inst.operands[2].reg);
16146 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16147 inst.instruction |= (et.type == NT_unsigned) << 24;
16148 inst.instruction |= neon_logbits (size) << 20;
16150 neon_dp_fixup (&inst);
16154 do_neon_dyadic_long (void)
16156 /* FIXME: Type checking for lengthening op. */
16157 struct neon_type_el et = neon_check_type (3, NS_QDD,
16158 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
16159 neon_mixed_length (et, et.size);
16163 do_neon_abal (void)
16165 struct neon_type_el et = neon_check_type (3, NS_QDD,
16166 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
16167 neon_mixed_length (et, et.size);
16171 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
16173 if (inst.operands[2].isscalar)
16175 struct neon_type_el et = neon_check_type (3, NS_QDS,
16176 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
16177 NEON_ENCODE (SCALAR, inst);
16178 neon_mul_mac (et, et.type == NT_unsigned);
16182 struct neon_type_el et = neon_check_type (3, NS_QDD,
16183 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
16184 NEON_ENCODE (INTEGER, inst);
16185 neon_mixed_length (et, et.size);
16190 do_neon_mac_maybe_scalar_long (void)
16192 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
16195 /* Like neon_scalar_for_mul, this function generate Rm encoding from GAS's
16196 internal SCALAR. QUAD_P is 1 if it's for Q format, otherwise it's 0. */
16199 neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
16201 unsigned regno = NEON_SCALAR_REG (scalar);
16202 unsigned elno = NEON_SCALAR_INDEX (scalar);
16206 if (regno > 7 || elno > 3)
16209 return ((regno & 0x7)
16210 | ((elno & 0x1) << 3)
16211 | (((elno >> 1) & 0x1) << 5));
16215 if (regno > 15 || elno > 1)
16218 return (((regno & 0x1) << 5)
16219 | ((regno >> 1) & 0x7)
16220 | ((elno & 0x1) << 3));
16224 first_error (_("scalar out of range for multiply instruction"));
16229 do_neon_fmac_maybe_scalar_long (int subtype)
16231 enum neon_shape rs;
16233 /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding. 'size"
16234 field (bits[21:20]) has different meaning. For scalar index variant, it's
16235 used to differentiate add and subtract, otherwise it's with fixed value
16239 if (inst.cond != COND_ALWAYS)
16240 as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
16241 "behaviour is UNPREDICTABLE"));
16243 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
16246 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
16249 /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
16250 be a scalar index register. */
16251 if (inst.operands[2].isscalar)
16253 high8 = 0xfe000000;
16256 rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
16260 high8 = 0xfc000000;
16263 inst.instruction |= (0x1 << 23);
16264 rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
16267 neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16);
16269 /* "opcode" from template has included "ubit", so simply pass 0 here. Also,
16270 the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
16271 so we simply pass -1 as size. */
16272 unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
16273 neon_three_same (quad_p, 0, size);
16275 /* Undo neon_dp_fixup. Redo the high eight bits. */
16276 inst.instruction &= 0x00ffffff;
16277 inst.instruction |= high8;
16279 #define LOW1(R) ((R) & 0x1)
16280 #define HI4(R) (((R) >> 1) & 0xf)
16281 /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
16282 whether the instruction is in Q form and whether Vm is a scalar indexed
16284 if (inst.operands[2].isscalar)
16287 = neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
16288 inst.instruction &= 0xffffffd0;
16289 inst.instruction |= rm;
16293 /* Redo Rn as well. */
16294 inst.instruction &= 0xfff0ff7f;
16295 inst.instruction |= HI4 (inst.operands[1].reg) << 16;
16296 inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
16301 /* Redo Rn and Rm. */
16302 inst.instruction &= 0xfff0ff50;
16303 inst.instruction |= HI4 (inst.operands[1].reg) << 16;
16304 inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
16305 inst.instruction |= HI4 (inst.operands[2].reg);
16306 inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
16311 do_neon_vfmal (void)
16313 return do_neon_fmac_maybe_scalar_long (0);
16317 do_neon_vfmsl (void)
16319 return do_neon_fmac_maybe_scalar_long (1);
16323 do_neon_dyadic_wide (void)
16325 struct neon_type_el et = neon_check_type (3, NS_QQD,
16326 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
16327 neon_mixed_length (et, et.size);
16331 do_neon_dyadic_narrow (void)
16333 struct neon_type_el et = neon_check_type (3, NS_QDD,
16334 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
16335 /* Operand sign is unimportant, and the U bit is part of the opcode,
16336 so force the operand type to integer. */
16337 et.type = NT_integer;
16338 neon_mixed_length (et, et.size / 2);
16342 do_neon_mul_sat_scalar_long (void)
16344 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
16348 do_neon_vmull (void)
16350 if (inst.operands[2].isscalar)
16351 do_neon_mac_maybe_scalar_long ();
16354 struct neon_type_el et = neon_check_type (3, NS_QDD,
16355 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
16357 if (et.type == NT_poly)
16358 NEON_ENCODE (POLY, inst);
16360 NEON_ENCODE (INTEGER, inst);
16362 /* For polynomial encoding the U bit must be zero, and the size must
16363 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
16364 obviously, as 0b10). */
16367 /* Check we're on the correct architecture. */
16368 if (!mark_feature_used (&fpu_crypto_ext_armv8))
16370 _("Instruction form not available on this architecture.");
16375 neon_mixed_length (et, et.size);
16382 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
16383 struct neon_type_el et = neon_check_type (3, rs,
16384 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16385 unsigned imm = (inst.operands[3].imm * et.size) / 8;
16387 constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
16388 _("shift out of range"));
16389 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16390 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16391 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16392 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16393 inst.instruction |= LOW4 (inst.operands[2].reg);
16394 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16395 inst.instruction |= neon_quad (rs) << 6;
16396 inst.instruction |= imm << 8;
16398 neon_dp_fixup (&inst);
16404 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16405 struct neon_type_el et = neon_check_type (2, rs,
16406 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16407 unsigned op = (inst.instruction >> 7) & 3;
16408 /* N (width of reversed regions) is encoded as part of the bitmask. We
16409 extract it here to check the elements to be reversed are smaller.
16410 Otherwise we'd get a reserved instruction. */
16411 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
16412 gas_assert (elsize != 0);
16413 constraint (et.size >= elsize,
16414 _("elements must be smaller than reversal region"));
16415 neon_two_same (neon_quad (rs), 1, et.size);
16421 if (inst.operands[1].isscalar)
16423 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
16424 struct neon_type_el et = neon_check_type (2, rs,
16425 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16426 unsigned sizebits = et.size >> 3;
16427 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
16428 int logsize = neon_logbits (et.size);
16429 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
16431 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
16434 NEON_ENCODE (SCALAR, inst);
16435 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16436 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16437 inst.instruction |= LOW4 (dm);
16438 inst.instruction |= HI1 (dm) << 5;
16439 inst.instruction |= neon_quad (rs) << 6;
16440 inst.instruction |= x << 17;
16441 inst.instruction |= sizebits << 16;
16443 neon_dp_fixup (&inst);
16447 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
16448 struct neon_type_el et = neon_check_type (2, rs,
16449 N_8 | N_16 | N_32 | N_KEY, N_EQK);
16450 /* Duplicate ARM register to lanes of vector. */
16451 NEON_ENCODE (ARMREG, inst);
16454 case 8: inst.instruction |= 0x400000; break;
16455 case 16: inst.instruction |= 0x000020; break;
16456 case 32: inst.instruction |= 0x000000; break;
16459 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
16460 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
16461 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
16462 inst.instruction |= neon_quad (rs) << 21;
16463 /* The encoding for this instruction is identical for the ARM and Thumb
16464 variants, except for the condition field. */
16465 do_vfp_cond_or_thumb ();
16469 /* VMOV has particularly many variations. It can be one of:
16470 0. VMOV<c><q> <Qd>, <Qm>
16471 1. VMOV<c><q> <Dd>, <Dm>
16472 (Register operations, which are VORR with Rm = Rn.)
16473 2. VMOV<c><q>.<dt> <Qd>, #<imm>
16474 3. VMOV<c><q>.<dt> <Dd>, #<imm>
16476 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
16477 (ARM register to scalar.)
16478 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
16479 (Two ARM registers to vector.)
16480 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
16481 (Scalar to ARM register.)
16482 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
16483 (Vector to two ARM registers.)
16484 8. VMOV.F32 <Sd>, <Sm>
16485 9. VMOV.F64 <Dd>, <Dm>
16486 (VFP register moves.)
16487 10. VMOV.F32 <Sd>, #imm
16488 11. VMOV.F64 <Dd>, #imm
16489 (VFP float immediate load.)
16490 12. VMOV <Rd>, <Sm>
16491 (VFP single to ARM reg.)
16492 13. VMOV <Sd>, <Rm>
16493 (ARM reg to VFP single.)
16494 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
16495 (Two ARM regs to two VFP singles.)
16496 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
16497 (Two VFP singles to two ARM regs.)
16499 These cases can be disambiguated using neon_select_shape, except cases 1/9
16500 and 3/11 which depend on the operand type too.
16502 All the encoded bits are hardcoded by this function.
16504 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
16505 Cases 5, 7 may be used with VFPv2 and above.
16507 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
16508 can specify a type where it doesn't make sense to, and is ignored). */
16513 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
16514 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR,
16515 NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
16516 NS_HR, NS_RH, NS_HI, NS_NULL);
16517 struct neon_type_el et;
16518 const char *ldconst = 0;
16522 case NS_DD: /* case 1/9. */
16523 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
16524 /* It is not an error here if no type is given. */
16526 if (et.type == NT_float && et.size == 64)
16528 do_vfp_nsyn_opcode ("fcpyd");
16531 /* fall through. */
16533 case NS_QQ: /* case 0/1. */
16535 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
16537 /* The architecture manual I have doesn't explicitly state which
16538 value the U bit should have for register->register moves, but
16539 the equivalent VORR instruction has U = 0, so do that. */
16540 inst.instruction = 0x0200110;
16541 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16542 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16543 inst.instruction |= LOW4 (inst.operands[1].reg);
16544 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16545 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16546 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16547 inst.instruction |= neon_quad (rs) << 6;
16549 neon_dp_fixup (&inst);
16553 case NS_DI: /* case 3/11. */
16554 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
16556 if (et.type == NT_float && et.size == 64)
16558 /* case 11 (fconstd). */
16559 ldconst = "fconstd";
16560 goto encode_fconstd;
16562 /* fall through. */
16564 case NS_QI: /* case 2/3. */
16565 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
16567 inst.instruction = 0x0800010;
16568 neon_move_immediate ();
16569 neon_dp_fixup (&inst);
16572 case NS_SR: /* case 4. */
16574 unsigned bcdebits = 0;
16576 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
16577 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
16579 /* .<size> is optional here, defaulting to .32. */
16580 if (inst.vectype.elems == 0
16581 && inst.operands[0].vectype.type == NT_invtype
16582 && inst.operands[1].vectype.type == NT_invtype)
16584 inst.vectype.el[0].type = NT_untyped;
16585 inst.vectype.el[0].size = 32;
16586 inst.vectype.elems = 1;
16589 et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
16590 logsize = neon_logbits (et.size);
16592 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
16594 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
16595 && et.size != 32, _(BAD_FPU));
16596 constraint (et.type == NT_invtype, _("bad type for scalar"));
16597 constraint (x >= 64 / et.size, _("scalar index out of range"));
16601 case 8: bcdebits = 0x8; break;
16602 case 16: bcdebits = 0x1; break;
16603 case 32: bcdebits = 0x0; break;
16607 bcdebits |= x << logsize;
16609 inst.instruction = 0xe000b10;
16610 do_vfp_cond_or_thumb ();
16611 inst.instruction |= LOW4 (dn) << 16;
16612 inst.instruction |= HI1 (dn) << 7;
16613 inst.instruction |= inst.operands[1].reg << 12;
16614 inst.instruction |= (bcdebits & 3) << 5;
16615 inst.instruction |= (bcdebits >> 2) << 21;
16619 case NS_DRR: /* case 5 (fmdrr). */
16620 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
16623 inst.instruction = 0xc400b10;
16624 do_vfp_cond_or_thumb ();
16625 inst.instruction |= LOW4 (inst.operands[0].reg);
16626 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
16627 inst.instruction |= inst.operands[1].reg << 12;
16628 inst.instruction |= inst.operands[2].reg << 16;
16631 case NS_RS: /* case 6. */
16634 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
16635 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
16636 unsigned abcdebits = 0;
16638 /* .<dt> is optional here, defaulting to .32. */
16639 if (inst.vectype.elems == 0
16640 && inst.operands[0].vectype.type == NT_invtype
16641 && inst.operands[1].vectype.type == NT_invtype)
16643 inst.vectype.el[0].type = NT_untyped;
16644 inst.vectype.el[0].size = 32;
16645 inst.vectype.elems = 1;
16648 et = neon_check_type (2, NS_NULL,
16649 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
16650 logsize = neon_logbits (et.size);
16652 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
16654 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
16655 && et.size != 32, _(BAD_FPU));
16656 constraint (et.type == NT_invtype, _("bad type for scalar"));
16657 constraint (x >= 64 / et.size, _("scalar index out of range"));
16661 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
16662 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
16663 case 32: abcdebits = 0x00; break;
16667 abcdebits |= x << logsize;
16668 inst.instruction = 0xe100b10;
16669 do_vfp_cond_or_thumb ();
16670 inst.instruction |= LOW4 (dn) << 16;
16671 inst.instruction |= HI1 (dn) << 7;
16672 inst.instruction |= inst.operands[0].reg << 12;
16673 inst.instruction |= (abcdebits & 3) << 5;
16674 inst.instruction |= (abcdebits >> 2) << 21;
16678 case NS_RRD: /* case 7 (fmrrd). */
16679 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
16682 inst.instruction = 0xc500b10;
16683 do_vfp_cond_or_thumb ();
16684 inst.instruction |= inst.operands[0].reg << 12;
16685 inst.instruction |= inst.operands[1].reg << 16;
16686 inst.instruction |= LOW4 (inst.operands[2].reg);
16687 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16690 case NS_FF: /* case 8 (fcpys). */
16691 do_vfp_nsyn_opcode ("fcpys");
16695 case NS_FI: /* case 10 (fconsts). */
16696 ldconst = "fconsts";
16698 if (!inst.operands[1].immisfloat)
16701 /* Immediate has to fit in 8 bits so float is enough. */
16702 float imm = (float) inst.operands[1].imm;
16703 memcpy (&new_imm, &imm, sizeof (float));
16704 /* But the assembly may have been written to provide an integer
16705 bit pattern that equates to a float, so check that the
16706 conversion has worked. */
16707 if (is_quarter_float (new_imm))
16709 if (is_quarter_float (inst.operands[1].imm))
16710 as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));
16712 inst.operands[1].imm = new_imm;
16713 inst.operands[1].immisfloat = 1;
16717 if (is_quarter_float (inst.operands[1].imm))
16719 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
16720 do_vfp_nsyn_opcode (ldconst);
16722 /* ARMv8.2 fp16 vmov.f16 instruction. */
16724 do_scalar_fp16_v82_encode ();
16727 first_error (_("immediate out of range"));
16731 case NS_RF: /* case 12 (fmrs). */
16732 do_vfp_nsyn_opcode ("fmrs");
16733 /* ARMv8.2 fp16 vmov.f16 instruction. */
16735 do_scalar_fp16_v82_encode ();
16739 case NS_FR: /* case 13 (fmsr). */
16740 do_vfp_nsyn_opcode ("fmsr");
16741 /* ARMv8.2 fp16 vmov.f16 instruction. */
16743 do_scalar_fp16_v82_encode ();
16746 /* The encoders for the fmrrs and fmsrr instructions expect three operands
16747 (one of which is a list), but we have parsed four. Do some fiddling to
16748 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
16750 case NS_RRFF: /* case 14 (fmrrs). */
16751 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
16752 _("VFP registers must be adjacent"));
16753 inst.operands[2].imm = 2;
16754 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
16755 do_vfp_nsyn_opcode ("fmrrs");
16758 case NS_FFRR: /* case 15 (fmsrr). */
16759 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
16760 _("VFP registers must be adjacent"));
16761 inst.operands[1] = inst.operands[2];
16762 inst.operands[2] = inst.operands[3];
16763 inst.operands[0].imm = 2;
16764 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
16765 do_vfp_nsyn_opcode ("fmsrr");
16769 /* neon_select_shape has determined that the instruction
16770 shape is wrong and has already set the error message. */
16779 do_neon_rshift_round_imm (void)
16781 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
16782 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
16783 int imm = inst.operands[2].imm;
16785 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
16788 inst.operands[2].present = 0;
16793 constraint (imm < 1 || (unsigned)imm > et.size,
16794 _("immediate out of range for shift"));
16795 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
16800 do_neon_movhf (void)
16802 enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
16803 constraint (rs != NS_HH, _("invalid suffix"));
16805 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
16808 if (inst.cond != COND_ALWAYS)
16812 as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
16813 " the behaviour is UNPREDICTABLE"));
16817 inst.error = BAD_COND;
16822 do_vfp_sp_monadic ();
16825 inst.instruction |= 0xf0000000;
16829 do_neon_movl (void)
16831 struct neon_type_el et = neon_check_type (2, NS_QD,
16832 N_EQK | N_DBL, N_SU_32 | N_KEY);
16833 unsigned sizebits = et.size >> 3;
16834 inst.instruction |= sizebits << 19;
16835 neon_two_same (0, et.type == NT_unsigned, -1);
16841 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16842 struct neon_type_el et = neon_check_type (2, rs,
16843 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16844 NEON_ENCODE (INTEGER, inst);
16845 neon_two_same (neon_quad (rs), 1, et.size);
16849 do_neon_zip_uzp (void)
16851 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16852 struct neon_type_el et = neon_check_type (2, rs,
16853 N_EQK, N_8 | N_16 | N_32 | N_KEY);
16854 if (rs == NS_DD && et.size == 32)
16856 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
16857 inst.instruction = N_MNEM_vtrn;
16861 neon_two_same (neon_quad (rs), 1, et.size);
16865 do_neon_sat_abs_neg (void)
16867 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16868 struct neon_type_el et = neon_check_type (2, rs,
16869 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16870 neon_two_same (neon_quad (rs), 1, et.size);
16874 do_neon_pair_long (void)
16876 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16877 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
16878 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
16879 inst.instruction |= (et.type == NT_unsigned) << 7;
16880 neon_two_same (neon_quad (rs), 1, et.size);
16884 do_neon_recip_est (void)
16886 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16887 struct neon_type_el et = neon_check_type (2, rs,
16888 N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
16889 inst.instruction |= (et.type == NT_float) << 8;
16890 neon_two_same (neon_quad (rs), 1, et.size);
16896 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16897 struct neon_type_el et = neon_check_type (2, rs,
16898 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
16899 neon_two_same (neon_quad (rs), 1, et.size);
16905 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16906 struct neon_type_el et = neon_check_type (2, rs,
16907 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
16908 neon_two_same (neon_quad (rs), 1, et.size);
16914 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16915 struct neon_type_el et = neon_check_type (2, rs,
16916 N_EQK | N_INT, N_8 | N_KEY);
16917 neon_two_same (neon_quad (rs), 1, et.size);
16923 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
16924 neon_two_same (neon_quad (rs), 1, -1);
16928 do_neon_tbl_tbx (void)
16930 unsigned listlenbits;
16931 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
16933 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
16935 first_error (_("bad list length for table lookup"));
16939 listlenbits = inst.operands[1].imm - 1;
16940 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16941 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16942 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16943 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16944 inst.instruction |= LOW4 (inst.operands[2].reg);
16945 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16946 inst.instruction |= listlenbits << 8;
16948 neon_dp_fixup (&inst);
16952 do_neon_ldm_stm (void)
16954 /* P, U and L bits are part of bitmask. */
16955 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
16956 unsigned offsetbits = inst.operands[1].imm * 2;
16958 if (inst.operands[1].issingle)
16960 do_vfp_nsyn_ldm_stm (is_dbmode);
16964 constraint (is_dbmode && !inst.operands[0].writeback,
16965 _("writeback (!) must be used for VLDMDB and VSTMDB"));
16967 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
16968 _("register list must contain at least 1 and at most 16 "
16971 inst.instruction |= inst.operands[0].reg << 16;
16972 inst.instruction |= inst.operands[0].writeback << 21;
16973 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
16974 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
16976 inst.instruction |= offsetbits;
16978 do_vfp_cond_or_thumb ();
16982 do_neon_ldr_str (void)
16984 int is_ldr = (inst.instruction & (1 << 20)) != 0;
16986 /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
16987 And is UNPREDICTABLE in thumb mode. */
16989 && inst.operands[1].reg == REG_PC
16990 && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
16993 inst.error = _("Use of PC here is UNPREDICTABLE");
16994 else if (warn_on_deprecated)
16995 as_tsktsk (_("Use of PC here is deprecated"));
16998 if (inst.operands[0].issingle)
17001 do_vfp_nsyn_opcode ("flds");
17003 do_vfp_nsyn_opcode ("fsts");
17005 /* ARMv8.2 vldr.16/vstr.16 instruction. */
17006 if (inst.vectype.el[0].size == 16)
17007 do_scalar_fp16_v82_encode ();
17012 do_vfp_nsyn_opcode ("fldd");
17014 do_vfp_nsyn_opcode ("fstd");
17018 /* "interleave" version also handles non-interleaving register VLD1/VST1
17022 do_neon_ld_st_interleave (void)
17024 struct neon_type_el et = neon_check_type (1, NS_NULL,
17025 N_8 | N_16 | N_32 | N_64);
17026 unsigned alignbits = 0;
17028 /* The bits in this table go:
17029 0: register stride of one (0) or two (1)
17030 1,2: register list length, minus one (1, 2, 3, 4).
17031 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
17032 We use -1 for invalid entries. */
17033 const int typetable[] =
17035 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
17036 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
17037 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
17038 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
17042 if (et.type == NT_invtype)
17045 if (inst.operands[1].immisalign)
17046 switch (inst.operands[1].imm >> 8)
17048 case 64: alignbits = 1; break;
17050 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
17051 && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
17052 goto bad_alignment;
17056 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
17057 goto bad_alignment;
17062 first_error (_("bad alignment"));
17066 inst.instruction |= alignbits << 4;
17067 inst.instruction |= neon_logbits (et.size) << 6;
17069 /* Bits [4:6] of the immediate in a list specifier encode register stride
17070 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
17071 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
17072 up the right value for "type" in a table based on this value and the given
17073 list style, then stick it back. */
17074 idx = ((inst.operands[0].imm >> 4) & 7)
17075 | (((inst.instruction >> 8) & 3) << 3);
17077 typebits = typetable[idx];
17079 constraint (typebits == -1, _("bad list type for instruction"));
17080 constraint (((inst.instruction >> 8) & 3) && et.size == 64,
17081 _("bad element type for instruction"));
17083 inst.instruction &= ~0xf00;
17084 inst.instruction |= typebits << 8;
17087 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
17088 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
17089 otherwise. The variable arguments are a list of pairs of legal (size, align)
17090 values, terminated with -1. */
17093 neon_alignment_bit (int size, int align, int *do_alignment, ...)
17096 int result = FAIL, thissize, thisalign;
17098 if (!inst.operands[1].immisalign)
17104 va_start (ap, do_alignment);
17108 thissize = va_arg (ap, int);
17109 if (thissize == -1)
17111 thisalign = va_arg (ap, int);
17113 if (size == thissize && align == thisalign)
17116 while (result != SUCCESS);
17120 if (result == SUCCESS)
17123 first_error (_("unsupported alignment for instruction"));
17129 do_neon_ld_st_lane (void)
17131 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
17132 int align_good, do_alignment = 0;
17133 int logsize = neon_logbits (et.size);
17134 int align = inst.operands[1].imm >> 8;
17135 int n = (inst.instruction >> 8) & 3;
17136 int max_el = 64 / et.size;
17138 if (et.type == NT_invtype)
17141 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
17142 _("bad list length"));
17143 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
17144 _("scalar index out of range"));
17145 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
17147 _("stride of 2 unavailable when element size is 8"));
17151 case 0: /* VLD1 / VST1. */
17152 align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
17154 if (align_good == FAIL)
17158 unsigned alignbits = 0;
17161 case 16: alignbits = 0x1; break;
17162 case 32: alignbits = 0x3; break;
17165 inst.instruction |= alignbits << 4;
17169 case 1: /* VLD2 / VST2. */
17170 align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
17171 16, 32, 32, 64, -1);
17172 if (align_good == FAIL)
17175 inst.instruction |= 1 << 4;
17178 case 2: /* VLD3 / VST3. */
17179 constraint (inst.operands[1].immisalign,
17180 _("can't use alignment with this instruction"));
17183 case 3: /* VLD4 / VST4. */
17184 align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
17185 16, 64, 32, 64, 32, 128, -1);
17186 if (align_good == FAIL)
17190 unsigned alignbits = 0;
17193 case 8: alignbits = 0x1; break;
17194 case 16: alignbits = 0x1; break;
17195 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
17198 inst.instruction |= alignbits << 4;
17205 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
17206 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
17207 inst.instruction |= 1 << (4 + logsize);
17209 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
17210 inst.instruction |= logsize << 10;
17213 /* Encode single n-element structure to all lanes VLD<n> instructions. */
17216 do_neon_ld_dup (void)
17218 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
17219 int align_good, do_alignment = 0;
17221 if (et.type == NT_invtype)
17224 switch ((inst.instruction >> 8) & 3)
17226 case 0: /* VLD1. */
17227 gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
17228 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
17229 &do_alignment, 16, 16, 32, 32, -1);
17230 if (align_good == FAIL)
17232 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
17235 case 2: inst.instruction |= 1 << 5; break;
17236 default: first_error (_("bad list length")); return;
17238 inst.instruction |= neon_logbits (et.size) << 6;
17241 case 1: /* VLD2. */
17242 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
17243 &do_alignment, 8, 16, 16, 32, 32, 64,
17245 if (align_good == FAIL)
17247 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
17248 _("bad list length"));
17249 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
17250 inst.instruction |= 1 << 5;
17251 inst.instruction |= neon_logbits (et.size) << 6;
17254 case 2: /* VLD3. */
17255 constraint (inst.operands[1].immisalign,
17256 _("can't use alignment with this instruction"));
17257 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
17258 _("bad list length"));
17259 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
17260 inst.instruction |= 1 << 5;
17261 inst.instruction |= neon_logbits (et.size) << 6;
17264 case 3: /* VLD4. */
17266 int align = inst.operands[1].imm >> 8;
17267 align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
17268 16, 64, 32, 64, 32, 128, -1);
17269 if (align_good == FAIL)
17271 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
17272 _("bad list length"));
17273 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
17274 inst.instruction |= 1 << 5;
17275 if (et.size == 32 && align == 128)
17276 inst.instruction |= 0x3 << 6;
17278 inst.instruction |= neon_logbits (et.size) << 6;
17285 inst.instruction |= do_alignment << 4;
17288 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
17289 apart from bits [11:4]. */
17292 do_neon_ldx_stx (void)
17294 if (inst.operands[1].isreg)
17295 constraint (inst.operands[1].reg == REG_PC, BAD_PC);
17297 switch (NEON_LANE (inst.operands[0].imm))
17299 case NEON_INTERLEAVE_LANES:
17300 NEON_ENCODE (INTERLV, inst);
17301 do_neon_ld_st_interleave ();
17304 case NEON_ALL_LANES:
17305 NEON_ENCODE (DUP, inst);
17306 if (inst.instruction == N_INV)
17308 first_error ("only loads support such operands");
17315 NEON_ENCODE (LANE, inst);
17316 do_neon_ld_st_lane ();
17319 /* L bit comes from bit mask. */
17320 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17321 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17322 inst.instruction |= inst.operands[1].reg << 16;
17324 if (inst.operands[1].postind)
17326 int postreg = inst.operands[1].imm & 0xf;
17327 constraint (!inst.operands[1].immisreg,
17328 _("post-index must be a register"));
17329 constraint (postreg == 0xd || postreg == 0xf,
17330 _("bad register for post-index"));
17331 inst.instruction |= postreg;
17335 constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
17336 constraint (inst.reloc.exp.X_op != O_constant
17337 || inst.reloc.exp.X_add_number != 0,
17340 if (inst.operands[1].writeback)
17342 inst.instruction |= 0xd;
17345 inst.instruction |= 0xf;
17349 inst.instruction |= 0xf9000000;
17351 inst.instruction |= 0xf4000000;
17356 do_vfp_nsyn_fpv8 (enum neon_shape rs)
17358 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17359 D register operands. */
17360 if (neon_shape_class[rs] == SC_DOUBLE)
17361 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
17364 NEON_ENCODE (FPV8, inst);
17366 if (rs == NS_FFF || rs == NS_HHH)
17368 do_vfp_sp_dyadic ();
17370 /* ARMv8.2 fp16 instruction. */
17372 do_scalar_fp16_v82_encode ();
17375 do_vfp_dp_rd_rn_rm ();
17378 inst.instruction |= 0x100;
17380 inst.instruction |= 0xf0000000;
17386 set_it_insn_type (OUTSIDE_IT_INSN);
17388 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
17389 first_error (_("invalid instruction shape"));
17395 set_it_insn_type (OUTSIDE_IT_INSN);
17397 if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
17400 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
17403 neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
17407 do_vrint_1 (enum neon_cvt_mode mode)
17409 enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
17410 struct neon_type_el et;
17415 /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
17416 D register operands. */
17417 if (neon_shape_class[rs] == SC_DOUBLE)
17418 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
17421 et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
17423 if (et.type != NT_invtype)
17425 /* VFP encodings. */
17426 if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
17427 || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
17428 set_it_insn_type (OUTSIDE_IT_INSN);
17430 NEON_ENCODE (FPV8, inst);
17431 if (rs == NS_FF || rs == NS_HH)
17432 do_vfp_sp_monadic ();
17434 do_vfp_dp_rd_rm ();
17438 case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
17439 case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
17440 case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
17441 case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
17442 case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
17443 case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
17444 case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
17448 inst.instruction |= (rs == NS_DD) << 8;
17449 do_vfp_cond_or_thumb ();
17451 /* ARMv8.2 fp16 vrint instruction. */
17453 do_scalar_fp16_v82_encode ();
17457 /* Neon encodings (or something broken...). */
17459 et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);
17461 if (et.type == NT_invtype)
17464 set_it_insn_type (OUTSIDE_IT_INSN);
17465 NEON_ENCODE (FLOAT, inst);
17467 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH8) == FAIL)
17470 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17471 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17472 inst.instruction |= LOW4 (inst.operands[1].reg);
17473 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17474 inst.instruction |= neon_quad (rs) << 6;
17475 /* Mask off the original size bits and reencode them. */
17476 inst.instruction = ((inst.instruction & 0xfff3ffff)
17477 | neon_logbits (et.size) << 18);
17481 case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
17482 case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
17483 case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
17484 case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
17485 case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
17486 case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
17487 case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
17492 inst.instruction |= 0xfc000000;
17494 inst.instruction |= 0xf0000000;
17501 do_vrint_1 (neon_cvt_mode_x);
17507 do_vrint_1 (neon_cvt_mode_z);
17513 do_vrint_1 (neon_cvt_mode_r);
17519 do_vrint_1 (neon_cvt_mode_a);
17525 do_vrint_1 (neon_cvt_mode_n);
17531 do_vrint_1 (neon_cvt_mode_p);
17537 do_vrint_1 (neon_cvt_mode_m);
17541 neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
17543 unsigned regno = NEON_SCALAR_REG (opnd);
17544 unsigned elno = NEON_SCALAR_INDEX (opnd);
17546 if (elsize == 16 && elno < 2 && regno < 16)
17547 return regno | (elno << 4);
17548 else if (elsize == 32 && elno == 0)
17551 first_error (_("scalar out of range"));
17558 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
17560 constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
17561 unsigned rot = inst.reloc.exp.X_add_number;
17562 constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
17563 _("immediate out of range"));
17565 if (inst.operands[2].isscalar)
17567 enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
17568 unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
17569 N_KEY | N_F16 | N_F32).size;
17570 unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
17572 inst.instruction = 0xfe000800;
17573 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17574 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17575 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
17576 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
17577 inst.instruction |= LOW4 (m);
17578 inst.instruction |= HI1 (m) << 5;
17579 inst.instruction |= neon_quad (rs) << 6;
17580 inst.instruction |= rot << 20;
17581 inst.instruction |= (size == 32) << 23;
17585 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
17586 unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
17587 N_KEY | N_F16 | N_F32).size;
17588 neon_three_same (neon_quad (rs), 0, -1);
17589 inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup. */
17590 inst.instruction |= 0xfc200800;
17591 inst.instruction |= rot << 23;
17592 inst.instruction |= (size == 32) << 20;
17599 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
17601 constraint (inst.reloc.exp.X_op != O_constant, _("expression too complex"));
17602 unsigned rot = inst.reloc.exp.X_add_number;
17603 constraint (rot != 90 && rot != 270, _("immediate out of range"));
17604 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
17605 unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
17606 N_KEY | N_F16 | N_F32).size;
17607 neon_three_same (neon_quad (rs), 0, -1);
17608 inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup. */
17609 inst.instruction |= 0xfc800800;
17610 inst.instruction |= (rot == 270) << 24;
17611 inst.instruction |= (size == 32) << 20;
17614 /* Dot Product instructions encoding support. */
17617 do_neon_dotproduct (int unsigned_p)
17619 enum neon_shape rs;
17620 unsigned scalar_oprd2 = 0;
17623 if (inst.cond != COND_ALWAYS)
17624 as_warn (_("Dot Product instructions cannot be conditional, the behaviour "
17625 "is UNPREDICTABLE"));
17627 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
17630 /* Dot Product instructions are in three-same D/Q register format or the third
17631 operand can be a scalar index register. */
17632 if (inst.operands[2].isscalar)
17634 scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
17635 high8 = 0xfe000000;
17636 rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
17640 high8 = 0xfc000000;
17641 rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
17645 neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
17647 neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);
17649 /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
17650 Product instruction, so we pass 0 as the "ubit" parameter. And the
17651 "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter. */
17652 neon_three_same (neon_quad (rs), 0, 32);
17654 /* Undo neon_dp_fixup. Dot Product instructions are using a slightly
17655 different NEON three-same encoding. */
17656 inst.instruction &= 0x00ffffff;
17657 inst.instruction |= high8;
17658 /* Encode 'U' bit which indicates signedness. */
17659 inst.instruction |= (unsigned_p ? 1 : 0) << 4;
17660 /* Re-encode operand2 if it's indexed scalar operand. What has been encoded
17661 from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
17662 the instruction encoding. */
17663 if (inst.operands[2].isscalar)
17665 inst.instruction &= 0xffffffd0;
17666 inst.instruction |= LOW4 (scalar_oprd2);
17667 inst.instruction |= HI1 (scalar_oprd2) << 5;
17671 /* Dot Product instructions for signed integer. */
17674 do_neon_dotproduct_s (void)
17676 return do_neon_dotproduct (0);
17679 /* Dot Product instructions for unsigned integer. */
17682 do_neon_dotproduct_u (void)
17684 return do_neon_dotproduct (1);
17687 /* Crypto v1 instructions. */
17689 do_crypto_2op_1 (unsigned elttype, int op)
17691 set_it_insn_type (OUTSIDE_IT_INSN);
17693 if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
17699 NEON_ENCODE (INTEGER, inst);
17700 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
17701 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
17702 inst.instruction |= LOW4 (inst.operands[1].reg);
17703 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
17705 inst.instruction |= op << 6;
17708 inst.instruction |= 0xfc000000;
17710 inst.instruction |= 0xf0000000;
17714 do_crypto_3op_1 (int u, int op)
17716 set_it_insn_type (OUTSIDE_IT_INSN);
17718 if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
17719 N_32 | N_UNT | N_KEY).type == NT_invtype)
17724 NEON_ENCODE (INTEGER, inst);
17725 neon_three_same (1, u, 8 << op);
17731 do_crypto_2op_1 (N_8, 0);
17737 do_crypto_2op_1 (N_8, 1);
17743 do_crypto_2op_1 (N_8, 2);
17749 do_crypto_2op_1 (N_8, 3);
17755 do_crypto_3op_1 (0, 0);
17761 do_crypto_3op_1 (0, 1);
17767 do_crypto_3op_1 (0, 2);
17773 do_crypto_3op_1 (0, 3);
17779 do_crypto_3op_1 (1, 0);
17785 do_crypto_3op_1 (1, 1);
17789 do_sha256su1 (void)
17791 do_crypto_3op_1 (1, 2);
17797 do_crypto_2op_1 (N_32, -1);
17803 do_crypto_2op_1 (N_32, 0);
17807 do_sha256su0 (void)
17809 do_crypto_2op_1 (N_32, 1);
17813 do_crc32_1 (unsigned int poly, unsigned int sz)
17815 unsigned int Rd = inst.operands[0].reg;
17816 unsigned int Rn = inst.operands[1].reg;
17817 unsigned int Rm = inst.operands[2].reg;
17819 set_it_insn_type (OUTSIDE_IT_INSN);
17820 inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
17821 inst.instruction |= LOW4 (Rn) << 16;
17822 inst.instruction |= LOW4 (Rm);
17823 inst.instruction |= sz << (thumb_mode ? 4 : 21);
17824 inst.instruction |= poly << (thumb_mode ? 20 : 9);
17826 if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
17827 as_warn (UNPRED_REG ("r15"));
17869 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
17871 neon_check_type (2, NS_FD, N_S32, N_F64);
17872 do_vfp_sp_dp_cvt ();
17873 do_vfp_cond_or_thumb ();
17877 /* Overall per-instruction processing. */
17879 /* We need to be able to fix up arbitrary expressions in some statements.
17880 This is so that we can handle symbols that are an arbitrary distance from
17881 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
17882 which returns part of an address in a form which will be valid for
17883 a data instruction. We do this by pushing the expression into a symbol
17884 in the expr_section, and creating a fix for that. */
17887 fix_new_arm (fragS * frag,
17901 /* Create an absolute valued symbol, so we have something to
17902 refer to in the object file. Unfortunately for us, gas's
17903 generic expression parsing will already have folded out
17904 any use of .set foo/.type foo %function that may have
17905 been used to set type information of the target location,
17906 that's being specified symbolically. We have to presume
17907 the user knows what they are doing. */
17911 sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);
17913 symbol = symbol_find_or_make (name);
17914 S_SET_SEGMENT (symbol, absolute_section);
17915 symbol_set_frag (symbol, &zero_address_frag);
17916 S_SET_VALUE (symbol, exp->X_add_number);
17917 exp->X_op = O_symbol;
17918 exp->X_add_symbol = symbol;
17919 exp->X_add_number = 0;
17925 new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
17926 (enum bfd_reloc_code_real) reloc);
17930 new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
17931 pc_rel, (enum bfd_reloc_code_real) reloc);
17935 /* Mark whether the fix is to a THUMB instruction, or an ARM
17937 new_fix->tc_fix_data = thumb_mode;
17940 /* Create a frg for an instruction requiring relaxation. */
17942 output_relax_insn (void)
17948 /* The size of the instruction is unknown, so tie the debug info to the
17949 start of the instruction. */
17950 dwarf2_emit_insn (0);
17952 switch (inst.reloc.exp.X_op)
17955 sym = inst.reloc.exp.X_add_symbol;
17956 offset = inst.reloc.exp.X_add_number;
17960 offset = inst.reloc.exp.X_add_number;
17963 sym = make_expr_symbol (&inst.reloc.exp);
17967 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
17968 inst.relax, sym, offset, NULL/*offset, opcode*/);
17969 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
17972 /* Write a 32-bit thumb instruction to buf. */
17974 put_thumb32_insn (char * buf, unsigned long insn)
17976 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
17977 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
17981 output_inst (const char * str)
17987 as_bad ("%s -- `%s'", inst.error, str);
17992 output_relax_insn ();
17995 if (inst.size == 0)
17998 to = frag_more (inst.size);
17999 /* PR 9814: Record the thumb mode into the current frag so that we know
18000 what type of NOP padding to use, if necessary. We override any previous
18001 setting so that if the mode has changed then the NOPS that we use will
18002 match the encoding of the last instruction in the frag. */
18003 frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
18005 if (thumb_mode && (inst.size > THUMB_SIZE))
18007 gas_assert (inst.size == (2 * THUMB_SIZE));
18008 put_thumb32_insn (to, inst.instruction);
18010 else if (inst.size > INSN_SIZE)
18012 gas_assert (inst.size == (2 * INSN_SIZE));
18013 md_number_to_chars (to, inst.instruction, INSN_SIZE);
18014 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
18017 md_number_to_chars (to, inst.instruction, inst.size);
18019 if (inst.reloc.type != BFD_RELOC_UNUSED)
18020 fix_new_arm (frag_now, to - frag_now->fr_literal,
18021 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
18024 dwarf2_emit_insn (inst.size);
18028 output_it_inst (int cond, int mask, char * to)
18030 unsigned long instruction = 0xbf00;
18033 instruction |= mask;
18034 instruction |= cond << 4;
18038 to = frag_more (2);
18040 dwarf2_emit_insn (2);
18044 md_number_to_chars (to, instruction, 2);
18049 /* Tag values used in struct asm_opcode's tag field. */
18052 OT_unconditional, /* Instruction cannot be conditionalized.
18053 The ARM condition field is still 0xE. */
18054 OT_unconditionalF, /* Instruction cannot be conditionalized
18055 and carries 0xF in its ARM condition field. */
18056 OT_csuffix, /* Instruction takes a conditional suffix. */
18057 OT_csuffixF, /* Some forms of the instruction take a conditional
18058 suffix, others place 0xF where the condition field
18060 OT_cinfix3, /* Instruction takes a conditional infix,
18061 beginning at character index 3. (In
18062 unified mode, it becomes a suffix.) */
18063 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
18064 tsts, cmps, cmns, and teqs. */
18065 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
18066 character index 3, even in unified mode. Used for
18067 legacy instructions where suffix and infix forms
18068 may be ambiguous. */
18069 OT_csuf_or_in3, /* Instruction takes either a conditional
18070 suffix or an infix at character index 3. */
18071 OT_odd_infix_unc, /* This is the unconditional variant of an
18072 instruction that takes a conditional infix
18073 at an unusual position. In unified mode,
18074 this variant will accept a suffix. */
18075 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
18076 are the conditional variants of instructions that
18077 take conditional infixes in unusual positions.
18078 The infix appears at character index
18079 (tag - OT_odd_infix_0). These are not accepted
18080 in unified mode. */
18083 /* Subroutine of md_assemble, responsible for looking up the primary
18084 opcode from the mnemonic the user wrote. STR points to the
18085 beginning of the mnemonic.
18087 This is not simply a hash table lookup, because of conditional
18088 variants. Most instructions have conditional variants, which are
18089 expressed with a _conditional affix_ to the mnemonic. If we were
18090 to encode each conditional variant as a literal string in the opcode
18091 table, it would have approximately 20,000 entries.
18093 Most mnemonics take this affix as a suffix, and in unified syntax,
18094 'most' is upgraded to 'all'. However, in the divided syntax, some
18095 instructions take the affix as an infix, notably the s-variants of
18096 the arithmetic instructions. Of those instructions, all but six
18097 have the infix appear after the third character of the mnemonic.
18099 Accordingly, the algorithm for looking up primary opcodes given
18102 1. Look up the identifier in the opcode table.
18103 If we find a match, go to step U.
18105 2. Look up the last two characters of the identifier in the
18106 conditions table. If we find a match, look up the first N-2
18107 characters of the identifier in the opcode table. If we
18108 find a match, go to step CE.
18110 3. Look up the fourth and fifth characters of the identifier in
18111 the conditions table. If we find a match, extract those
18112 characters from the identifier, and look up the remaining
18113 characters in the opcode table. If we find a match, go
18118 U. Examine the tag field of the opcode structure, in case this is
18119 one of the six instructions with its conditional infix in an
18120 unusual place. If it is, the tag tells us where to find the
18121 infix; look it up in the conditions table and set inst.cond
18122 accordingly. Otherwise, this is an unconditional instruction.
18123 Again set inst.cond accordingly. Return the opcode structure.
18125 CE. Examine the tag field to make sure this is an instruction that
18126 should receive a conditional suffix. If it is not, fail.
18127 Otherwise, set inst.cond from the suffix we already looked up,
18128 and return the opcode structure.
18130 CM. Examine the tag field to make sure this is an instruction that
18131 should receive a conditional infix after the third character.
18132 If it is not, fail. Otherwise, undo the edits to the current
18133 line of input and proceed as for case CE. */
18135 static const struct asm_opcode *
18136 opcode_lookup (char **str)
18140 const struct asm_opcode *opcode;
18141 const struct asm_cond *cond;
18144 /* Scan up to the end of the mnemonic, which must end in white space,
18145 '.' (in unified mode, or for Neon/VFP instructions), or end of string. */
18146 for (base = end = *str; *end != '\0'; end++)
18147 if (*end == ' ' || *end == '.')
18153 /* Handle a possible width suffix and/or Neon type suffix. */
18158 /* The .w and .n suffixes are only valid if the unified syntax is in
18160 if (unified_syntax && end[1] == 'w')
18162 else if (unified_syntax && end[1] == 'n')
18167 inst.vectype.elems = 0;
18169 *str = end + offset;
18171 if (end[offset] == '.')
18173 /* See if we have a Neon type suffix (possible in either unified or
18174 non-unified ARM syntax mode). */
18175 if (parse_neon_type (&inst.vectype, str) == FAIL)
18178 else if (end[offset] != '\0' && end[offset] != ' ')
18184 /* Look for unaffixed or special-case affixed mnemonic. */
18185 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
18190 if (opcode->tag < OT_odd_infix_0)
18192 inst.cond = COND_ALWAYS;
18196 if (warn_on_deprecated && unified_syntax)
18197 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
18198 affix = base + (opcode->tag - OT_odd_infix_0);
18199 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
18202 inst.cond = cond->value;
18206 /* Cannot have a conditional suffix on a mnemonic of less than two
18208 if (end - base < 3)
18211 /* Look for suffixed mnemonic. */
18213 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
18214 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
18216 if (opcode && cond)
18219 switch (opcode->tag)
18221 case OT_cinfix3_legacy:
18222 /* Ignore conditional suffixes matched on infix only mnemonics. */
18226 case OT_cinfix3_deprecated:
18227 case OT_odd_infix_unc:
18228 if (!unified_syntax)
18230 /* Fall through. */
18234 case OT_csuf_or_in3:
18235 inst.cond = cond->value;
18238 case OT_unconditional:
18239 case OT_unconditionalF:
18241 inst.cond = cond->value;
18244 /* Delayed diagnostic. */
18245 inst.error = BAD_COND;
18246 inst.cond = COND_ALWAYS;
18255 /* Cannot have a usual-position infix on a mnemonic of less than
18256 six characters (five would be a suffix). */
18257 if (end - base < 6)
18260 /* Look for infixed mnemonic in the usual position. */
18262 cond = (const struct asm_cond *) hash_find_n (arm_cond_hsh, affix, 2);
18266 memcpy (save, affix, 2);
18267 memmove (affix, affix + 2, (end - affix) - 2);
18268 opcode = (const struct asm_opcode *) hash_find_n (arm_ops_hsh, base,
18270 memmove (affix + 2, affix, (end - affix) - 2);
18271 memcpy (affix, save, 2);
18274 && (opcode->tag == OT_cinfix3
18275 || opcode->tag == OT_cinfix3_deprecated
18276 || opcode->tag == OT_csuf_or_in3
18277 || opcode->tag == OT_cinfix3_legacy))
18280 if (warn_on_deprecated && unified_syntax
18281 && (opcode->tag == OT_cinfix3
18282 || opcode->tag == OT_cinfix3_deprecated))
18283 as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
18285 inst.cond = cond->value;
18292 /* This function generates an initial IT instruction, leaving its block
18293 virtually open for the new instructions. Eventually,
18294 the mask will be updated by now_it_add_mask () each time
18295 a new instruction needs to be included in the IT block.
18296 Finally, the block is closed with close_automatic_it_block ().
18297 The block closure can be requested either from md_assemble (),
18298 a tencode (), or due to a label hook. */
18301 new_automatic_it_block (int cond)
18303 now_it.state = AUTOMATIC_IT_BLOCK;
18304 now_it.mask = 0x18;
18306 now_it.block_length = 1;
18307 mapping_state (MAP_THUMB);
18308 now_it.insn = output_it_inst (cond, now_it.mask, NULL);
18309 now_it.warn_deprecated = FALSE;
18310 now_it.insn_cond = TRUE;
18313 /* Close an automatic IT block.
18314 See comments in new_automatic_it_block (). */
18317 close_automatic_it_block (void)
18319 now_it.mask = 0x10;
18320 now_it.block_length = 0;
18323 /* Update the mask of the current automatically-generated IT
18324 instruction. See comments in new_automatic_it_block (). */
18327 now_it_add_mask (int cond)
18329 #define CLEAR_BIT(value, nbit) ((value) & ~(1 << (nbit)))
18330 #define SET_BIT_VALUE(value, bitvalue, nbit) (CLEAR_BIT (value, nbit) \
18331 | ((bitvalue) << (nbit)))
18332 const int resulting_bit = (cond & 1);
18334 now_it.mask &= 0xf;
18335 now_it.mask = SET_BIT_VALUE (now_it.mask,
18337 (5 - now_it.block_length));
18338 now_it.mask = SET_BIT_VALUE (now_it.mask,
18340 ((5 - now_it.block_length) - 1) );
18341 output_it_inst (now_it.cc, now_it.mask, now_it.insn);
18344 #undef SET_BIT_VALUE
18347 /* The IT blocks handling machinery is accessed through the these functions:
18348 it_fsm_pre_encode () from md_assemble ()
18349 set_it_insn_type () optional, from the tencode functions
18350 set_it_insn_type_last () ditto
18351 in_it_block () ditto
18352 it_fsm_post_encode () from md_assemble ()
18353 force_automatic_it_block_close () from label handling functions
18356 1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
18357 initializing the IT insn type with a generic initial value depending
18358 on the inst.condition.
18359 2) During the tencode function, two things may happen:
18360 a) The tencode function overrides the IT insn type by
18361 calling either set_it_insn_type (type) or set_it_insn_type_last ().
18362 b) The tencode function queries the IT block state by
18363 calling in_it_block () (i.e. to determine narrow/not narrow mode).
18365 Both set_it_insn_type and in_it_block run the internal FSM state
18366 handling function (handle_it_state), because: a) setting the IT insn
18367 type may incur in an invalid state (exiting the function),
18368 and b) querying the state requires the FSM to be updated.
18369 Specifically we want to avoid creating an IT block for conditional
18370 branches, so it_fsm_pre_encode is actually a guess and we can't
18371 determine whether an IT block is required until the tencode () routine
18372 has decided what type of instruction this actually it.
18373 Because of this, if set_it_insn_type and in_it_block have to be used,
18374 set_it_insn_type has to be called first.
18376 set_it_insn_type_last () is a wrapper of set_it_insn_type (type), that
18377 determines the insn IT type depending on the inst.cond code.
18378 When a tencode () routine encodes an instruction that can be
18379 either outside an IT block, or, in the case of being inside, has to be
18380 the last one, set_it_insn_type_last () will determine the proper
18381 IT instruction type based on the inst.cond code. Otherwise,
18382 set_it_insn_type can be called for overriding that logic or
18383 for covering other cases.
18385 Calling handle_it_state () may not transition the IT block state to
18386 OUTSIDE_IT_BLOCK immediately, since the (current) state could be
18387 still queried. Instead, if the FSM determines that the state should
18388 be transitioned to OUTSIDE_IT_BLOCK, a flag is marked to be closed
18389 after the tencode () function: that's what it_fsm_post_encode () does.
18391 Since in_it_block () calls the state handling function to get an
18392 updated state, an error may occur (due to invalid insns combination).
18393 In that case, inst.error is set.
18394 Therefore, inst.error has to be checked after the execution of
18395 the tencode () routine.
18397 3) Back in md_assemble(), it_fsm_post_encode () is called to commit
18398 any pending state change (if any) that didn't take place in
18399 handle_it_state () as explained above. */
18402 it_fsm_pre_encode (void)
18404 if (inst.cond != COND_ALWAYS)
18405 inst.it_insn_type = INSIDE_IT_INSN;
18407 inst.it_insn_type = OUTSIDE_IT_INSN;
18409 now_it.state_handled = 0;
18412 /* IT state FSM handling function. */
18415 handle_it_state (void)
18417 now_it.state_handled = 1;
18418 now_it.insn_cond = FALSE;
18420 switch (now_it.state)
18422 case OUTSIDE_IT_BLOCK:
18423 switch (inst.it_insn_type)
18425 case OUTSIDE_IT_INSN:
18428 case INSIDE_IT_INSN:
18429 case INSIDE_IT_LAST_INSN:
18430 if (thumb_mode == 0)
18433 && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
18434 as_tsktsk (_("Warning: conditional outside an IT block"\
18439 if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
18440 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
18442 /* Automatically generate the IT instruction. */
18443 new_automatic_it_block (inst.cond);
18444 if (inst.it_insn_type == INSIDE_IT_LAST_INSN)
18445 close_automatic_it_block ();
18449 inst.error = BAD_OUT_IT;
18455 case IF_INSIDE_IT_LAST_INSN:
18456 case NEUTRAL_IT_INSN:
18460 now_it.state = MANUAL_IT_BLOCK;
18461 now_it.block_length = 0;
18466 case AUTOMATIC_IT_BLOCK:
18467 /* Three things may happen now:
18468 a) We should increment current it block size;
18469 b) We should close current it block (closing insn or 4 insns);
18470 c) We should close current it block and start a new one (due
18471 to incompatible conditions or
18472 4 insns-length block reached). */
18474 switch (inst.it_insn_type)
18476 case OUTSIDE_IT_INSN:
18477 /* The closure of the block shall happen immediately,
18478 so any in_it_block () call reports the block as closed. */
18479 force_automatic_it_block_close ();
18482 case INSIDE_IT_INSN:
18483 case INSIDE_IT_LAST_INSN:
18484 case IF_INSIDE_IT_LAST_INSN:
18485 now_it.block_length++;
18487 if (now_it.block_length > 4
18488 || !now_it_compatible (inst.cond))
18490 force_automatic_it_block_close ();
18491 if (inst.it_insn_type != IF_INSIDE_IT_LAST_INSN)
18492 new_automatic_it_block (inst.cond);
18496 now_it.insn_cond = TRUE;
18497 now_it_add_mask (inst.cond);
18500 if (now_it.state == AUTOMATIC_IT_BLOCK
18501 && (inst.it_insn_type == INSIDE_IT_LAST_INSN
18502 || inst.it_insn_type == IF_INSIDE_IT_LAST_INSN))
18503 close_automatic_it_block ();
18506 case NEUTRAL_IT_INSN:
18507 now_it.block_length++;
18508 now_it.insn_cond = TRUE;
18510 if (now_it.block_length > 4)
18511 force_automatic_it_block_close ();
18513 now_it_add_mask (now_it.cc & 1);
18517 close_automatic_it_block ();
18518 now_it.state = MANUAL_IT_BLOCK;
18523 case MANUAL_IT_BLOCK:
18525 /* Check conditional suffixes. */
18526 const int cond = now_it.cc ^ ((now_it.mask >> 4) & 1) ^ 1;
18529 now_it.mask &= 0x1f;
18530 is_last = (now_it.mask == 0x10);
18531 now_it.insn_cond = TRUE;
18533 switch (inst.it_insn_type)
18535 case OUTSIDE_IT_INSN:
18536 inst.error = BAD_NOT_IT;
18539 case INSIDE_IT_INSN:
18540 if (cond != inst.cond)
18542 inst.error = BAD_IT_COND;
18547 case INSIDE_IT_LAST_INSN:
18548 case IF_INSIDE_IT_LAST_INSN:
18549 if (cond != inst.cond)
18551 inst.error = BAD_IT_COND;
18556 inst.error = BAD_BRANCH;
18561 case NEUTRAL_IT_INSN:
18562 /* The BKPT instruction is unconditional even in an IT block. */
18566 inst.error = BAD_IT_IT;
18576 struct depr_insn_mask
18578 unsigned long pattern;
18579 unsigned long mask;
18580 const char* description;
18583 /* List of 16-bit instruction patterns deprecated in an IT block in
18585 static const struct depr_insn_mask depr_it_insns[] = {
18586 { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
18587 { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
18588 { 0xa000, 0xb800, N_("ADR") },
18589 { 0x4800, 0xf800, N_("Literal loads") },
18590 { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
18591 { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
18592 /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
18593 field in asm_opcode. 'tvalue' is used at the stage this check happen. */
18594 { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
18599 it_fsm_post_encode (void)
18603 if (!now_it.state_handled)
18604 handle_it_state ();
18606 if (now_it.insn_cond
18607 && !now_it.warn_deprecated
18608 && warn_on_deprecated
18609 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
18610 && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
18612 if (inst.instruction >= 0x10000)
18614 as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
18615 "performance deprecated in ARMv8-A and ARMv8-R"));
18616 now_it.warn_deprecated = TRUE;
18620 const struct depr_insn_mask *p = depr_it_insns;
18622 while (p->mask != 0)
18624 if ((inst.instruction & p->mask) == p->pattern)
18626 as_tsktsk (_("IT blocks containing 16-bit Thumb "
18627 "instructions of the following class are "
18628 "performance deprecated in ARMv8-A and "
18629 "ARMv8-R: %s"), p->description);
18630 now_it.warn_deprecated = TRUE;
18638 if (now_it.block_length > 1)
18640 as_tsktsk (_("IT blocks containing more than one conditional "
18641 "instruction are performance deprecated in ARMv8-A and "
18643 now_it.warn_deprecated = TRUE;
18647 is_last = (now_it.mask == 0x10);
18650 now_it.state = OUTSIDE_IT_BLOCK;
18656 force_automatic_it_block_close (void)
18658 if (now_it.state == AUTOMATIC_IT_BLOCK)
18660 close_automatic_it_block ();
18661 now_it.state = OUTSIDE_IT_BLOCK;
18669 if (!now_it.state_handled)
18670 handle_it_state ();
18672 return now_it.state != OUTSIDE_IT_BLOCK;
18675 /* Whether OPCODE only has T32 encoding. Since this function is only used by
18676 t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
18677 here, hence the "known" in the function name. */
18680 known_t32_only_insn (const struct asm_opcode *opcode)
18682 /* Original Thumb-1 wide instruction. */
18683 if (opcode->tencode == do_t_blx
18684 || opcode->tencode == do_t_branch23
18685 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
18686 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
18689 /* Wide-only instruction added to ARMv8-M Baseline. */
18690 if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
18691 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
18692 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
18693 || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
18699 /* Whether wide instruction variant can be used if available for a valid OPCODE
18703 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
18705 if (known_t32_only_insn (opcode))
18708 /* Instruction with narrow and wide encoding added to ARMv8-M. Availability
18709 of variant T3 of B.W is checked in do_t_branch. */
18710 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18711 && opcode->tencode == do_t_branch)
18714 /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit. */
18715 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
18716 && opcode->tencode == do_t_mov_cmp
18717 /* Make sure CMP instruction is not affected. */
18718 && opcode->aencode == do_mov)
18721 /* Wide instruction variants of all instructions with narrow *and* wide
18722 variants become available with ARMv6t2. Other opcodes are either
18723 narrow-only or wide-only and are thus available if OPCODE is valid. */
18724 if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
18727 /* OPCODE with narrow only instruction variant or wide variant not
18733 md_assemble (char *str)
18736 const struct asm_opcode * opcode;
18738 /* Align the previous label if needed. */
18739 if (last_label_seen != NULL)
18741 symbol_set_frag (last_label_seen, frag_now);
18742 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
18743 S_SET_SEGMENT (last_label_seen, now_seg);
18746 memset (&inst, '\0', sizeof (inst));
18747 inst.reloc.type = BFD_RELOC_UNUSED;
18749 opcode = opcode_lookup (&p);
18752 /* It wasn't an instruction, but it might be a register alias of
18753 the form alias .req reg, or a Neon .dn/.qn directive. */
18754 if (! create_register_alias (str, p)
18755 && ! create_neon_reg_alias (str, p))
18756 as_bad (_("bad instruction `%s'"), str);
18761 if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
18762 as_tsktsk (_("s suffix on comparison instruction is deprecated"));
18764 /* The value which unconditional instructions should have in place of the
18765 condition field. */
18766 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
18770 arm_feature_set variant;
18772 variant = cpu_variant;
18773 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
18774 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
18775 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
18776 /* Check that this instruction is supported for this CPU. */
18777 if (!opcode->tvariant
18778 || (thumb_mode == 1
18779 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
18781 if (opcode->tencode == do_t_swi)
18782 as_bad (_("SVC is not permitted on this architecture"));
18784 as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
18787 if (inst.cond != COND_ALWAYS && !unified_syntax
18788 && opcode->tencode != do_t_branch)
18790 as_bad (_("Thumb does not support conditional execution"));
18794 /* Two things are addressed here:
18795 1) Implicit require narrow instructions on Thumb-1.
18796 This avoids relaxation accidentally introducing Thumb-2
18798 2) Reject wide instructions in non Thumb-2 cores.
18800 Only instructions with narrow and wide variants need to be handled
18801 but selecting all non wide-only instructions is easier. */
18802 if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
18803 && !t32_insn_ok (variant, opcode))
18805 if (inst.size_req == 0)
18807 else if (inst.size_req == 4)
18809 if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
18810 as_bad (_("selected processor does not support 32bit wide "
18811 "variant of instruction `%s'"), str);
18813 as_bad (_("selected processor does not support `%s' in "
18814 "Thumb-2 mode"), str);
18819 inst.instruction = opcode->tvalue;
18821 if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
18823 /* Prepare the it_insn_type for those encodings that don't set
18825 it_fsm_pre_encode ();
18827 opcode->tencode ();
18829 it_fsm_post_encode ();
18832 if (!(inst.error || inst.relax))
18834 gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
18835 inst.size = (inst.instruction > 0xffff ? 4 : 2);
18836 if (inst.size_req && inst.size_req != inst.size)
18838 as_bad (_("cannot honor width suffix -- `%s'"), str);
18843 /* Something has gone badly wrong if we try to relax a fixed size
18845 gas_assert (inst.size_req == 0 || !inst.relax);
18847 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
18848 *opcode->tvariant);
18849 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
18850 set those bits when Thumb-2 32-bit instructions are seen. The impact
18851 of relaxable instructions will be considered later after we finish all
18853 if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
18854 variant = arm_arch_none;
18856 variant = cpu_variant;
18857 if (inst.size == 4 && !t32_insn_ok (variant, opcode))
18858 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
18861 check_neon_suffixes;
18865 mapping_state (MAP_THUMB);
18868 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
18872 /* bx is allowed on v5 cores, and sometimes on v4 cores. */
18873 is_bx = (opcode->aencode == do_bx);
18875 /* Check that this instruction is supported for this CPU. */
18876 if (!(is_bx && fix_v4bx)
18877 && !(opcode->avariant &&
18878 ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
18880 as_bad (_("selected processor does not support `%s' in ARM mode"), str);
18885 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
18889 inst.instruction = opcode->avalue;
18890 if (opcode->tag == OT_unconditionalF)
18891 inst.instruction |= 0xFU << 28;
18893 inst.instruction |= inst.cond << 28;
18894 inst.size = INSN_SIZE;
18895 if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
18897 it_fsm_pre_encode ();
18898 opcode->aencode ();
18899 it_fsm_post_encode ();
18901 /* Arm mode bx is marked as both v4T and v5 because it's still required
18902 on a hypothetical non-thumb v5 core. */
18904 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
18906 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
18907 *opcode->avariant);
18909 check_neon_suffixes;
18913 mapping_state (MAP_ARM);
18918 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
18926 check_it_blocks_finished (void)
18931 for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
18932 if (seg_info (sect)->tc_segment_info_data.current_it.state
18933 == MANUAL_IT_BLOCK)
18935 as_warn (_("section '%s' finished with an open IT block."),
18939 if (now_it.state == MANUAL_IT_BLOCK)
18940 as_warn (_("file finished with an open IT block."));
18944 /* Various frobbings of labels and their addresses. */
18947 arm_start_line_hook (void)
18949 last_label_seen = NULL;
18953 arm_frob_label (symbolS * sym)
18955 last_label_seen = sym;
18957 ARM_SET_THUMB (sym, thumb_mode);
18959 #if defined OBJ_COFF || defined OBJ_ELF
18960 ARM_SET_INTERWORK (sym, support_interwork);
18963 force_automatic_it_block_close ();
18965 /* Note - do not allow local symbols (.Lxxx) to be labelled
18966 as Thumb functions. This is because these labels, whilst
18967 they exist inside Thumb code, are not the entry points for
18968 possible ARM->Thumb calls. Also, these labels can be used
18969 as part of a computed goto or switch statement. eg gcc
18970 can generate code that looks like this:
18972 ldr r2, [pc, .Laaa]
18982 The first instruction loads the address of the jump table.
18983 The second instruction converts a table index into a byte offset.
18984 The third instruction gets the jump address out of the table.
18985 The fourth instruction performs the jump.
18987 If the address stored at .Laaa is that of a symbol which has the
18988 Thumb_Func bit set, then the linker will arrange for this address
18989 to have the bottom bit set, which in turn would mean that the
18990 address computation performed by the third instruction would end
18991 up with the bottom bit set. Since the ARM is capable of unaligned
18992 word loads, the instruction would then load the incorrect address
18993 out of the jump table, and chaos would ensue. */
18994 if (label_is_thumb_function_name
18995 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
18996 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
18998 /* When the address of a Thumb function is taken the bottom
18999 bit of that address should be set. This will allow
19000 interworking between Arm and Thumb functions to work
19003 THUMB_SET_FUNC (sym, 1);
19005 label_is_thumb_function_name = FALSE;
19008 dwarf2_emit_label (sym);
19012 arm_data_in_code (void)
19014 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
19016 *input_line_pointer = '/';
19017 input_line_pointer += 5;
19018 *input_line_pointer = 0;
19026 arm_canonicalize_symbol_name (char * name)
19030 if (thumb_mode && (len = strlen (name)) > 5
19031 && streq (name + len - 5, "/data"))
19032 *(name + len - 5) = 0;
19037 /* Table of all register names defined by default. The user can
19038 define additional names with .req. Note that all register names
19039 should appear in both upper and lowercase variants. Some registers
19040 also have mixed-case names. */
19042 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
19043 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
19044 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
19045 #define REGSET(p,t) \
19046 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
19047 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
19048 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
19049 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
19050 #define REGSETH(p,t) \
19051 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
19052 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
19053 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
19054 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
19055 #define REGSET2(p,t) \
19056 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
19057 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
19058 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
19059 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
19060 #define SPLRBANK(base,bank,t) \
19061 REGDEF(lr_##bank, 768|((base+0)<<16), t), \
19062 REGDEF(sp_##bank, 768|((base+1)<<16), t), \
19063 REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
19064 REGDEF(LR_##bank, 768|((base+0)<<16), t), \
19065 REGDEF(SP_##bank, 768|((base+1)<<16), t), \
19066 REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
19068 static const struct reg_entry reg_names[] =
19070 /* ARM integer registers. */
19071 REGSET(r, RN), REGSET(R, RN),
19073 /* ATPCS synonyms. */
19074 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
19075 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
19076 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
19078 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
19079 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
19080 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
19082 /* Well-known aliases. */
19083 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
19084 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
19086 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
19087 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
19089 /* Coprocessor numbers. */
19090 REGSET(p, CP), REGSET(P, CP),
19092 /* Coprocessor register numbers. The "cr" variants are for backward
19094 REGSET(c, CN), REGSET(C, CN),
19095 REGSET(cr, CN), REGSET(CR, CN),
19097 /* ARM banked registers. */
19098 REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
19099 REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
19100 REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
19101 REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
19102 REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
19103 REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
19104 REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),
19106 REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
19107 REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
19108 REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
19109 REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
19110 REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
19111 REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
19112 REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
19113 REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),
19115 SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
19116 SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
19117 SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
19118 SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
19119 SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
19120 REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
19121 REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
19122 REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
19123 REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),
19125 /* FPA registers. */
19126 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
19127 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
19129 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
19130 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
19132 /* VFP SP registers. */
19133 REGSET(s,VFS), REGSET(S,VFS),
19134 REGSETH(s,VFS), REGSETH(S,VFS),
19136 /* VFP DP Registers. */
19137 REGSET(d,VFD), REGSET(D,VFD),
19138 /* Extra Neon DP registers. */
19139 REGSETH(d,VFD), REGSETH(D,VFD),
19141 /* Neon QP registers. */
19142 REGSET2(q,NQ), REGSET2(Q,NQ),
19144 /* VFP control registers. */
19145 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
19146 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
19147 REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
19148 REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
19149 REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
19150 REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
19151 REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),
19153 /* Maverick DSP coprocessor registers. */
19154 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
19155 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
19157 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
19158 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
19159 REGDEF(dspsc,0,DSPSC),
19161 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
19162 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
19163 REGDEF(DSPSC,0,DSPSC),
19165 /* iWMMXt data registers - p0, c0-15. */
19166 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
19168 /* iWMMXt control registers - p1, c0-3. */
19169 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
19170 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
19171 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
19172 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
19174 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
19175 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
19176 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
19177 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
19178 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
19180 /* XScale accumulator registers. */
19181 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
19187 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
19188 within psr_required_here. */
19189 static const struct asm_psr psrs[] =
19191 /* Backward compatibility notation. Note that "all" is no longer
19192 truly all possible PSR bits. */
19193 {"all", PSR_c | PSR_f},
19197 /* Individual flags. */
19203 /* Combinations of flags. */
19204 {"fs", PSR_f | PSR_s},
19205 {"fx", PSR_f | PSR_x},
19206 {"fc", PSR_f | PSR_c},
19207 {"sf", PSR_s | PSR_f},
19208 {"sx", PSR_s | PSR_x},
19209 {"sc", PSR_s | PSR_c},
19210 {"xf", PSR_x | PSR_f},
19211 {"xs", PSR_x | PSR_s},
19212 {"xc", PSR_x | PSR_c},
19213 {"cf", PSR_c | PSR_f},
19214 {"cs", PSR_c | PSR_s},
19215 {"cx", PSR_c | PSR_x},
19216 {"fsx", PSR_f | PSR_s | PSR_x},
19217 {"fsc", PSR_f | PSR_s | PSR_c},
19218 {"fxs", PSR_f | PSR_x | PSR_s},
19219 {"fxc", PSR_f | PSR_x | PSR_c},
19220 {"fcs", PSR_f | PSR_c | PSR_s},
19221 {"fcx", PSR_f | PSR_c | PSR_x},
19222 {"sfx", PSR_s | PSR_f | PSR_x},
19223 {"sfc", PSR_s | PSR_f | PSR_c},
19224 {"sxf", PSR_s | PSR_x | PSR_f},
19225 {"sxc", PSR_s | PSR_x | PSR_c},
19226 {"scf", PSR_s | PSR_c | PSR_f},
19227 {"scx", PSR_s | PSR_c | PSR_x},
19228 {"xfs", PSR_x | PSR_f | PSR_s},
19229 {"xfc", PSR_x | PSR_f | PSR_c},
19230 {"xsf", PSR_x | PSR_s | PSR_f},
19231 {"xsc", PSR_x | PSR_s | PSR_c},
19232 {"xcf", PSR_x | PSR_c | PSR_f},
19233 {"xcs", PSR_x | PSR_c | PSR_s},
19234 {"cfs", PSR_c | PSR_f | PSR_s},
19235 {"cfx", PSR_c | PSR_f | PSR_x},
19236 {"csf", PSR_c | PSR_s | PSR_f},
19237 {"csx", PSR_c | PSR_s | PSR_x},
19238 {"cxf", PSR_c | PSR_x | PSR_f},
19239 {"cxs", PSR_c | PSR_x | PSR_s},
19240 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
19241 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
19242 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
19243 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
19244 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
19245 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
19246 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
19247 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
19248 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
19249 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
19250 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
19251 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
19252 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
19253 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
19254 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
19255 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
19256 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
19257 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
19258 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
19259 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
19260 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
19261 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
19262 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
19263 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
19266 /* Table of V7M psr names. */
19267 static const struct asm_psr v7m_psrs[] =
19269 {"apsr", 0x0 }, {"APSR", 0x0 },
19270 {"iapsr", 0x1 }, {"IAPSR", 0x1 },
19271 {"eapsr", 0x2 }, {"EAPSR", 0x2 },
19272 {"psr", 0x3 }, {"PSR", 0x3 },
19273 {"xpsr", 0x3 }, {"XPSR", 0x3 }, {"xPSR", 3 },
19274 {"ipsr", 0x5 }, {"IPSR", 0x5 },
19275 {"epsr", 0x6 }, {"EPSR", 0x6 },
19276 {"iepsr", 0x7 }, {"IEPSR", 0x7 },
19277 {"msp", 0x8 }, {"MSP", 0x8 },
19278 {"psp", 0x9 }, {"PSP", 0x9 },
19279 {"msplim", 0xa }, {"MSPLIM", 0xa },
19280 {"psplim", 0xb }, {"PSPLIM", 0xb },
19281 {"primask", 0x10}, {"PRIMASK", 0x10},
19282 {"basepri", 0x11}, {"BASEPRI", 0x11},
19283 {"basepri_max", 0x12}, {"BASEPRI_MAX", 0x12},
19284 {"faultmask", 0x13}, {"FAULTMASK", 0x13},
19285 {"control", 0x14}, {"CONTROL", 0x14},
19286 {"msp_ns", 0x88}, {"MSP_NS", 0x88},
19287 {"psp_ns", 0x89}, {"PSP_NS", 0x89},
19288 {"msplim_ns", 0x8a}, {"MSPLIM_NS", 0x8a},
19289 {"psplim_ns", 0x8b}, {"PSPLIM_NS", 0x8b},
19290 {"primask_ns", 0x90}, {"PRIMASK_NS", 0x90},
19291 {"basepri_ns", 0x91}, {"BASEPRI_NS", 0x91},
19292 {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
19293 {"control_ns", 0x94}, {"CONTROL_NS", 0x94},
19294 {"sp_ns", 0x98}, {"SP_NS", 0x98 }
19297 /* Table of all shift-in-operand names. */
19298 static const struct asm_shift_name shift_names [] =
19300 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
19301 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
19302 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
19303 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
19304 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
19305 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
19308 /* Table of all explicit relocation names. */
19310 static struct reloc_entry reloc_names[] =
19312 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
19313 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
19314 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
19315 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
19316 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
19317 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
19318 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
19319 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
19320 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
19321 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
19322 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32},
19323 { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
19324 { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
19325 { "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
19326 { "tlscall", BFD_RELOC_ARM_TLS_CALL},
19327 { "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
19328 { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
19329 { "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
19330 { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
19331 { "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
19332 { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
19333 { "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
19334 { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
19335 { "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
19336 { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC }, { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
19337 { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC }, { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
19338 { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC }, { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
19342 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
19343 static const struct asm_cond conds[] =
19347 {"cs", 0x2}, {"hs", 0x2},
19348 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
19362 #define UL_BARRIER(L,U,CODE,FEAT) \
19363 { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
19364 { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }
19366 static struct asm_barrier_opt barrier_opt_names[] =
19368 UL_BARRIER ("sy", "SY", 0xf, ARM_EXT_BARRIER),
19369 UL_BARRIER ("st", "ST", 0xe, ARM_EXT_BARRIER),
19370 UL_BARRIER ("ld", "LD", 0xd, ARM_EXT_V8),
19371 UL_BARRIER ("ish", "ISH", 0xb, ARM_EXT_BARRIER),
19372 UL_BARRIER ("sh", "SH", 0xb, ARM_EXT_BARRIER),
19373 UL_BARRIER ("ishst", "ISHST", 0xa, ARM_EXT_BARRIER),
19374 UL_BARRIER ("shst", "SHST", 0xa, ARM_EXT_BARRIER),
19375 UL_BARRIER ("ishld", "ISHLD", 0x9, ARM_EXT_V8),
19376 UL_BARRIER ("un", "UN", 0x7, ARM_EXT_BARRIER),
19377 UL_BARRIER ("nsh", "NSH", 0x7, ARM_EXT_BARRIER),
19378 UL_BARRIER ("unst", "UNST", 0x6, ARM_EXT_BARRIER),
19379 UL_BARRIER ("nshst", "NSHST", 0x6, ARM_EXT_BARRIER),
19380 UL_BARRIER ("nshld", "NSHLD", 0x5, ARM_EXT_V8),
19381 UL_BARRIER ("osh", "OSH", 0x3, ARM_EXT_BARRIER),
19382 UL_BARRIER ("oshst", "OSHST", 0x2, ARM_EXT_BARRIER),
19383 UL_BARRIER ("oshld", "OSHLD", 0x1, ARM_EXT_V8)
19388 /* Table of ARM-format instructions. */
19390 /* Macros for gluing together operand strings. N.B. In all cases
19391 other than OPS0, the trailing OP_stop comes from default
19392 zero-initialization of the unspecified elements of the array. */
19393 #define OPS0() { OP_stop, }
19394 #define OPS1(a) { OP_##a, }
19395 #define OPS2(a,b) { OP_##a,OP_##b, }
19396 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
19397 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
19398 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
19399 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
19401 /* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
19402 This is useful when mixing operands for ARM and THUMB, i.e. using the
19403 MIX_ARM_THUMB_OPERANDS macro.
19404 In order to use these macros, prefix the number of operands with _
19406 #define OPS_1(a) { a, }
19407 #define OPS_2(a,b) { a,b, }
19408 #define OPS_3(a,b,c) { a,b,c, }
19409 #define OPS_4(a,b,c,d) { a,b,c,d, }
19410 #define OPS_5(a,b,c,d,e) { a,b,c,d,e, }
19411 #define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
19413 /* These macros abstract out the exact format of the mnemonic table and
19414 save some repeated characters. */
19416 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
19417 #define TxCE(mnem, op, top, nops, ops, ae, te) \
19418 { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
19419 THUMB_VARIANT, do_##ae, do_##te }
19421 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
19422 a T_MNEM_xyz enumerator. */
19423 #define TCE(mnem, aop, top, nops, ops, ae, te) \
19424 TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
19425 #define tCE(mnem, aop, top, nops, ops, ae, te) \
19426 TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19428 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
19429 infix after the third character. */
19430 #define TxC3(mnem, op, top, nops, ops, ae, te) \
19431 { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
19432 THUMB_VARIANT, do_##ae, do_##te }
19433 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
19434 { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
19435 THUMB_VARIANT, do_##ae, do_##te }
19436 #define TC3(mnem, aop, top, nops, ops, ae, te) \
19437 TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
19438 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
19439 TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
19440 #define tC3(mnem, aop, top, nops, ops, ae, te) \
19441 TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19442 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
19443 TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)
19445 /* Mnemonic that cannot be conditionalized. The ARM condition-code
19446 field is still 0xE. Many of the Thumb variants can be executed
19447 conditionally, so this is checked separately. */
19448 #define TUE(mnem, op, top, nops, ops, ae, te) \
19449 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19450 THUMB_VARIANT, do_##ae, do_##te }
19452 /* Same as TUE but the encoding function for ARM and Thumb modes is the same.
19453 Used by mnemonics that have very minimal differences in the encoding for
19454 ARM and Thumb variants and can be handled in a common function. */
19455 #define TUEc(mnem, op, top, nops, ops, en) \
19456 { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
19457 THUMB_VARIANT, do_##en, do_##en }
19459 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
19460 condition code field. */
19461 #define TUF(mnem, op, top, nops, ops, ae, te) \
19462 { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
19463 THUMB_VARIANT, do_##ae, do_##te }
19465 /* ARM-only variants of all the above. */
19466 #define CE(mnem, op, nops, ops, ae) \
19467 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19469 #define C3(mnem, op, nops, ops, ae) \
19470 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19472 /* Thumb-only variants of TCE and TUE. */
19473 #define ToC(mnem, top, nops, ops, te) \
19474 { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
19477 #define ToU(mnem, top, nops, ops, te) \
19478 { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
19481 /* Legacy mnemonics that always have conditional infix after the third
19483 #define CL(mnem, op, nops, ops, ae) \
19484 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19485 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19487 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
19488 #define cCE(mnem, op, nops, ops, ae) \
19489 { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19491 /* Legacy coprocessor instructions where conditional infix and conditional
19492 suffix are ambiguous. For consistency this includes all FPA instructions,
19493 not just the potentially ambiguous ones. */
19494 #define cCL(mnem, op, nops, ops, ae) \
19495 { mnem, OPS##nops ops, OT_cinfix3_legacy, \
19496 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19498 /* Coprocessor, takes either a suffix or a position-3 infix
19499 (for an FPA corner case). */
19500 #define C3E(mnem, op, nops, ops, ae) \
19501 { mnem, OPS##nops ops, OT_csuf_or_in3, \
19502 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
19504 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
19505 { m1 #m2 m3, OPS##nops ops, \
19506 sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
19507 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
19509 #define CM(m1, m2, op, nops, ops, ae) \
19510 xCM_ (m1, , m2, op, nops, ops, ae), \
19511 xCM_ (m1, eq, m2, op, nops, ops, ae), \
19512 xCM_ (m1, ne, m2, op, nops, ops, ae), \
19513 xCM_ (m1, cs, m2, op, nops, ops, ae), \
19514 xCM_ (m1, hs, m2, op, nops, ops, ae), \
19515 xCM_ (m1, cc, m2, op, nops, ops, ae), \
19516 xCM_ (m1, ul, m2, op, nops, ops, ae), \
19517 xCM_ (m1, lo, m2, op, nops, ops, ae), \
19518 xCM_ (m1, mi, m2, op, nops, ops, ae), \
19519 xCM_ (m1, pl, m2, op, nops, ops, ae), \
19520 xCM_ (m1, vs, m2, op, nops, ops, ae), \
19521 xCM_ (m1, vc, m2, op, nops, ops, ae), \
19522 xCM_ (m1, hi, m2, op, nops, ops, ae), \
19523 xCM_ (m1, ls, m2, op, nops, ops, ae), \
19524 xCM_ (m1, ge, m2, op, nops, ops, ae), \
19525 xCM_ (m1, lt, m2, op, nops, ops, ae), \
19526 xCM_ (m1, gt, m2, op, nops, ops, ae), \
19527 xCM_ (m1, le, m2, op, nops, ops, ae), \
19528 xCM_ (m1, al, m2, op, nops, ops, ae)
19530 #define UE(mnem, op, nops, ops, ae) \
19531 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19533 #define UF(mnem, op, nops, ops, ae) \
19534 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
19536 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
19537 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
19538 use the same encoding function for each. */
19539 #define NUF(mnem, op, nops, ops, enc) \
19540 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
19541 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19543 /* Neon data processing, version which indirects through neon_enc_tab for
19544 the various overloaded versions of opcodes. */
19545 #define nUF(mnem, op, nops, ops, enc) \
19546 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op, \
19547 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19549 /* Neon insn with conditional suffix for the ARM version, non-overloaded
19551 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
19552 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
19553 THUMB_VARIANT, do_##enc, do_##enc }
19555 #define NCE(mnem, op, nops, ops, enc) \
19556 NCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
19558 #define NCEF(mnem, op, nops, ops, enc) \
19559 NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19561 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
19562 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
19563 { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op, \
19564 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
19566 #define nCE(mnem, op, nops, ops, enc) \
19567 nCE_tag (mnem, op, nops, ops, enc, OT_csuffix)
19569 #define nCEF(mnem, op, nops, ops, enc) \
19570 nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF)
19574 static const struct asm_opcode insns[] =
19576 #define ARM_VARIANT & arm_ext_v1 /* Core ARM Instructions. */
19577 #define THUMB_VARIANT & arm_ext_v4t
19578 tCE("and", 0000000, _and, 3, (RR, oRR, SH), arit, t_arit3c),
19579 tC3("ands", 0100000, _ands, 3, (RR, oRR, SH), arit, t_arit3c),
19580 tCE("eor", 0200000, _eor, 3, (RR, oRR, SH), arit, t_arit3c),
19581 tC3("eors", 0300000, _eors, 3, (RR, oRR, SH), arit, t_arit3c),
19582 tCE("sub", 0400000, _sub, 3, (RR, oRR, SH), arit, t_add_sub),
19583 tC3("subs", 0500000, _subs, 3, (RR, oRR, SH), arit, t_add_sub),
19584 tCE("add", 0800000, _add, 3, (RR, oRR, SHG), arit, t_add_sub),
19585 tC3("adds", 0900000, _adds, 3, (RR, oRR, SHG), arit, t_add_sub),
19586 tCE("adc", 0a00000, _adc, 3, (RR, oRR, SH), arit, t_arit3c),
19587 tC3("adcs", 0b00000, _adcs, 3, (RR, oRR, SH), arit, t_arit3c),
19588 tCE("sbc", 0c00000, _sbc, 3, (RR, oRR, SH), arit, t_arit3),
19589 tC3("sbcs", 0d00000, _sbcs, 3, (RR, oRR, SH), arit, t_arit3),
19590 tCE("orr", 1800000, _orr, 3, (RR, oRR, SH), arit, t_arit3c),
19591 tC3("orrs", 1900000, _orrs, 3, (RR, oRR, SH), arit, t_arit3c),
19592 tCE("bic", 1c00000, _bic, 3, (RR, oRR, SH), arit, t_arit3),
19593 tC3("bics", 1d00000, _bics, 3, (RR, oRR, SH), arit, t_arit3),
19595 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
19596 for setting PSR flag bits. They are obsolete in V6 and do not
19597 have Thumb equivalents. */
19598 tCE("tst", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19599 tC3w("tsts", 1100000, _tst, 2, (RR, SH), cmp, t_mvn_tst),
19600 CL("tstp", 110f000, 2, (RR, SH), cmp),
19601 tCE("cmp", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19602 tC3w("cmps", 1500000, _cmp, 2, (RR, SH), cmp, t_mov_cmp),
19603 CL("cmpp", 150f000, 2, (RR, SH), cmp),
19604 tCE("cmn", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19605 tC3w("cmns", 1700000, _cmn, 2, (RR, SH), cmp, t_mvn_tst),
19606 CL("cmnp", 170f000, 2, (RR, SH), cmp),
19608 tCE("mov", 1a00000, _mov, 2, (RR, SH), mov, t_mov_cmp),
19609 tC3("movs", 1b00000, _movs, 2, (RR, SHG), mov, t_mov_cmp),
19610 tCE("mvn", 1e00000, _mvn, 2, (RR, SH), mov, t_mvn_tst),
19611 tC3("mvns", 1f00000, _mvns, 2, (RR, SH), mov, t_mvn_tst),
19613 tCE("ldr", 4100000, _ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
19614 tC3("ldrb", 4500000, _ldrb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19615 tCE("str", 4000000, _str, _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
19617 OP_ADDRGLDR),ldst, t_ldst),
19618 tC3("strb", 4400000, _strb, 2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
19620 tCE("stm", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19621 tC3("stmia", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19622 tC3("stmea", 8800000, _stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19623 tCE("ldm", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19624 tC3("ldmia", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19625 tC3("ldmfd", 8900000, _ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19627 tCE("b", a000000, _b, 1, (EXPr), branch, t_branch),
19628 TCE("bl", b000000, f000f800, 1, (EXPr), bl, t_branch23),
19631 tCE("adr", 28f0000, _adr, 2, (RR, EXP), adr, t_adr),
19632 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
19633 tCE("nop", 1a00000, _nop, 1, (oI255c), nop, t_nop),
19634 tCE("udf", 7f000f0, _udf, 1, (oIffffb), bkpt, t_udf),
19636 /* Thumb-compatibility pseudo ops. */
19637 tCE("lsl", 1a00000, _lsl, 3, (RR, oRR, SH), shift, t_shift),
19638 tC3("lsls", 1b00000, _lsls, 3, (RR, oRR, SH), shift, t_shift),
19639 tCE("lsr", 1a00020, _lsr, 3, (RR, oRR, SH), shift, t_shift),
19640 tC3("lsrs", 1b00020, _lsrs, 3, (RR, oRR, SH), shift, t_shift),
19641 tCE("asr", 1a00040, _asr, 3, (RR, oRR, SH), shift, t_shift),
19642 tC3("asrs", 1b00040, _asrs, 3, (RR, oRR, SH), shift, t_shift),
19643 tCE("ror", 1a00060, _ror, 3, (RR, oRR, SH), shift, t_shift),
19644 tC3("rors", 1b00060, _rors, 3, (RR, oRR, SH), shift, t_shift),
19645 tCE("neg", 2600000, _neg, 2, (RR, RR), rd_rn, t_neg),
19646 tC3("negs", 2700000, _negs, 2, (RR, RR), rd_rn, t_neg),
19647 tCE("push", 92d0000, _push, 1, (REGLST), push_pop, t_push_pop),
19648 tCE("pop", 8bd0000, _pop, 1, (REGLST), push_pop, t_push_pop),
19650 /* These may simplify to neg. */
19651 TCE("rsb", 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
19652 TC3("rsbs", 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
19654 #undef THUMB_VARIANT
19655 #define THUMB_VARIANT & arm_ext_os
19657 TCE("swi", f000000, df00, 1, (EXPi), swi, t_swi),
19658 TCE("svc", f000000, df00, 1, (EXPi), swi, t_swi),
19660 #undef THUMB_VARIANT
19661 #define THUMB_VARIANT & arm_ext_v6
19663 TCE("cpy", 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
19665 /* V1 instructions with no Thumb analogue prior to V6T2. */
19666 #undef THUMB_VARIANT
19667 #define THUMB_VARIANT & arm_ext_v6t2
19669 TCE("teq", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19670 TC3w("teqs", 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
19671 CL("teqp", 130f000, 2, (RR, SH), cmp),
19673 TC3("ldrt", 4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19674 TC3("ldrbt", 4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19675 TC3("strt", 4200000, f8400e00, 2, (RR_npcsp, ADDR), ldstt, t_ldstt),
19676 TC3("strbt", 4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
19678 TC3("stmdb", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19679 TC3("stmfd", 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19681 TC3("ldmdb", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19682 TC3("ldmea", 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
19684 /* V1 instructions with no Thumb analogue at all. */
19685 CE("rsc", 0e00000, 3, (RR, oRR, SH), arit),
19686 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
19688 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
19689 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
19690 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
19691 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
19692 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
19693 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
19694 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
19695 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
19698 #define ARM_VARIANT & arm_ext_v2 /* ARM 2 - multiplies. */
19699 #undef THUMB_VARIANT
19700 #define THUMB_VARIANT & arm_ext_v4t
19702 tCE("mul", 0000090, _mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19703 tC3("muls", 0100090, _muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
19705 #undef THUMB_VARIANT
19706 #define THUMB_VARIANT & arm_ext_v6t2
19708 TCE("mla", 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
19709 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
19711 /* Generic coprocessor instructions. */
19712 TCE("cdp", e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19713 TCE("ldc", c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19714 TC3("ldcl", c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19715 TCE("stc", c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19716 TC3("stcl", c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19717 TCE("mcr", e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19718 TCE("mrc", e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b), co_reg, co_reg),
19721 #define ARM_VARIANT & arm_ext_v2s /* ARM 3 - swp instructions. */
19723 CE("swp", 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19724 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
19727 #define ARM_VARIANT & arm_ext_v3 /* ARM 6 Status register instructions. */
19728 #undef THUMB_VARIANT
19729 #define THUMB_VARIANT & arm_ext_msr
19731 TCE("mrs", 1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
19732 TCE("msr", 120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
19735 #define ARM_VARIANT & arm_ext_v3m /* ARM 7M long multiplies. */
19736 #undef THUMB_VARIANT
19737 #define THUMB_VARIANT & arm_ext_v6t2
19739 TCE("smull", 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19740 CM("smull","s", 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19741 TCE("umull", 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19742 CM("umull","s", 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19743 TCE("smlal", 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19744 CM("smlal","s", 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19745 TCE("umlal", 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
19746 CM("umlal","s", 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
19749 #define ARM_VARIANT & arm_ext_v4 /* ARM Architecture 4. */
19750 #undef THUMB_VARIANT
19751 #define THUMB_VARIANT & arm_ext_v4t
19753 tC3("ldrh", 01000b0, _ldrh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19754 tC3("strh", 00000b0, _strh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19755 tC3("ldrsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19756 tC3("ldrsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19757 tC3("ldsh", 01000f0, _ldrsh, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19758 tC3("ldsb", 01000d0, _ldrsb, 2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
19761 #define ARM_VARIANT & arm_ext_v4t_5
19763 /* ARM Architecture 4T. */
19764 /* Note: bx (and blx) are required on V5, even if the processor does
19765 not support Thumb. */
19766 TCE("bx", 12fff10, 4700, 1, (RR), bx, t_bx),
19769 #define ARM_VARIANT & arm_ext_v5 /* ARM Architecture 5T. */
19770 #undef THUMB_VARIANT
19771 #define THUMB_VARIANT & arm_ext_v5t
19773 /* Note: blx has 2 variants; the .value coded here is for
19774 BLX(2). Only this variant has conditional execution. */
19775 TCE("blx", 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
19776 TUE("bkpt", 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
19778 #undef THUMB_VARIANT
19779 #define THUMB_VARIANT & arm_ext_v6t2
19781 TCE("clz", 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
19782 TUF("ldc2", c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19783 TUF("ldc2l", c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19784 TUF("stc2", c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19785 TUF("stc2l", c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
19786 TUF("cdp2", e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
19787 TUF("mcr2", e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19788 TUF("mrc2", e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
19791 #define ARM_VARIANT & arm_ext_v5exp /* ARM Architecture 5TExP. */
19792 #undef THUMB_VARIANT
19793 #define THUMB_VARIANT & arm_ext_v5exp
19795 TCE("smlabb", 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19796 TCE("smlatb", 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19797 TCE("smlabt", 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19798 TCE("smlatt", 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19800 TCE("smlawb", 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19801 TCE("smlawt", 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
19803 TCE("smlalbb", 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19804 TCE("smlaltb", 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19805 TCE("smlalbt", 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19806 TCE("smlaltt", 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
19808 TCE("smulbb", 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19809 TCE("smultb", 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19810 TCE("smulbt", 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19811 TCE("smultt", 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19813 TCE("smulwb", 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19814 TCE("smulwt", 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19816 TCE("qadd", 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19817 TCE("qdadd", 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19818 TCE("qsub", 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19819 TCE("qdsub", 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, t_simd2),
19822 #define ARM_VARIANT & arm_ext_v5e /* ARM Architecture 5TE. */
19823 #undef THUMB_VARIANT
19824 #define THUMB_VARIANT & arm_ext_v6t2
19826 TUF("pld", 450f000, f810f000, 1, (ADDR), pld, t_pld),
19827 TC3("ldrd", 00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
19829 TC3("strd", 00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
19830 ADDRGLDRS), ldrd, t_ldstd),
19832 TCE("mcrr", c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19833 TCE("mrrc", c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19836 #define ARM_VARIANT & arm_ext_v5j /* ARM Architecture 5TEJ. */
19838 TCE("bxj", 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
19841 #define ARM_VARIANT & arm_ext_v6 /* ARM V6. */
19842 #undef THUMB_VARIANT
19843 #define THUMB_VARIANT & arm_ext_v6
19845 TUF("cpsie", 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
19846 TUF("cpsid", 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
19847 tCE("rev", 6bf0f30, _rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19848 tCE("rev16", 6bf0fb0, _rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19849 tCE("revsh", 6ff0fb0, _revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
19850 tCE("sxth", 6bf0070, _sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19851 tCE("uxth", 6ff0070, _uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19852 tCE("sxtb", 6af0070, _sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19853 tCE("uxtb", 6ef0070, _uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19854 TUF("setend", 1010000, b650, 1, (ENDI), setend, t_setend),
19856 #undef THUMB_VARIANT
19857 #define THUMB_VARIANT & arm_ext_v6t2_v8m
19859 TCE("ldrex", 1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR), ldrex, t_ldrex),
19860 TCE("strex", 1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
19862 #undef THUMB_VARIANT
19863 #define THUMB_VARIANT & arm_ext_v6t2
19865 TUF("mcrr2", c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19866 TUF("mrrc2", c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
19868 TCE("ssat", 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
19869 TCE("usat", 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
19871 /* ARM V6 not included in V7M. */
19872 #undef THUMB_VARIANT
19873 #define THUMB_VARIANT & arm_ext_v6_notm
19874 TUF("rfeia", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19875 TUF("rfe", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19876 UF(rfeib, 9900a00, 1, (RRw), rfe),
19877 UF(rfeda, 8100a00, 1, (RRw), rfe),
19878 TUF("rfedb", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19879 TUF("rfefd", 8900a00, e990c000, 1, (RRw), rfe, rfe),
19880 UF(rfefa, 8100a00, 1, (RRw), rfe),
19881 TUF("rfeea", 9100a00, e810c000, 1, (RRw), rfe, rfe),
19882 UF(rfeed, 9900a00, 1, (RRw), rfe),
19883 TUF("srsia", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19884 TUF("srs", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19885 TUF("srsea", 8c00500, e980c000, 2, (oRRw, I31w), srs, srs),
19886 UF(srsib, 9c00500, 2, (oRRw, I31w), srs),
19887 UF(srsfa, 9c00500, 2, (oRRw, I31w), srs),
19888 UF(srsda, 8400500, 2, (oRRw, I31w), srs),
19889 UF(srsed, 8400500, 2, (oRRw, I31w), srs),
19890 TUF("srsdb", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19891 TUF("srsfd", 9400500, e800c000, 2, (oRRw, I31w), srs, srs),
19892 TUF("cps", 1020000, f3af8100, 1, (I31b), imm0, t_cps),
19894 /* ARM V6 not included in V7M (eg. integer SIMD). */
19895 #undef THUMB_VARIANT
19896 #define THUMB_VARIANT & arm_ext_v6_dsp
19897 TCE("pkhbt", 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
19898 TCE("pkhtb", 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
19899 TCE("qadd16", 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19900 TCE("qadd8", 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19901 TCE("qasx", 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19902 /* Old name for QASX. */
19903 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19904 TCE("qsax", 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19905 /* Old name for QSAX. */
19906 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19907 TCE("qsub16", 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19908 TCE("qsub8", 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19909 TCE("sadd16", 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19910 TCE("sadd8", 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19911 TCE("sasx", 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19912 /* Old name for SASX. */
19913 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19914 TCE("shadd16", 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19915 TCE("shadd8", 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19916 TCE("shasx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19917 /* Old name for SHASX. */
19918 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19919 TCE("shsax", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19920 /* Old name for SHSAX. */
19921 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19922 TCE("shsub16", 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19923 TCE("shsub8", 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19924 TCE("ssax", 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19925 /* Old name for SSAX. */
19926 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19927 TCE("ssub16", 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19928 TCE("ssub8", 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19929 TCE("uadd16", 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19930 TCE("uadd8", 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19931 TCE("uasx", 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19932 /* Old name for UASX. */
19933 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19934 TCE("uhadd16", 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19935 TCE("uhadd8", 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19936 TCE("uhasx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19937 /* Old name for UHASX. */
19938 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19939 TCE("uhsax", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19940 /* Old name for UHSAX. */
19941 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19942 TCE("uhsub16", 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19943 TCE("uhsub8", 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19944 TCE("uqadd16", 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19945 TCE("uqadd8", 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19946 TCE("uqasx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19947 /* Old name for UQASX. */
19948 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19949 TCE("uqsax", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19950 /* Old name for UQSAX. */
19951 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19952 TCE("uqsub16", 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19953 TCE("uqsub8", 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19954 TCE("usub16", 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19955 TCE("usax", 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19956 /* Old name for USAX. */
19957 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19958 TCE("usub8", 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19959 TCE("sxtah", 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19960 TCE("sxtab16", 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19961 TCE("sxtab", 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19962 TCE("sxtb16", 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19963 TCE("uxtah", 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19964 TCE("uxtab16", 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19965 TCE("uxtab", 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
19966 TCE("uxtb16", 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
19967 TCE("sel", 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
19968 TCE("smlad", 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19969 TCE("smladx", 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19970 TCE("smlald", 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19971 TCE("smlaldx", 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19972 TCE("smlsd", 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19973 TCE("smlsdx", 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19974 TCE("smlsld", 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19975 TCE("smlsldx", 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
19976 TCE("smmla", 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19977 TCE("smmlar", 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19978 TCE("smmls", 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19979 TCE("smmlsr", 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19980 TCE("smmul", 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19981 TCE("smmulr", 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19982 TCE("smuad", 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19983 TCE("smuadx", 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19984 TCE("smusd", 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19985 TCE("smusdx", 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19986 TCE("ssat16", 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
19987 TCE("umaal", 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
19988 TCE("usad8", 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
19989 TCE("usada8", 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
19990 TCE("usat16", 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
19993 #define ARM_VARIANT & arm_ext_v6k
19994 #undef THUMB_VARIANT
19995 #define THUMB_VARIANT & arm_ext_v6k
19997 tCE("yield", 320f001, _yield, 0, (), noargs, t_hint),
19998 tCE("wfe", 320f002, _wfe, 0, (), noargs, t_hint),
19999 tCE("wfi", 320f003, _wfi, 0, (), noargs, t_hint),
20000 tCE("sev", 320f004, _sev, 0, (), noargs, t_hint),
20002 #undef THUMB_VARIANT
20003 #define THUMB_VARIANT & arm_ext_v6_notm
20004 TCE("ldrexd", 1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
20006 TCE("strexd", 1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
20007 RRnpcb), strexd, t_strexd),
20009 #undef THUMB_VARIANT
20010 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20011 TCE("ldrexb", 1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
20013 TCE("ldrexh", 1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
20015 TCE("strexb", 1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20017 TCE("strexh", 1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
20019 TUF("clrex", 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
20022 #define ARM_VARIANT & arm_ext_sec
20023 #undef THUMB_VARIANT
20024 #define THUMB_VARIANT & arm_ext_sec
20026 TCE("smc", 1600070, f7f08000, 1, (EXPi), smc, t_smc),
20029 #define ARM_VARIANT & arm_ext_virt
20030 #undef THUMB_VARIANT
20031 #define THUMB_VARIANT & arm_ext_virt
20033 TCE("hvc", 1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
20034 TCE("eret", 160006e, f3de8f00, 0, (), noargs, noargs),
20037 #define ARM_VARIANT & arm_ext_pan
20038 #undef THUMB_VARIANT
20039 #define THUMB_VARIANT & arm_ext_pan
20041 TUF("setpan", 1100000, b610, 1, (I7), setpan, t_setpan),
20044 #define ARM_VARIANT & arm_ext_v6t2
20045 #undef THUMB_VARIANT
20046 #define THUMB_VARIANT & arm_ext_v6t2
20048 TCE("bfc", 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
20049 TCE("bfi", 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
20050 TCE("sbfx", 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
20051 TCE("ubfx", 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
20053 TCE("mls", 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
20054 TCE("rbit", 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
20056 TC3("ldrht", 03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20057 TC3("ldrsht", 03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20058 TC3("ldrsbt", 03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20059 TC3("strht", 02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
20062 #define ARM_VARIANT & arm_ext_v3
20063 #undef THUMB_VARIANT
20064 #define THUMB_VARIANT & arm_ext_v6t2
20066 TUE("csdb", 320f014, f3af8014, 0, (), noargs, t_csdb),
20067 TUF("ssbb", 57ff040, f3bf8f40, 0, (), noargs, t_csdb),
20068 TUF("pssbb", 57ff044, f3bf8f44, 0, (), noargs, t_csdb),
20071 #define ARM_VARIANT & arm_ext_v6t2
20072 #undef THUMB_VARIANT
20073 #define THUMB_VARIANT & arm_ext_v6t2_v8m
20074 TCE("movw", 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
20075 TCE("movt", 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
20077 /* Thumb-only instructions. */
20079 #define ARM_VARIANT NULL
20080 TUE("cbnz", 0, b900, 2, (RR, EXP), 0, t_cbz),
20081 TUE("cbz", 0, b100, 2, (RR, EXP), 0, t_cbz),
20083 /* ARM does not really have an IT instruction, so always allow it.
20084 The opcode is copied from Thumb in order to allow warnings in
20085 -mimplicit-it=[never | arm] modes. */
20087 #define ARM_VARIANT & arm_ext_v1
20088 #undef THUMB_VARIANT
20089 #define THUMB_VARIANT & arm_ext_v6t2
20091 TUE("it", bf08, bf08, 1, (COND), it, t_it),
20092 TUE("itt", bf0c, bf0c, 1, (COND), it, t_it),
20093 TUE("ite", bf04, bf04, 1, (COND), it, t_it),
20094 TUE("ittt", bf0e, bf0e, 1, (COND), it, t_it),
20095 TUE("itet", bf06, bf06, 1, (COND), it, t_it),
20096 TUE("itte", bf0a, bf0a, 1, (COND), it, t_it),
20097 TUE("itee", bf02, bf02, 1, (COND), it, t_it),
20098 TUE("itttt", bf0f, bf0f, 1, (COND), it, t_it),
20099 TUE("itett", bf07, bf07, 1, (COND), it, t_it),
20100 TUE("ittet", bf0b, bf0b, 1, (COND), it, t_it),
20101 TUE("iteet", bf03, bf03, 1, (COND), it, t_it),
20102 TUE("ittte", bf0d, bf0d, 1, (COND), it, t_it),
20103 TUE("itete", bf05, bf05, 1, (COND), it, t_it),
20104 TUE("ittee", bf09, bf09, 1, (COND), it, t_it),
20105 TUE("iteee", bf01, bf01, 1, (COND), it, t_it),
20106 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent. */
20107 TC3("rrx", 01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
20108 TC3("rrxs", 01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
20110 /* Thumb2 only instructions. */
20112 #define ARM_VARIANT NULL
20114 TCE("addw", 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20115 TCE("subw", 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
20116 TCE("orn", 0, ea600000, 3, (RR, oRR, SH), 0, t_orn),
20117 TCE("orns", 0, ea700000, 3, (RR, oRR, SH), 0, t_orn),
20118 TCE("tbb", 0, e8d0f000, 1, (TB), 0, t_tb),
20119 TCE("tbh", 0, e8d0f010, 1, (TB), 0, t_tb),
20121 /* Hardware division instructions. */
20123 #define ARM_VARIANT & arm_ext_adiv
20124 #undef THUMB_VARIANT
20125 #define THUMB_VARIANT & arm_ext_div
20127 TCE("sdiv", 710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
20128 TCE("udiv", 730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
20130 /* ARM V6M/V7 instructions. */
20132 #define ARM_VARIANT & arm_ext_barrier
20133 #undef THUMB_VARIANT
20134 #define THUMB_VARIANT & arm_ext_barrier
20136 TUF("dmb", 57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
20137 TUF("dsb", 57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
20138 TUF("isb", 57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
20140 /* ARM V7 instructions. */
20142 #define ARM_VARIANT & arm_ext_v7
20143 #undef THUMB_VARIANT
20144 #define THUMB_VARIANT & arm_ext_v7
20146 TUF("pli", 450f000, f910f000, 1, (ADDR), pli, t_pld),
20147 TCE("dbg", 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
20150 #define ARM_VARIANT & arm_ext_mp
20151 #undef THUMB_VARIANT
20152 #define THUMB_VARIANT & arm_ext_mp
20154 TUF("pldw", 410f000, f830f000, 1, (ADDR), pld, t_pld),
20156 /* AArchv8 instructions. */
20158 #define ARM_VARIANT & arm_ext_v8
20160 /* Instructions shared between armv8-a and armv8-m. */
20161 #undef THUMB_VARIANT
20162 #define THUMB_VARIANT & arm_ext_atomics
20164 TCE("lda", 1900c9f, e8d00faf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20165 TCE("ldab", 1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20166 TCE("ldah", 1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20167 TCE("stl", 180fc90, e8c00faf, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20168 TCE("stlb", 1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20169 TCE("stlh", 1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb), rm_rn, rd_rn),
20170 TCE("ldaex", 1900e9f, e8d00fef, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20171 TCE("ldaexb", 1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb), rd_rn, rd_rn),
20172 TCE("ldaexh", 1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
20173 TCE("stlex", 1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
20175 TCE("stlexb", 1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
20177 TCE("stlexh", 1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
20179 #undef THUMB_VARIANT
20180 #define THUMB_VARIANT & arm_ext_v8
20182 tCE("sevl", 320f005, _sevl, 0, (), noargs, t_hint),
20183 TUE("hlt", 1000070, ba80, 1, (oIffffb), bkpt, t_hlt),
20184 TCE("ldaexd", 1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
20186 TCE("stlexd", 1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
20188 /* ARMv8 T32 only. */
20190 #define ARM_VARIANT NULL
20191 TUF("dcps1", 0, f78f8001, 0, (), noargs, noargs),
20192 TUF("dcps2", 0, f78f8002, 0, (), noargs, noargs),
20193 TUF("dcps3", 0, f78f8003, 0, (), noargs, noargs),
20195 /* FP for ARMv8. */
20197 #define ARM_VARIANT & fpu_vfp_ext_armv8xd
20198 #undef THUMB_VARIANT
20199 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
20201 nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD), vsel),
20202 nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD), vsel),
20203 nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD), vsel),
20204 nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD), vsel),
20205 nUF(vmaxnm, _vmaxnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20206 nUF(vminnm, _vminnm, 3, (RNSDQ, oRNSDQ, RNSDQ), vmaxnm),
20207 nUF(vcvta, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvta),
20208 nUF(vcvtn, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtn),
20209 nUF(vcvtp, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtp),
20210 nUF(vcvtm, _vcvta, 2, (RNSDQ, oRNSDQ), neon_cvtm),
20211 nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ), vrintr),
20212 nCE(vrintz, _vrintr, 2, (RNSDQ, oRNSDQ), vrintz),
20213 nCE(vrintx, _vrintr, 2, (RNSDQ, oRNSDQ), vrintx),
20214 nUF(vrinta, _vrinta, 2, (RNSDQ, oRNSDQ), vrinta),
20215 nUF(vrintn, _vrinta, 2, (RNSDQ, oRNSDQ), vrintn),
20216 nUF(vrintp, _vrinta, 2, (RNSDQ, oRNSDQ), vrintp),
20217 nUF(vrintm, _vrinta, 2, (RNSDQ, oRNSDQ), vrintm),
20219 /* Crypto v1 extensions. */
20221 #define ARM_VARIANT & fpu_crypto_ext_armv8
20222 #undef THUMB_VARIANT
20223 #define THUMB_VARIANT & fpu_crypto_ext_armv8
20225 nUF(aese, _aes, 2, (RNQ, RNQ), aese),
20226 nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
20227 nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
20228 nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
20229 nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
20230 nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
20231 nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
20232 nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
20233 nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
20234 nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
20235 nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
20236 nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
20237 nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
20238 nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
20241 #define ARM_VARIANT & crc_ext_armv8
20242 #undef THUMB_VARIANT
20243 #define THUMB_VARIANT & crc_ext_armv8
20244 TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
20245 TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
20246 TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
20247 TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
20248 TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
20249 TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
20251 /* ARMv8.2 RAS extension. */
20253 #define ARM_VARIANT & arm_ext_ras
20254 #undef THUMB_VARIANT
20255 #define THUMB_VARIANT & arm_ext_ras
20256 TUE ("esb", 320f010, f3af8010, 0, (), noargs, noargs),
20259 #define ARM_VARIANT & arm_ext_v8_3
20260 #undef THUMB_VARIANT
20261 #define THUMB_VARIANT & arm_ext_v8_3
20262 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
20263 NUF (vcmla, 0, 4, (RNDQ, RNDQ, RNDQ_RNSC, EXPi), vcmla),
20264 NUF (vcadd, 0, 4, (RNDQ, RNDQ, RNDQ, EXPi), vcadd),
20267 #define ARM_VARIANT & fpu_neon_ext_dotprod
20268 #undef THUMB_VARIANT
20269 #define THUMB_VARIANT & fpu_neon_ext_dotprod
20270 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
20271 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
20274 #define ARM_VARIANT & fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
20275 #undef THUMB_VARIANT
20276 #define THUMB_VARIANT NULL
20278 cCE("wfs", e200110, 1, (RR), rd),
20279 cCE("rfs", e300110, 1, (RR), rd),
20280 cCE("wfc", e400110, 1, (RR), rd),
20281 cCE("rfc", e500110, 1, (RR), rd),
20283 cCL("ldfs", c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
20284 cCL("ldfd", c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
20285 cCL("ldfe", c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
20286 cCL("ldfp", c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
20288 cCL("stfs", c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
20289 cCL("stfd", c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
20290 cCL("stfe", c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
20291 cCL("stfp", c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
20293 cCL("mvfs", e008100, 2, (RF, RF_IF), rd_rm),
20294 cCL("mvfsp", e008120, 2, (RF, RF_IF), rd_rm),
20295 cCL("mvfsm", e008140, 2, (RF, RF_IF), rd_rm),
20296 cCL("mvfsz", e008160, 2, (RF, RF_IF), rd_rm),
20297 cCL("mvfd", e008180, 2, (RF, RF_IF), rd_rm),
20298 cCL("mvfdp", e0081a0, 2, (RF, RF_IF), rd_rm),
20299 cCL("mvfdm", e0081c0, 2, (RF, RF_IF), rd_rm),
20300 cCL("mvfdz", e0081e0, 2, (RF, RF_IF), rd_rm),
20301 cCL("mvfe", e088100, 2, (RF, RF_IF), rd_rm),
20302 cCL("mvfep", e088120, 2, (RF, RF_IF), rd_rm),
20303 cCL("mvfem", e088140, 2, (RF, RF_IF), rd_rm),
20304 cCL("mvfez", e088160, 2, (RF, RF_IF), rd_rm),
20306 cCL("mnfs", e108100, 2, (RF, RF_IF), rd_rm),
20307 cCL("mnfsp", e108120, 2, (RF, RF_IF), rd_rm),
20308 cCL("mnfsm", e108140, 2, (RF, RF_IF), rd_rm),
20309 cCL("mnfsz", e108160, 2, (RF, RF_IF), rd_rm),
20310 cCL("mnfd", e108180, 2, (RF, RF_IF), rd_rm),
20311 cCL("mnfdp", e1081a0, 2, (RF, RF_IF), rd_rm),
20312 cCL("mnfdm", e1081c0, 2, (RF, RF_IF), rd_rm),
20313 cCL("mnfdz", e1081e0, 2, (RF, RF_IF), rd_rm),
20314 cCL("mnfe", e188100, 2, (RF, RF_IF), rd_rm),
20315 cCL("mnfep", e188120, 2, (RF, RF_IF), rd_rm),
20316 cCL("mnfem", e188140, 2, (RF, RF_IF), rd_rm),
20317 cCL("mnfez", e188160, 2, (RF, RF_IF), rd_rm),
20319 cCL("abss", e208100, 2, (RF, RF_IF), rd_rm),
20320 cCL("abssp", e208120, 2, (RF, RF_IF), rd_rm),
20321 cCL("abssm", e208140, 2, (RF, RF_IF), rd_rm),
20322 cCL("abssz", e208160, 2, (RF, RF_IF), rd_rm),
20323 cCL("absd", e208180, 2, (RF, RF_IF), rd_rm),
20324 cCL("absdp", e2081a0, 2, (RF, RF_IF), rd_rm),
20325 cCL("absdm", e2081c0, 2, (RF, RF_IF), rd_rm),
20326 cCL("absdz", e2081e0, 2, (RF, RF_IF), rd_rm),
20327 cCL("abse", e288100, 2, (RF, RF_IF), rd_rm),
20328 cCL("absep", e288120, 2, (RF, RF_IF), rd_rm),
20329 cCL("absem", e288140, 2, (RF, RF_IF), rd_rm),
20330 cCL("absez", e288160, 2, (RF, RF_IF), rd_rm),
20332 cCL("rnds", e308100, 2, (RF, RF_IF), rd_rm),
20333 cCL("rndsp", e308120, 2, (RF, RF_IF), rd_rm),
20334 cCL("rndsm", e308140, 2, (RF, RF_IF), rd_rm),
20335 cCL("rndsz", e308160, 2, (RF, RF_IF), rd_rm),
20336 cCL("rndd", e308180, 2, (RF, RF_IF), rd_rm),
20337 cCL("rnddp", e3081a0, 2, (RF, RF_IF), rd_rm),
20338 cCL("rnddm", e3081c0, 2, (RF, RF_IF), rd_rm),
20339 cCL("rnddz", e3081e0, 2, (RF, RF_IF), rd_rm),
20340 cCL("rnde", e388100, 2, (RF, RF_IF), rd_rm),
20341 cCL("rndep", e388120, 2, (RF, RF_IF), rd_rm),
20342 cCL("rndem", e388140, 2, (RF, RF_IF), rd_rm),
20343 cCL("rndez", e388160, 2, (RF, RF_IF), rd_rm),
20345 cCL("sqts", e408100, 2, (RF, RF_IF), rd_rm),
20346 cCL("sqtsp", e408120, 2, (RF, RF_IF), rd_rm),
20347 cCL("sqtsm", e408140, 2, (RF, RF_IF), rd_rm),
20348 cCL("sqtsz", e408160, 2, (RF, RF_IF), rd_rm),
20349 cCL("sqtd", e408180, 2, (RF, RF_IF), rd_rm),
20350 cCL("sqtdp", e4081a0, 2, (RF, RF_IF), rd_rm),
20351 cCL("sqtdm", e4081c0, 2, (RF, RF_IF), rd_rm),
20352 cCL("sqtdz", e4081e0, 2, (RF, RF_IF), rd_rm),
20353 cCL("sqte", e488100, 2, (RF, RF_IF), rd_rm),
20354 cCL("sqtep", e488120, 2, (RF, RF_IF), rd_rm),
20355 cCL("sqtem", e488140, 2, (RF, RF_IF), rd_rm),
20356 cCL("sqtez", e488160, 2, (RF, RF_IF), rd_rm),
20358 cCL("logs", e508100, 2, (RF, RF_IF), rd_rm),
20359 cCL("logsp", e508120, 2, (RF, RF_IF), rd_rm),
20360 cCL("logsm", e508140, 2, (RF, RF_IF), rd_rm),
20361 cCL("logsz", e508160, 2, (RF, RF_IF), rd_rm),
20362 cCL("logd", e508180, 2, (RF, RF_IF), rd_rm),
20363 cCL("logdp", e5081a0, 2, (RF, RF_IF), rd_rm),
20364 cCL("logdm", e5081c0, 2, (RF, RF_IF), rd_rm),
20365 cCL("logdz", e5081e0, 2, (RF, RF_IF), rd_rm),
20366 cCL("loge", e588100, 2, (RF, RF_IF), rd_rm),
20367 cCL("logep", e588120, 2, (RF, RF_IF), rd_rm),
20368 cCL("logem", e588140, 2, (RF, RF_IF), rd_rm),
20369 cCL("logez", e588160, 2, (RF, RF_IF), rd_rm),
20371 cCL("lgns", e608100, 2, (RF, RF_IF), rd_rm),
20372 cCL("lgnsp", e608120, 2, (RF, RF_IF), rd_rm),
20373 cCL("lgnsm", e608140, 2, (RF, RF_IF), rd_rm),
20374 cCL("lgnsz", e608160, 2, (RF, RF_IF), rd_rm),
20375 cCL("lgnd", e608180, 2, (RF, RF_IF), rd_rm),
20376 cCL("lgndp", e6081a0, 2, (RF, RF_IF), rd_rm),
20377 cCL("lgndm", e6081c0, 2, (RF, RF_IF), rd_rm),
20378 cCL("lgndz", e6081e0, 2, (RF, RF_IF), rd_rm),
20379 cCL("lgne", e688100, 2, (RF, RF_IF), rd_rm),
20380 cCL("lgnep", e688120, 2, (RF, RF_IF), rd_rm),
20381 cCL("lgnem", e688140, 2, (RF, RF_IF), rd_rm),
20382 cCL("lgnez", e688160, 2, (RF, RF_IF), rd_rm),
20384 cCL("exps", e708100, 2, (RF, RF_IF), rd_rm),
20385 cCL("expsp", e708120, 2, (RF, RF_IF), rd_rm),
20386 cCL("expsm", e708140, 2, (RF, RF_IF), rd_rm),
20387 cCL("expsz", e708160, 2, (RF, RF_IF), rd_rm),
20388 cCL("expd", e708180, 2, (RF, RF_IF), rd_rm),
20389 cCL("expdp", e7081a0, 2, (RF, RF_IF), rd_rm),
20390 cCL("expdm", e7081c0, 2, (RF, RF_IF), rd_rm),
20391 cCL("expdz", e7081e0, 2, (RF, RF_IF), rd_rm),
20392 cCL("expe", e788100, 2, (RF, RF_IF), rd_rm),
20393 cCL("expep", e788120, 2, (RF, RF_IF), rd_rm),
20394 cCL("expem", e788140, 2, (RF, RF_IF), rd_rm),
20395 cCL("expdz", e788160, 2, (RF, RF_IF), rd_rm),
20397 cCL("sins", e808100, 2, (RF, RF_IF), rd_rm),
20398 cCL("sinsp", e808120, 2, (RF, RF_IF), rd_rm),
20399 cCL("sinsm", e808140, 2, (RF, RF_IF), rd_rm),
20400 cCL("sinsz", e808160, 2, (RF, RF_IF), rd_rm),
20401 cCL("sind", e808180, 2, (RF, RF_IF), rd_rm),
20402 cCL("sindp", e8081a0, 2, (RF, RF_IF), rd_rm),
20403 cCL("sindm", e8081c0, 2, (RF, RF_IF), rd_rm),
20404 cCL("sindz", e8081e0, 2, (RF, RF_IF), rd_rm),
20405 cCL("sine", e888100, 2, (RF, RF_IF), rd_rm),
20406 cCL("sinep", e888120, 2, (RF, RF_IF), rd_rm),
20407 cCL("sinem", e888140, 2, (RF, RF_IF), rd_rm),
20408 cCL("sinez", e888160, 2, (RF, RF_IF), rd_rm),
20410 cCL("coss", e908100, 2, (RF, RF_IF), rd_rm),
20411 cCL("cossp", e908120, 2, (RF, RF_IF), rd_rm),
20412 cCL("cossm", e908140, 2, (RF, RF_IF), rd_rm),
20413 cCL("cossz", e908160, 2, (RF, RF_IF), rd_rm),
20414 cCL("cosd", e908180, 2, (RF, RF_IF), rd_rm),
20415 cCL("cosdp", e9081a0, 2, (RF, RF_IF), rd_rm),
20416 cCL("cosdm", e9081c0, 2, (RF, RF_IF), rd_rm),
20417 cCL("cosdz", e9081e0, 2, (RF, RF_IF), rd_rm),
20418 cCL("cose", e988100, 2, (RF, RF_IF), rd_rm),
20419 cCL("cosep", e988120, 2, (RF, RF_IF), rd_rm),
20420 cCL("cosem", e988140, 2, (RF, RF_IF), rd_rm),
20421 cCL("cosez", e988160, 2, (RF, RF_IF), rd_rm),
20423 cCL("tans", ea08100, 2, (RF, RF_IF), rd_rm),
20424 cCL("tansp", ea08120, 2, (RF, RF_IF), rd_rm),
20425 cCL("tansm", ea08140, 2, (RF, RF_IF), rd_rm),
20426 cCL("tansz", ea08160, 2, (RF, RF_IF), rd_rm),
20427 cCL("tand", ea08180, 2, (RF, RF_IF), rd_rm),
20428 cCL("tandp", ea081a0, 2, (RF, RF_IF), rd_rm),
20429 cCL("tandm", ea081c0, 2, (RF, RF_IF), rd_rm),
20430 cCL("tandz", ea081e0, 2, (RF, RF_IF), rd_rm),
20431 cCL("tane", ea88100, 2, (RF, RF_IF), rd_rm),
20432 cCL("tanep", ea88120, 2, (RF, RF_IF), rd_rm),
20433 cCL("tanem", ea88140, 2, (RF, RF_IF), rd_rm),
20434 cCL("tanez", ea88160, 2, (RF, RF_IF), rd_rm),
20436 cCL("asns", eb08100, 2, (RF, RF_IF), rd_rm),
20437 cCL("asnsp", eb08120, 2, (RF, RF_IF), rd_rm),
20438 cCL("asnsm", eb08140, 2, (RF, RF_IF), rd_rm),
20439 cCL("asnsz", eb08160, 2, (RF, RF_IF), rd_rm),
20440 cCL("asnd", eb08180, 2, (RF, RF_IF), rd_rm),
20441 cCL("asndp", eb081a0, 2, (RF, RF_IF), rd_rm),
20442 cCL("asndm", eb081c0, 2, (RF, RF_IF), rd_rm),
20443 cCL("asndz", eb081e0, 2, (RF, RF_IF), rd_rm),
20444 cCL("asne", eb88100, 2, (RF, RF_IF), rd_rm),
20445 cCL("asnep", eb88120, 2, (RF, RF_IF), rd_rm),
20446 cCL("asnem", eb88140, 2, (RF, RF_IF), rd_rm),
20447 cCL("asnez", eb88160, 2, (RF, RF_IF), rd_rm),
20449 cCL("acss", ec08100, 2, (RF, RF_IF), rd_rm),
20450 cCL("acssp", ec08120, 2, (RF, RF_IF), rd_rm),
20451 cCL("acssm", ec08140, 2, (RF, RF_IF), rd_rm),
20452 cCL("acssz", ec08160, 2, (RF, RF_IF), rd_rm),
20453 cCL("acsd", ec08180, 2, (RF, RF_IF), rd_rm),
20454 cCL("acsdp", ec081a0, 2, (RF, RF_IF), rd_rm),
20455 cCL("acsdm", ec081c0, 2, (RF, RF_IF), rd_rm),
20456 cCL("acsdz", ec081e0, 2, (RF, RF_IF), rd_rm),
20457 cCL("acse", ec88100, 2, (RF, RF_IF), rd_rm),
20458 cCL("acsep", ec88120, 2, (RF, RF_IF), rd_rm),
20459 cCL("acsem", ec88140, 2, (RF, RF_IF), rd_rm),
20460 cCL("acsez", ec88160, 2, (RF, RF_IF), rd_rm),
20462 cCL("atns", ed08100, 2, (RF, RF_IF), rd_rm),
20463 cCL("atnsp", ed08120, 2, (RF, RF_IF), rd_rm),
20464 cCL("atnsm", ed08140, 2, (RF, RF_IF), rd_rm),
20465 cCL("atnsz", ed08160, 2, (RF, RF_IF), rd_rm),
20466 cCL("atnd", ed08180, 2, (RF, RF_IF), rd_rm),
20467 cCL("atndp", ed081a0, 2, (RF, RF_IF), rd_rm),
20468 cCL("atndm", ed081c0, 2, (RF, RF_IF), rd_rm),
20469 cCL("atndz", ed081e0, 2, (RF, RF_IF), rd_rm),
20470 cCL("atne", ed88100, 2, (RF, RF_IF), rd_rm),
20471 cCL("atnep", ed88120, 2, (RF, RF_IF), rd_rm),
20472 cCL("atnem", ed88140, 2, (RF, RF_IF), rd_rm),
20473 cCL("atnez", ed88160, 2, (RF, RF_IF), rd_rm),
20475 cCL("urds", ee08100, 2, (RF, RF_IF), rd_rm),
20476 cCL("urdsp", ee08120, 2, (RF, RF_IF), rd_rm),
20477 cCL("urdsm", ee08140, 2, (RF, RF_IF), rd_rm),
20478 cCL("urdsz", ee08160, 2, (RF, RF_IF), rd_rm),
20479 cCL("urdd", ee08180, 2, (RF, RF_IF), rd_rm),
20480 cCL("urddp", ee081a0, 2, (RF, RF_IF), rd_rm),
20481 cCL("urddm", ee081c0, 2, (RF, RF_IF), rd_rm),
20482 cCL("urddz", ee081e0, 2, (RF, RF_IF), rd_rm),
20483 cCL("urde", ee88100, 2, (RF, RF_IF), rd_rm),
20484 cCL("urdep", ee88120, 2, (RF, RF_IF), rd_rm),
20485 cCL("urdem", ee88140, 2, (RF, RF_IF), rd_rm),
20486 cCL("urdez", ee88160, 2, (RF, RF_IF), rd_rm),
20488 cCL("nrms", ef08100, 2, (RF, RF_IF), rd_rm),
20489 cCL("nrmsp", ef08120, 2, (RF, RF_IF), rd_rm),
20490 cCL("nrmsm", ef08140, 2, (RF, RF_IF), rd_rm),
20491 cCL("nrmsz", ef08160, 2, (RF, RF_IF), rd_rm),
20492 cCL("nrmd", ef08180, 2, (RF, RF_IF), rd_rm),
20493 cCL("nrmdp", ef081a0, 2, (RF, RF_IF), rd_rm),
20494 cCL("nrmdm", ef081c0, 2, (RF, RF_IF), rd_rm),
20495 cCL("nrmdz", ef081e0, 2, (RF, RF_IF), rd_rm),
20496 cCL("nrme", ef88100, 2, (RF, RF_IF), rd_rm),
20497 cCL("nrmep", ef88120, 2, (RF, RF_IF), rd_rm),
20498 cCL("nrmem", ef88140, 2, (RF, RF_IF), rd_rm),
20499 cCL("nrmez", ef88160, 2, (RF, RF_IF), rd_rm),
20501 cCL("adfs", e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
20502 cCL("adfsp", e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
20503 cCL("adfsm", e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
20504 cCL("adfsz", e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
20505 cCL("adfd", e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
20506 cCL("adfdp", e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20507 cCL("adfdm", e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20508 cCL("adfdz", e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20509 cCL("adfe", e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
20510 cCL("adfep", e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
20511 cCL("adfem", e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
20512 cCL("adfez", e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
20514 cCL("sufs", e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
20515 cCL("sufsp", e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
20516 cCL("sufsm", e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
20517 cCL("sufsz", e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
20518 cCL("sufd", e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
20519 cCL("sufdp", e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20520 cCL("sufdm", e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20521 cCL("sufdz", e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20522 cCL("sufe", e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
20523 cCL("sufep", e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
20524 cCL("sufem", e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
20525 cCL("sufez", e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
20527 cCL("rsfs", e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
20528 cCL("rsfsp", e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
20529 cCL("rsfsm", e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
20530 cCL("rsfsz", e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
20531 cCL("rsfd", e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
20532 cCL("rsfdp", e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20533 cCL("rsfdm", e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20534 cCL("rsfdz", e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20535 cCL("rsfe", e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
20536 cCL("rsfep", e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
20537 cCL("rsfem", e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
20538 cCL("rsfez", e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
20540 cCL("mufs", e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
20541 cCL("mufsp", e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
20542 cCL("mufsm", e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
20543 cCL("mufsz", e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
20544 cCL("mufd", e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
20545 cCL("mufdp", e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20546 cCL("mufdm", e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20547 cCL("mufdz", e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20548 cCL("mufe", e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
20549 cCL("mufep", e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
20550 cCL("mufem", e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
20551 cCL("mufez", e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
20553 cCL("dvfs", e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
20554 cCL("dvfsp", e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
20555 cCL("dvfsm", e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
20556 cCL("dvfsz", e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
20557 cCL("dvfd", e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
20558 cCL("dvfdp", e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20559 cCL("dvfdm", e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20560 cCL("dvfdz", e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20561 cCL("dvfe", e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
20562 cCL("dvfep", e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
20563 cCL("dvfem", e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
20564 cCL("dvfez", e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
20566 cCL("rdfs", e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
20567 cCL("rdfsp", e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
20568 cCL("rdfsm", e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
20569 cCL("rdfsz", e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
20570 cCL("rdfd", e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
20571 cCL("rdfdp", e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20572 cCL("rdfdm", e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20573 cCL("rdfdz", e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20574 cCL("rdfe", e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
20575 cCL("rdfep", e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
20576 cCL("rdfem", e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
20577 cCL("rdfez", e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
20579 cCL("pows", e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
20580 cCL("powsp", e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
20581 cCL("powsm", e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
20582 cCL("powsz", e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
20583 cCL("powd", e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
20584 cCL("powdp", e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20585 cCL("powdm", e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20586 cCL("powdz", e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20587 cCL("powe", e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
20588 cCL("powep", e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
20589 cCL("powem", e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
20590 cCL("powez", e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
20592 cCL("rpws", e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
20593 cCL("rpwsp", e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
20594 cCL("rpwsm", e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
20595 cCL("rpwsz", e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
20596 cCL("rpwd", e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
20597 cCL("rpwdp", e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20598 cCL("rpwdm", e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20599 cCL("rpwdz", e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20600 cCL("rpwe", e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
20601 cCL("rpwep", e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
20602 cCL("rpwem", e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
20603 cCL("rpwez", e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
20605 cCL("rmfs", e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
20606 cCL("rmfsp", e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
20607 cCL("rmfsm", e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
20608 cCL("rmfsz", e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
20609 cCL("rmfd", e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
20610 cCL("rmfdp", e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20611 cCL("rmfdm", e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20612 cCL("rmfdz", e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20613 cCL("rmfe", e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
20614 cCL("rmfep", e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
20615 cCL("rmfem", e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
20616 cCL("rmfez", e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
20618 cCL("fmls", e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
20619 cCL("fmlsp", e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
20620 cCL("fmlsm", e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
20621 cCL("fmlsz", e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
20622 cCL("fmld", e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
20623 cCL("fmldp", e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20624 cCL("fmldm", e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20625 cCL("fmldz", e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20626 cCL("fmle", e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
20627 cCL("fmlep", e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
20628 cCL("fmlem", e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
20629 cCL("fmlez", e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
20631 cCL("fdvs", ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20632 cCL("fdvsp", ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20633 cCL("fdvsm", ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20634 cCL("fdvsz", ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20635 cCL("fdvd", ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20636 cCL("fdvdp", ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20637 cCL("fdvdm", ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20638 cCL("fdvdz", ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20639 cCL("fdve", ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20640 cCL("fdvep", ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20641 cCL("fdvem", ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20642 cCL("fdvez", ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20644 cCL("frds", eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20645 cCL("frdsp", eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20646 cCL("frdsm", eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20647 cCL("frdsz", eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20648 cCL("frdd", eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20649 cCL("frddp", eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20650 cCL("frddm", eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20651 cCL("frddz", eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20652 cCL("frde", eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20653 cCL("frdep", eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20654 cCL("frdem", eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20655 cCL("frdez", eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20657 cCL("pols", ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
20658 cCL("polsp", ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
20659 cCL("polsm", ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
20660 cCL("polsz", ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
20661 cCL("pold", ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
20662 cCL("poldp", ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
20663 cCL("poldm", ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
20664 cCL("poldz", ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
20665 cCL("pole", ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
20666 cCL("polep", ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
20667 cCL("polem", ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
20668 cCL("polez", ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
20670 cCE("cmf", e90f110, 2, (RF, RF_IF), fpa_cmp),
20671 C3E("cmfe", ed0f110, 2, (RF, RF_IF), fpa_cmp),
20672 cCE("cnf", eb0f110, 2, (RF, RF_IF), fpa_cmp),
20673 C3E("cnfe", ef0f110, 2, (RF, RF_IF), fpa_cmp),
20675 cCL("flts", e000110, 2, (RF, RR), rn_rd),
20676 cCL("fltsp", e000130, 2, (RF, RR), rn_rd),
20677 cCL("fltsm", e000150, 2, (RF, RR), rn_rd),
20678 cCL("fltsz", e000170, 2, (RF, RR), rn_rd),
20679 cCL("fltd", e000190, 2, (RF, RR), rn_rd),
20680 cCL("fltdp", e0001b0, 2, (RF, RR), rn_rd),
20681 cCL("fltdm", e0001d0, 2, (RF, RR), rn_rd),
20682 cCL("fltdz", e0001f0, 2, (RF, RR), rn_rd),
20683 cCL("flte", e080110, 2, (RF, RR), rn_rd),
20684 cCL("fltep", e080130, 2, (RF, RR), rn_rd),
20685 cCL("fltem", e080150, 2, (RF, RR), rn_rd),
20686 cCL("fltez", e080170, 2, (RF, RR), rn_rd),
20688 /* The implementation of the FIX instruction is broken on some
20689 assemblers, in that it accepts a precision specifier as well as a
20690 rounding specifier, despite the fact that this is meaningless.
20691 To be more compatible, we accept it as well, though of course it
20692 does not set any bits. */
20693 cCE("fix", e100110, 2, (RR, RF), rd_rm),
20694 cCL("fixp", e100130, 2, (RR, RF), rd_rm),
20695 cCL("fixm", e100150, 2, (RR, RF), rd_rm),
20696 cCL("fixz", e100170, 2, (RR, RF), rd_rm),
20697 cCL("fixsp", e100130, 2, (RR, RF), rd_rm),
20698 cCL("fixsm", e100150, 2, (RR, RF), rd_rm),
20699 cCL("fixsz", e100170, 2, (RR, RF), rd_rm),
20700 cCL("fixdp", e100130, 2, (RR, RF), rd_rm),
20701 cCL("fixdm", e100150, 2, (RR, RF), rd_rm),
20702 cCL("fixdz", e100170, 2, (RR, RF), rd_rm),
20703 cCL("fixep", e100130, 2, (RR, RF), rd_rm),
20704 cCL("fixem", e100150, 2, (RR, RF), rd_rm),
20705 cCL("fixez", e100170, 2, (RR, RF), rd_rm),
20707 /* Instructions that were new with the real FPA, call them V2. */
20709 #define ARM_VARIANT & fpu_fpa_ext_v2
20711 cCE("lfm", c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20712 cCL("lfmfd", c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20713 cCL("lfmea", d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20714 cCE("sfm", c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20715 cCL("sfmfd", d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20716 cCL("sfmea", c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
20719 #define ARM_VARIANT & fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
20721 /* Moves and type conversions. */
20722 cCE("fcpys", eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
20723 cCE("fmrs", e100a10, 2, (RR, RVS), vfp_reg_from_sp),
20724 cCE("fmsr", e000a10, 2, (RVS, RR), vfp_sp_from_reg),
20725 cCE("fmstat", ef1fa10, 0, (), noargs),
20726 cCE("vmrs", ef00a10, 2, (APSR_RR, RVC), vmrs),
20727 cCE("vmsr", ee00a10, 2, (RVC, RR), vmsr),
20728 cCE("fsitos", eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
20729 cCE("fuitos", eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
20730 cCE("ftosis", ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
20731 cCE("ftosizs", ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20732 cCE("ftouis", ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
20733 cCE("ftouizs", ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
20734 cCE("fmrx", ef00a10, 2, (RR, RVC), rd_rn),
20735 cCE("fmxr", ee00a10, 2, (RVC, RR), rn_rd),
20737 /* Memory operations. */
20738 cCE("flds", d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20739 cCE("fsts", d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
20740 cCE("fldmias", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20741 cCE("fldmfds", c900a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20742 cCE("fldmdbs", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20743 cCE("fldmeas", d300a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20744 cCE("fldmiax", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20745 cCE("fldmfdx", c900b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20746 cCE("fldmdbx", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20747 cCE("fldmeax", d300b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20748 cCE("fstmias", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20749 cCE("fstmeas", c800a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmia),
20750 cCE("fstmdbs", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20751 cCE("fstmfds", d200a00, 2, (RRnpctw, VRSLST), vfp_sp_ldstmdb),
20752 cCE("fstmiax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20753 cCE("fstmeax", c800b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmia),
20754 cCE("fstmdbx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20755 cCE("fstmfdx", d200b00, 2, (RRnpctw, VRDLST), vfp_xp_ldstmdb),
20757 /* Monadic operations. */
20758 cCE("fabss", eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
20759 cCE("fnegs", eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
20760 cCE("fsqrts", eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
20762 /* Dyadic operations. */
20763 cCE("fadds", e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20764 cCE("fsubs", e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20765 cCE("fmuls", e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20766 cCE("fdivs", e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20767 cCE("fmacs", e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20768 cCE("fmscs", e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20769 cCE("fnmuls", e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20770 cCE("fnmacs", e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20771 cCE("fnmscs", e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
20774 cCE("fcmps", eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
20775 cCE("fcmpzs", eb50a40, 1, (RVS), vfp_sp_compare_z),
20776 cCE("fcmpes", eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
20777 cCE("fcmpezs", eb50ac0, 1, (RVS), vfp_sp_compare_z),
20779 /* Double precision load/store are still present on single precision
20780 implementations. */
20781 cCE("fldd", d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20782 cCE("fstd", d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
20783 cCE("fldmiad", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20784 cCE("fldmfdd", c900b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20785 cCE("fldmdbd", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20786 cCE("fldmead", d300b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20787 cCE("fstmiad", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20788 cCE("fstmead", c800b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmia),
20789 cCE("fstmdbd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20790 cCE("fstmfdd", d200b00, 2, (RRnpctw, VRDLST), vfp_dp_ldstmdb),
20793 #define ARM_VARIANT & fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
20795 /* Moves and type conversions. */
20796 cCE("fcpyd", eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20797 cCE("fcvtds", eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20798 cCE("fcvtsd", eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20799 cCE("fmdhr", e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
20800 cCE("fmdlr", e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
20801 cCE("fmrdh", e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
20802 cCE("fmrdl", e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
20803 cCE("fsitod", eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
20804 cCE("fuitod", eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
20805 cCE("ftosid", ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20806 cCE("ftosizd", ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20807 cCE("ftouid", ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
20808 cCE("ftouizd", ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
20810 /* Monadic operations. */
20811 cCE("fabsd", eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20812 cCE("fnegd", eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20813 cCE("fsqrtd", eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20815 /* Dyadic operations. */
20816 cCE("faddd", e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20817 cCE("fsubd", e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20818 cCE("fmuld", e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20819 cCE("fdivd", e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20820 cCE("fmacd", e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20821 cCE("fmscd", e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20822 cCE("fnmuld", e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20823 cCE("fnmacd", e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20824 cCE("fnmscd", e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
20827 cCE("fcmpd", eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
20828 cCE("fcmpzd", eb50b40, 1, (RVD), vfp_dp_rd),
20829 cCE("fcmped", eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
20830 cCE("fcmpezd", eb50bc0, 1, (RVD), vfp_dp_rd),
20833 #define ARM_VARIANT & fpu_vfp_ext_v2
20835 cCE("fmsrr", c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
20836 cCE("fmrrs", c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
20837 cCE("fmdrr", c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
20838 cCE("fmrrd", c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
20840 /* Instructions which may belong to either the Neon or VFP instruction sets.
20841 Individual encoder functions perform additional architecture checks. */
20843 #define ARM_VARIANT & fpu_vfp_ext_v1xd
20844 #undef THUMB_VARIANT
20845 #define THUMB_VARIANT & fpu_vfp_ext_v1xd
20847 /* These mnemonics are unique to VFP. */
20848 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
20849 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
20850 nCE(vnmul, _vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20851 nCE(vnmla, _vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20852 nCE(vnmls, _vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
20853 nCE(vcmp, _vcmp, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20854 nCE(vcmpe, _vcmpe, 2, (RVSD, RSVD_FI0), vfp_nsyn_cmp),
20855 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
20856 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
20857 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
20859 /* Mnemonics shared by Neon and VFP. */
20860 nCEF(vmul, _vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
20861 nCEF(vmla, _vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20862 nCEF(vmls, _vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
20864 nCEF(vadd, _vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20865 nCEF(vsub, _vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
20867 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20868 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
20870 NCE(vldm, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20871 NCE(vldmia, c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20872 NCE(vldmdb, d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20873 NCE(vstm, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20874 NCE(vstmia, c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20875 NCE(vstmdb, d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
20876 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20877 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
20879 nCEF(vcvt, _vcvt, 3, (RNSDQ, RNSDQ, oI32z), neon_cvt),
20880 nCEF(vcvtr, _vcvt, 2, (RNSDQ, RNSDQ), neon_cvtr),
20881 NCEF(vcvtb, eb20a40, 2, (RVSD, RVSD), neon_cvtb),
20882 NCEF(vcvtt, eb20a40, 2, (RVSD, RVSD), neon_cvtt),
20885 /* NOTE: All VMOV encoding is special-cased! */
20886 NCE(vmov, 0, 1, (VMOV), neon_mov),
20887 NCE(vmovq, 0, 1, (VMOV), neon_mov),
20890 #define ARM_VARIANT & arm_ext_fp16
20891 #undef THUMB_VARIANT
20892 #define THUMB_VARIANT & arm_ext_fp16
20893 /* New instructions added from v8.2, allowing the extraction and insertion of
20894 the upper 16 bits of a 32-bit vector register. */
20895 NCE (vmovx, eb00a40, 2, (RVS, RVS), neon_movhf),
20896 NCE (vins, eb00ac0, 2, (RVS, RVS), neon_movhf),
20898 /* New backported fma/fms instructions optional in v8.2. */
20899 NCE (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
20900 NCE (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
20902 #undef THUMB_VARIANT
20903 #define THUMB_VARIANT & fpu_neon_ext_v1
20905 #define ARM_VARIANT & fpu_neon_ext_v1
20907 /* Data processing with three registers of the same length. */
20908 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
20909 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
20910 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
20911 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20912 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20913 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20914 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20915 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
20916 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
20917 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
20918 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20919 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20920 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
20921 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
20922 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20923 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20924 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_rshl),
20925 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_rshl),
20926 /* If not immediate, fall back to neon_dyadic_i64_su.
20927 shl_imm should accept I8 I16 I32 I64,
20928 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
20929 nUF(vshl, _vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
20930 nUF(vshlq, _vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
20931 nUF(vqshl, _vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
20932 nUF(vqshlq, _vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
20933 /* Logic ops, types optional & ignored. */
20934 nUF(vand, _vand, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20935 nUF(vandq, _vand, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20936 nUF(vbic, _vbic, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20937 nUF(vbicq, _vbic, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20938 nUF(vorr, _vorr, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20939 nUF(vorrq, _vorr, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20940 nUF(vorn, _vorn, 3, (RNDQ, oRNDQ, RNDQ_Ibig), neon_logic),
20941 nUF(vornq, _vorn, 3, (RNQ, oRNQ, RNDQ_Ibig), neon_logic),
20942 nUF(veor, _veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
20943 nUF(veorq, _veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
20944 /* Bitfield ops, untyped. */
20945 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20946 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20947 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20948 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20949 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
20950 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
20951 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32. */
20952 nUF(vabd, _vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20953 nUF(vabdq, _vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20954 nUF(vmax, _vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20955 nUF(vmaxq, _vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20956 nUF(vmin, _vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
20957 nUF(vminq, _vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
20958 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
20959 back to neon_dyadic_if_su. */
20960 nUF(vcge, _vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20961 nUF(vcgeq, _vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20962 nUF(vcgt, _vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
20963 nUF(vcgtq, _vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
20964 nUF(vclt, _vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20965 nUF(vcltq, _vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20966 nUF(vcle, _vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
20967 nUF(vcleq, _vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
20968 /* Comparison. Type I8 I16 I32 F32. */
20969 nUF(vceq, _vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
20970 nUF(vceqq, _vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
20971 /* As above, D registers only. */
20972 nUF(vpmax, _vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20973 nUF(vpmin, _vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
20974 /* Int and float variants, signedness unimportant. */
20975 nUF(vmlaq, _vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20976 nUF(vmlsq, _vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
20977 nUF(vpadd, _vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
20978 /* Add/sub take types I8 I16 I32 I64 F32. */
20979 nUF(vaddq, _vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20980 nUF(vsubq, _vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
20981 /* vtst takes sizes 8, 16, 32. */
20982 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
20983 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
20984 /* VMUL takes I8 I16 I32 F32 P8. */
20985 nUF(vmulq, _vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
20986 /* VQD{R}MULH takes S16 S32. */
20987 nUF(vqdmulh, _vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20988 nUF(vqdmulhq, _vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20989 nUF(vqrdmulh, _vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
20990 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
20991 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20992 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20993 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
20994 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
20995 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20996 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20997 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
20998 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
20999 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
21000 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
21001 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
21002 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
21003 /* ARM v8.1 extension. */
21004 nUF (vqrdmlah, _vqrdmlah, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
21005 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
21006 nUF (vqrdmlsh, _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
21007 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qrdmlah),
21009 /* Two address, int/float. Types S8 S16 S32 F32. */
21010 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
21011 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
21013 /* Data processing with two registers and a shift amount. */
21014 /* Right shifts, and variants with rounding.
21015 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
21016 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
21017 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
21018 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
21019 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
21020 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
21021 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
21022 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
21023 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
21024 /* Shift and insert. Sizes accepted 8 16 32 64. */
21025 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
21026 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
21027 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
21028 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
21029 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
21030 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
21031 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
21032 /* Right shift immediate, saturating & narrowing, with rounding variants.
21033 Types accepted S16 S32 S64 U16 U32 U64. */
21034 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
21035 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
21036 /* As above, unsigned. Types accepted S16 S32 S64. */
21037 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
21038 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
21039 /* Right shift narrowing. Types accepted I16 I32 I64. */
21040 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
21041 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
21042 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
21043 nUF(vshll, _vshll, 3, (RNQ, RND, I32), neon_shll),
21044 /* CVT with optional immediate for fixed-point variant. */
21045 nUF(vcvtq, _vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
21047 nUF(vmvn, _vmvn, 2, (RNDQ, RNDQ_Ibig), neon_mvn),
21048 nUF(vmvnq, _vmvn, 2, (RNQ, RNDQ_Ibig), neon_mvn),
21050 /* Data processing, three registers of different lengths. */
21051 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
21052 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
21053 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
21054 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
21055 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
21056 /* If not scalar, fall back to neon_dyadic_long.
21057 Vector types as above, scalar types S16 S32 U16 U32. */
21058 nUF(vmlal, _vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
21059 nUF(vmlsl, _vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
21060 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
21061 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
21062 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
21063 /* Dyadic, narrowing insns. Types I16 I32 I64. */
21064 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21065 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21066 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21067 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
21068 /* Saturating doubling multiplies. Types S16 S32. */
21069 nUF(vqdmlal, _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21070 nUF(vqdmlsl, _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21071 nUF(vqdmull, _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
21072 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
21073 S16 S32 U16 U32. */
21074 nUF(vmull, _vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
21076 /* Extract. Size 8. */
21077 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
21078 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I15), neon_ext),
21080 /* Two registers, miscellaneous. */
21081 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
21082 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
21083 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
21084 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
21085 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
21086 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
21087 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
21088 /* Vector replicate. Sizes 8 16 32. */
21089 nCE(vdup, _vdup, 2, (RNDQ, RR_RNSC), neon_dup),
21090 nCE(vdupq, _vdup, 2, (RNQ, RR_RNSC), neon_dup),
21091 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
21092 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
21093 /* VMOVN. Types I16 I32 I64. */
21094 nUF(vmovn, _vmovn, 2, (RND, RNQ), neon_movn),
21095 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
21096 nUF(vqmovn, _vqmovn, 2, (RND, RNQ), neon_qmovn),
21097 /* VQMOVUN. Types S16 S32 S64. */
21098 nUF(vqmovun, _vqmovun, 2, (RND, RNQ), neon_qmovun),
21099 /* VZIP / VUZP. Sizes 8 16 32. */
21100 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
21101 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
21102 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
21103 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
21104 /* VQABS / VQNEG. Types S8 S16 S32. */
21105 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21106 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
21107 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
21108 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
21109 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
21110 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
21111 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
21112 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
21113 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
21114 /* Reciprocal estimates. Types U32 F16 F32. */
21115 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
21116 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
21117 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
21118 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
21119 /* VCLS. Types S8 S16 S32. */
21120 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
21121 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
21122 /* VCLZ. Types I8 I16 I32. */
21123 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
21124 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
21125 /* VCNT. Size 8. */
21126 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
21127 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
21128 /* Two address, untyped. */
21129 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
21130 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
21131 /* VTRN. Sizes 8 16 32. */
21132 nUF(vtrn, _vtrn, 2, (RNDQ, RNDQ), neon_trn),
21133 nUF(vtrnq, _vtrn, 2, (RNQ, RNQ), neon_trn),
21135 /* Table lookup. Size 8. */
21136 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21137 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
21139 #undef THUMB_VARIANT
21140 #define THUMB_VARIANT & fpu_vfp_v3_or_neon_ext
21142 #define ARM_VARIANT & fpu_vfp_v3_or_neon_ext
21144 /* Neon element/structure load/store. */
21145 nUF(vld1, _vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21146 nUF(vst1, _vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
21147 nUF(vld2, _vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21148 nUF(vst2, _vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
21149 nUF(vld3, _vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21150 nUF(vst3, _vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
21151 nUF(vld4, _vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21152 nUF(vst4, _vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
21154 #undef THUMB_VARIANT
21155 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
21157 #define ARM_VARIANT & fpu_vfp_ext_v3xd
21158 cCE("fconsts", eb00a00, 2, (RVS, I255), vfp_sp_const),
21159 cCE("fshtos", eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21160 cCE("fsltos", eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21161 cCE("fuhtos", ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21162 cCE("fultos", ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21163 cCE("ftoshs", ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21164 cCE("ftosls", ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21165 cCE("ftouhs", ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
21166 cCE("ftouls", ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
21168 #undef THUMB_VARIANT
21169 #define THUMB_VARIANT & fpu_vfp_ext_v3
21171 #define ARM_VARIANT & fpu_vfp_ext_v3
21173 cCE("fconstd", eb00b00, 2, (RVD, I255), vfp_dp_const),
21174 cCE("fshtod", eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21175 cCE("fsltod", eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21176 cCE("fuhtod", ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21177 cCE("fultod", ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21178 cCE("ftoshd", ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21179 cCE("ftosld", ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21180 cCE("ftouhd", ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
21181 cCE("ftould", ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
21184 #define ARM_VARIANT & fpu_vfp_ext_fma
21185 #undef THUMB_VARIANT
21186 #define THUMB_VARIANT & fpu_vfp_ext_fma
21187 /* Mnemonics shared by Neon and VFP. These are included in the
21188 VFP FMA variant; NEON and VFP FMA always includes the NEON
21189 FMA instructions. */
21190 nCEF(vfma, _vfma, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21191 nCEF(vfms, _vfms, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_fmac),
21192 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
21193 the v form should always be used. */
21194 cCE("ffmas", ea00a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21195 cCE("ffnmas", ea00a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
21196 cCE("ffmad", ea00b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21197 cCE("ffnmad", ea00b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
21198 nCE(vfnma, _vfnma, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21199 nCE(vfnms, _vfnms, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
21201 #undef THUMB_VARIANT
21203 #define ARM_VARIANT & arm_cext_xscale /* Intel XScale extensions. */
21205 cCE("mia", e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21206 cCE("miaph", e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21207 cCE("miabb", e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21208 cCE("miabt", e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21209 cCE("miatb", e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21210 cCE("miatt", e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
21211 cCE("mar", c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
21212 cCE("mra", c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
21215 #define ARM_VARIANT & arm_cext_iwmmxt /* Intel Wireless MMX technology. */
21217 cCE("tandcb", e13f130, 1, (RR), iwmmxt_tandorc),
21218 cCE("tandch", e53f130, 1, (RR), iwmmxt_tandorc),
21219 cCE("tandcw", e93f130, 1, (RR), iwmmxt_tandorc),
21220 cCE("tbcstb", e400010, 2, (RIWR, RR), rn_rd),
21221 cCE("tbcsth", e400050, 2, (RIWR, RR), rn_rd),
21222 cCE("tbcstw", e400090, 2, (RIWR, RR), rn_rd),
21223 cCE("textrcb", e130170, 2, (RR, I7), iwmmxt_textrc),
21224 cCE("textrch", e530170, 2, (RR, I7), iwmmxt_textrc),
21225 cCE("textrcw", e930170, 2, (RR, I7), iwmmxt_textrc),
21226 cCE("textrmub",e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21227 cCE("textrmuh",e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21228 cCE("textrmuw",e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
21229 cCE("textrmsb",e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21230 cCE("textrmsh",e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21231 cCE("textrmsw",e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
21232 cCE("tinsrb", e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21233 cCE("tinsrh", e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21234 cCE("tinsrw", e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
21235 cCE("tmcr", e000110, 2, (RIWC_RIWG, RR), rn_rd),
21236 cCE("tmcrr", c400000, 3, (RIWR, RR, RR), rm_rd_rn),
21237 cCE("tmia", e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21238 cCE("tmiaph", e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21239 cCE("tmiabb", e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21240 cCE("tmiabt", e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21241 cCE("tmiatb", e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21242 cCE("tmiatt", e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
21243 cCE("tmovmskb",e100030, 2, (RR, RIWR), rd_rn),
21244 cCE("tmovmskh",e500030, 2, (RR, RIWR), rd_rn),
21245 cCE("tmovmskw",e900030, 2, (RR, RIWR), rd_rn),
21246 cCE("tmrc", e100110, 2, (RR, RIWC_RIWG), rd_rn),
21247 cCE("tmrrc", c500000, 3, (RR, RR, RIWR), rd_rn_rm),
21248 cCE("torcb", e13f150, 1, (RR), iwmmxt_tandorc),
21249 cCE("torch", e53f150, 1, (RR), iwmmxt_tandorc),
21250 cCE("torcw", e93f150, 1, (RR), iwmmxt_tandorc),
21251 cCE("waccb", e0001c0, 2, (RIWR, RIWR), rd_rn),
21252 cCE("wacch", e4001c0, 2, (RIWR, RIWR), rd_rn),
21253 cCE("waccw", e8001c0, 2, (RIWR, RIWR), rd_rn),
21254 cCE("waddbss", e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21255 cCE("waddb", e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21256 cCE("waddbus", e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21257 cCE("waddhss", e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21258 cCE("waddh", e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21259 cCE("waddhus", e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21260 cCE("waddwss", eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21261 cCE("waddw", e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21262 cCE("waddwus", e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21263 cCE("waligni", e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
21264 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21265 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21266 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21267 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21268 cCE("wand", e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21269 cCE("wandn", e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21270 cCE("wavg2b", e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21271 cCE("wavg2br", e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21272 cCE("wavg2h", ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21273 cCE("wavg2hr", ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21274 cCE("wcmpeqb", e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21275 cCE("wcmpeqh", e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21276 cCE("wcmpeqw", e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21277 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21278 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21279 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21280 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21281 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21282 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21283 cCE("wldrb", c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21284 cCE("wldrh", c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21285 cCE("wldrw", c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21286 cCE("wldrd", c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21287 cCE("wmacs", e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21288 cCE("wmacsz", e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21289 cCE("wmacu", e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21290 cCE("wmacuz", e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21291 cCE("wmadds", ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21292 cCE("wmaddu", e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21293 cCE("wmaxsb", e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21294 cCE("wmaxsh", e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21295 cCE("wmaxsw", ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21296 cCE("wmaxub", e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21297 cCE("wmaxuh", e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21298 cCE("wmaxuw", e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21299 cCE("wminsb", e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21300 cCE("wminsh", e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21301 cCE("wminsw", eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21302 cCE("wminub", e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21303 cCE("wminuh", e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21304 cCE("wminuw", e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21305 cCE("wmov", e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
21306 cCE("wmulsm", e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21307 cCE("wmulsl", e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21308 cCE("wmulum", e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21309 cCE("wmulul", e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21310 cCE("wor", e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21311 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21312 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21313 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21314 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21315 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21316 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21317 cCE("wrorh", e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21318 cCE("wrorhg", e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21319 cCE("wrorw", eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21320 cCE("wrorwg", eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21321 cCE("wrord", ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21322 cCE("wrordg", ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21323 cCE("wsadb", e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21324 cCE("wsadbz", e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21325 cCE("wsadh", e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21326 cCE("wsadhz", e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21327 cCE("wshufh", e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
21328 cCE("wsllh", e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21329 cCE("wsllhg", e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21330 cCE("wsllw", e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21331 cCE("wsllwg", e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21332 cCE("wslld", ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21333 cCE("wslldg", ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21334 cCE("wsrah", e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21335 cCE("wsrahg", e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21336 cCE("wsraw", e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21337 cCE("wsrawg", e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21338 cCE("wsrad", ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21339 cCE("wsradg", ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21340 cCE("wsrlh", e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21341 cCE("wsrlhg", e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21342 cCE("wsrlw", ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21343 cCE("wsrlwg", ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21344 cCE("wsrld", ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
21345 cCE("wsrldg", ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
21346 cCE("wstrb", c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21347 cCE("wstrh", c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
21348 cCE("wstrw", c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
21349 cCE("wstrd", c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
21350 cCE("wsubbss", e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21351 cCE("wsubb", e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21352 cCE("wsubbus", e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21353 cCE("wsubhss", e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21354 cCE("wsubh", e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21355 cCE("wsubhus", e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21356 cCE("wsubwss", eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21357 cCE("wsubw", e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21358 cCE("wsubwus", e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21359 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR), rd_rn),
21360 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR), rd_rn),
21361 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR), rd_rn),
21362 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR), rd_rn),
21363 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR), rd_rn),
21364 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR), rd_rn),
21365 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21366 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21367 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21368 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR), rd_rn),
21369 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR), rd_rn),
21370 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR), rd_rn),
21371 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR), rd_rn),
21372 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR), rd_rn),
21373 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR), rd_rn),
21374 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21375 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21376 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21377 cCE("wxor", e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21378 cCE("wzero", e300000, 1, (RIWR), iwmmxt_wzero),
21381 #define ARM_VARIANT & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
21383 cCE("torvscb", e12f190, 1, (RR), iwmmxt_tandorc),
21384 cCE("torvsch", e52f190, 1, (RR), iwmmxt_tandorc),
21385 cCE("torvscw", e92f190, 1, (RR), iwmmxt_tandorc),
21386 cCE("wabsb", e2001c0, 2, (RIWR, RIWR), rd_rn),
21387 cCE("wabsh", e6001c0, 2, (RIWR, RIWR), rd_rn),
21388 cCE("wabsw", ea001c0, 2, (RIWR, RIWR), rd_rn),
21389 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21390 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21391 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21392 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21393 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21394 cCE("waddhc", e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21395 cCE("waddwc", ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21396 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21397 cCE("wavg4", e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21398 cCE("wavg4r", e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21399 cCE("wmaddsn", ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21400 cCE("wmaddsx", eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21401 cCE("wmaddun", ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21402 cCE("wmaddux", e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21403 cCE("wmerge", e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
21404 cCE("wmiabb", e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21405 cCE("wmiabt", e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21406 cCE("wmiatb", e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21407 cCE("wmiatt", e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21408 cCE("wmiabbn", e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21409 cCE("wmiabtn", e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21410 cCE("wmiatbn", e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21411 cCE("wmiattn", e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21412 cCE("wmiawbb", e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21413 cCE("wmiawbt", e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21414 cCE("wmiawtb", ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21415 cCE("wmiawtt", eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21416 cCE("wmiawbbn", ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21417 cCE("wmiawbtn", ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21418 cCE("wmiawtbn", ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21419 cCE("wmiawttn", ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21420 cCE("wmulsmr", ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21421 cCE("wmulumr", ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21422 cCE("wmulwumr", ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21423 cCE("wmulwsmr", ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21424 cCE("wmulwum", ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21425 cCE("wmulwsm", ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21426 cCE("wmulwl", eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21427 cCE("wqmiabb", e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21428 cCE("wqmiabt", e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21429 cCE("wqmiatb", ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21430 cCE("wqmiatt", eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21431 cCE("wqmiabbn", ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21432 cCE("wqmiabtn", ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21433 cCE("wqmiatbn", ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21434 cCE("wqmiattn", ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21435 cCE("wqmulm", e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21436 cCE("wqmulmr", e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21437 cCE("wqmulwm", ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21438 cCE("wqmulwmr", ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21439 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
21442 #define ARM_VARIANT & arm_cext_maverick /* Cirrus Maverick instructions. */
21444 cCE("cfldrs", c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21445 cCE("cfldrd", c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21446 cCE("cfldr32", c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21447 cCE("cfldr64", c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21448 cCE("cfstrs", c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
21449 cCE("cfstrd", c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
21450 cCE("cfstr32", c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
21451 cCE("cfstr64", c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
21452 cCE("cfmvsr", e000450, 2, (RMF, RR), rn_rd),
21453 cCE("cfmvrs", e100450, 2, (RR, RMF), rd_rn),
21454 cCE("cfmvdlr", e000410, 2, (RMD, RR), rn_rd),
21455 cCE("cfmvrdl", e100410, 2, (RR, RMD), rd_rn),
21456 cCE("cfmvdhr", e000430, 2, (RMD, RR), rn_rd),
21457 cCE("cfmvrdh", e100430, 2, (RR, RMD), rd_rn),
21458 cCE("cfmv64lr",e000510, 2, (RMDX, RR), rn_rd),
21459 cCE("cfmvr64l",e100510, 2, (RR, RMDX), rd_rn),
21460 cCE("cfmv64hr",e000530, 2, (RMDX, RR), rn_rd),
21461 cCE("cfmvr64h",e100530, 2, (RR, RMDX), rd_rn),
21462 cCE("cfmval32",e200440, 2, (RMAX, RMFX), rd_rn),
21463 cCE("cfmv32al",e100440, 2, (RMFX, RMAX), rd_rn),
21464 cCE("cfmvam32",e200460, 2, (RMAX, RMFX), rd_rn),
21465 cCE("cfmv32am",e100460, 2, (RMFX, RMAX), rd_rn),
21466 cCE("cfmvah32",e200480, 2, (RMAX, RMFX), rd_rn),
21467 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX), rd_rn),
21468 cCE("cfmva32", e2004a0, 2, (RMAX, RMFX), rd_rn),
21469 cCE("cfmv32a", e1004a0, 2, (RMFX, RMAX), rd_rn),
21470 cCE("cfmva64", e2004c0, 2, (RMAX, RMDX), rd_rn),
21471 cCE("cfmv64a", e1004c0, 2, (RMDX, RMAX), rd_rn),
21472 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX), mav_dspsc),
21473 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS), rd),
21474 cCE("cfcpys", e000400, 2, (RMF, RMF), rd_rn),
21475 cCE("cfcpyd", e000420, 2, (RMD, RMD), rd_rn),
21476 cCE("cfcvtsd", e000460, 2, (RMD, RMF), rd_rn),
21477 cCE("cfcvtds", e000440, 2, (RMF, RMD), rd_rn),
21478 cCE("cfcvt32s",e000480, 2, (RMF, RMFX), rd_rn),
21479 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX), rd_rn),
21480 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX), rd_rn),
21481 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX), rd_rn),
21482 cCE("cfcvts32",e100580, 2, (RMFX, RMF), rd_rn),
21483 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD), rd_rn),
21484 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF), rd_rn),
21485 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD), rd_rn),
21486 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR), mav_triple),
21487 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR), mav_triple),
21488 cCE("cfsh32", e000500, 3, (RMFX, RMFX, I63s), mav_shift),
21489 cCE("cfsh64", e200500, 3, (RMDX, RMDX, I63s), mav_shift),
21490 cCE("cfcmps", e100490, 3, (RR, RMF, RMF), rd_rn_rm),
21491 cCE("cfcmpd", e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
21492 cCE("cfcmp32", e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
21493 cCE("cfcmp64", e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
21494 cCE("cfabss", e300400, 2, (RMF, RMF), rd_rn),
21495 cCE("cfabsd", e300420, 2, (RMD, RMD), rd_rn),
21496 cCE("cfnegs", e300440, 2, (RMF, RMF), rd_rn),
21497 cCE("cfnegd", e300460, 2, (RMD, RMD), rd_rn),
21498 cCE("cfadds", e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
21499 cCE("cfaddd", e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
21500 cCE("cfsubs", e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
21501 cCE("cfsubd", e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
21502 cCE("cfmuls", e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
21503 cCE("cfmuld", e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
21504 cCE("cfabs32", e300500, 2, (RMFX, RMFX), rd_rn),
21505 cCE("cfabs64", e300520, 2, (RMDX, RMDX), rd_rn),
21506 cCE("cfneg32", e300540, 2, (RMFX, RMFX), rd_rn),
21507 cCE("cfneg64", e300560, 2, (RMDX, RMDX), rd_rn),
21508 cCE("cfadd32", e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21509 cCE("cfadd64", e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21510 cCE("cfsub32", e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21511 cCE("cfsub64", e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21512 cCE("cfmul32", e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21513 cCE("cfmul64", e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
21514 cCE("cfmac32", e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21515 cCE("cfmsc32", e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
21516 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21517 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
21518 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21519 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
21521 /* ARMv8.5-A instructions. */
21523 #define ARM_VARIANT & arm_ext_sb
21524 #undef THUMB_VARIANT
21525 #define THUMB_VARIANT & arm_ext_sb
21526 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
21528 /* ARMv8-M instructions. */
21530 #define ARM_VARIANT NULL
21531 #undef THUMB_VARIANT
21532 #define THUMB_VARIANT & arm_ext_v8m
21533 ToU("sg", e97fe97f, 0, (), noargs),
21534 ToC("blxns", 4784, 1, (RRnpc), t_blx),
21535 ToC("bxns", 4704, 1, (RRnpc), t_bx),
21536 ToC("tt", e840f000, 2, (RRnpc, RRnpc), tt),
21537 ToC("ttt", e840f040, 2, (RRnpc, RRnpc), tt),
21538 ToC("tta", e840f080, 2, (RRnpc, RRnpc), tt),
21539 ToC("ttat", e840f0c0, 2, (RRnpc, RRnpc), tt),
21541 /* FP for ARMv8-M Mainline. Enabled for ARMv8-M Mainline because the
21542 instructions behave as nop if no VFP is present. */
21543 #undef THUMB_VARIANT
21544 #define THUMB_VARIANT & arm_ext_v8m_main
21545 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
21546 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
21549 #undef THUMB_VARIANT
21575 /* MD interface: bits in the object file. */
21577 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
21578 for use in the a.out file, and stores them in the array pointed to by buf.
21579 This knows about the endian-ness of the target machine and does
21580 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
21581 2 (short) and 4 (long) Floating numbers are put out as a series of
21582 LITTLENUMS (shorts, here at least). */
21585 md_number_to_chars (char * buf, valueT val, int n)
21587 if (target_big_endian)
21588 number_to_chars_bigendian (buf, val, n);
21590 number_to_chars_littleendian (buf, val, n);
21594 md_chars_to_number (char * buf, int n)
21597 unsigned char * where = (unsigned char *) buf;
21599 if (target_big_endian)
21604 result |= (*where++ & 255);
21612 result |= (where[n] & 255);
21619 /* MD interface: Sections. */
21621 /* Calculate the maximum variable size (i.e., excluding fr_fix)
21622 that an rs_machine_dependent frag may reach. */
21625 arm_frag_max_var (fragS *fragp)
21627 /* We only use rs_machine_dependent for variable-size Thumb instructions,
21628 which are either THUMB_SIZE (2) or INSN_SIZE (4).
21630 Note that we generate relaxable instructions even for cases that don't
21631 really need it, like an immediate that's a trivial constant. So we're
21632 overestimating the instruction size for some of those cases. Rather
21633 than putting more intelligence here, it would probably be better to
21634 avoid generating a relaxation frag in the first place when it can be
21635 determined up front that a short instruction will suffice. */
21637 gas_assert (fragp->fr_type == rs_machine_dependent);
21641 /* Estimate the size of a frag before relaxing. Assume everything fits in
21645 md_estimate_size_before_relax (fragS * fragp,
21646 segT segtype ATTRIBUTE_UNUSED)
21652 /* Convert a machine dependent frag. */
21655 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
21657 unsigned long insn;
21658 unsigned long old_op;
21666 buf = fragp->fr_literal + fragp->fr_fix;
21668 old_op = bfd_get_16(abfd, buf);
21669 if (fragp->fr_symbol)
21671 exp.X_op = O_symbol;
21672 exp.X_add_symbol = fragp->fr_symbol;
21676 exp.X_op = O_constant;
21678 exp.X_add_number = fragp->fr_offset;
21679 opcode = fragp->fr_subtype;
21682 case T_MNEM_ldr_pc:
21683 case T_MNEM_ldr_pc2:
21684 case T_MNEM_ldr_sp:
21685 case T_MNEM_str_sp:
21692 if (fragp->fr_var == 4)
21694 insn = THUMB_OP32 (opcode);
21695 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
21697 insn |= (old_op & 0x700) << 4;
21701 insn |= (old_op & 7) << 12;
21702 insn |= (old_op & 0x38) << 13;
21704 insn |= 0x00000c00;
21705 put_thumb32_insn (buf, insn);
21706 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
21710 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
21712 pc_rel = (opcode == T_MNEM_ldr_pc2);
21715 if (fragp->fr_var == 4)
21717 insn = THUMB_OP32 (opcode);
21718 insn |= (old_op & 0xf0) << 4;
21719 put_thumb32_insn (buf, insn);
21720 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
21724 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
21725 exp.X_add_number -= 4;
21733 if (fragp->fr_var == 4)
21735 int r0off = (opcode == T_MNEM_mov
21736 || opcode == T_MNEM_movs) ? 0 : 8;
21737 insn = THUMB_OP32 (opcode);
21738 insn = (insn & 0xe1ffffff) | 0x10000000;
21739 insn |= (old_op & 0x700) << r0off;
21740 put_thumb32_insn (buf, insn);
21741 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
21745 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
21750 if (fragp->fr_var == 4)
21752 insn = THUMB_OP32(opcode);
21753 put_thumb32_insn (buf, insn);
21754 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
21757 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
21761 if (fragp->fr_var == 4)
21763 insn = THUMB_OP32(opcode);
21764 insn |= (old_op & 0xf00) << 14;
21765 put_thumb32_insn (buf, insn);
21766 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
21769 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
21772 case T_MNEM_add_sp:
21773 case T_MNEM_add_pc:
21774 case T_MNEM_inc_sp:
21775 case T_MNEM_dec_sp:
21776 if (fragp->fr_var == 4)
21778 /* ??? Choose between add and addw. */
21779 insn = THUMB_OP32 (opcode);
21780 insn |= (old_op & 0xf0) << 4;
21781 put_thumb32_insn (buf, insn);
21782 if (opcode == T_MNEM_add_pc)
21783 reloc_type = BFD_RELOC_ARM_T32_IMM12;
21785 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
21788 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
21796 if (fragp->fr_var == 4)
21798 insn = THUMB_OP32 (opcode);
21799 insn |= (old_op & 0xf0) << 4;
21800 insn |= (old_op & 0xf) << 16;
21801 put_thumb32_insn (buf, insn);
21802 if (insn & (1 << 20))
21803 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
21805 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
21808 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
21814 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
21815 (enum bfd_reloc_code_real) reloc_type);
21816 fixp->fx_file = fragp->fr_file;
21817 fixp->fx_line = fragp->fr_line;
21818 fragp->fr_fix += fragp->fr_var;
21820 /* Set whether we use thumb-2 ISA based on final relaxation results. */
21821 if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
21822 && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
21823 ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
21826 /* Return the size of a relaxable immediate operand instruction.
21827 SHIFT and SIZE specify the form of the allowable immediate. */
21829 relax_immediate (fragS *fragp, int size, int shift)
21835 /* ??? Should be able to do better than this. */
21836 if (fragp->fr_symbol)
21839 low = (1 << shift) - 1;
21840 mask = (1 << (shift + size)) - (1 << shift);
21841 offset = fragp->fr_offset;
21842 /* Force misaligned offsets to 32-bit variant. */
21845 if (offset & ~mask)
21850 /* Get the address of a symbol during relaxation. */
21852 relaxed_symbol_addr (fragS *fragp, long stretch)
21858 sym = fragp->fr_symbol;
21859 sym_frag = symbol_get_frag (sym);
21860 know (S_GET_SEGMENT (sym) != absolute_section
21861 || sym_frag == &zero_address_frag);
21862 addr = S_GET_VALUE (sym) + fragp->fr_offset;
21864 /* If frag has yet to be reached on this pass, assume it will
21865 move by STRETCH just as we did. If this is not so, it will
21866 be because some frag between grows, and that will force
21870 && sym_frag->relax_marker != fragp->relax_marker)
21874 /* Adjust stretch for any alignment frag. Note that if have
21875 been expanding the earlier code, the symbol may be
21876 defined in what appears to be an earlier frag. FIXME:
21877 This doesn't handle the fr_subtype field, which specifies
21878 a maximum number of bytes to skip when doing an
21880 for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
21882 if (f->fr_type == rs_align || f->fr_type == rs_align_code)
21885 stretch = - ((- stretch)
21886 & ~ ((1 << (int) f->fr_offset) - 1));
21888 stretch &= ~ ((1 << (int) f->fr_offset) - 1);
21900 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
21903 relax_adr (fragS *fragp, asection *sec, long stretch)
21908 /* Assume worst case for symbols not known to be in the same section. */
21909 if (fragp->fr_symbol == NULL
21910 || !S_IS_DEFINED (fragp->fr_symbol)
21911 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21912 || S_IS_WEAK (fragp->fr_symbol))
21915 val = relaxed_symbol_addr (fragp, stretch);
21916 addr = fragp->fr_address + fragp->fr_fix;
21917 addr = (addr + 4) & ~3;
21918 /* Force misaligned targets to 32-bit variant. */
21922 if (val < 0 || val > 1020)
21927 /* Return the size of a relaxable add/sub immediate instruction. */
21929 relax_addsub (fragS *fragp, asection *sec)
21934 buf = fragp->fr_literal + fragp->fr_fix;
21935 op = bfd_get_16(sec->owner, buf);
21936 if ((op & 0xf) == ((op >> 4) & 0xf))
21937 return relax_immediate (fragp, 8, 0);
21939 return relax_immediate (fragp, 3, 0);
21942 /* Return TRUE iff the definition of symbol S could be pre-empted
21943 (overridden) at link or load time. */
21945 symbol_preemptible (symbolS *s)
21947 /* Weak symbols can always be pre-empted. */
21951 /* Non-global symbols cannot be pre-empted. */
21952 if (! S_IS_EXTERNAL (s))
21956 /* In ELF, a global symbol can be marked protected, or private. In that
21957 case it can't be pre-empted (other definitions in the same link unit
21958 would violate the ODR). */
21959 if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
21963 /* Other global symbols might be pre-empted. */
21967 /* Return the size of a relaxable branch instruction. BITS is the
21968 size of the offset field in the narrow instruction. */
21971 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
21977 /* Assume worst case for symbols not known to be in the same section. */
21978 if (!S_IS_DEFINED (fragp->fr_symbol)
21979 || sec != S_GET_SEGMENT (fragp->fr_symbol)
21980 || S_IS_WEAK (fragp->fr_symbol))
21984 /* A branch to a function in ARM state will require interworking. */
21985 if (S_IS_DEFINED (fragp->fr_symbol)
21986 && ARM_IS_FUNC (fragp->fr_symbol))
21990 if (symbol_preemptible (fragp->fr_symbol))
21993 val = relaxed_symbol_addr (fragp, stretch);
21994 addr = fragp->fr_address + fragp->fr_fix + 4;
21997 /* Offset is a signed value *2 */
21999 if (val >= limit || val < -limit)
22005 /* Relax a machine dependent frag. This returns the amount by which
22006 the current size of the frag should change. */
22009 arm_relax_frag (asection *sec, fragS *fragp, long stretch)
22014 oldsize = fragp->fr_var;
22015 switch (fragp->fr_subtype)
22017 case T_MNEM_ldr_pc2:
22018 newsize = relax_adr (fragp, sec, stretch);
22020 case T_MNEM_ldr_pc:
22021 case T_MNEM_ldr_sp:
22022 case T_MNEM_str_sp:
22023 newsize = relax_immediate (fragp, 8, 2);
22027 newsize = relax_immediate (fragp, 5, 2);
22031 newsize = relax_immediate (fragp, 5, 1);
22035 newsize = relax_immediate (fragp, 5, 0);
22038 newsize = relax_adr (fragp, sec, stretch);
22044 newsize = relax_immediate (fragp, 8, 0);
22047 newsize = relax_branch (fragp, sec, 11, stretch);
22050 newsize = relax_branch (fragp, sec, 8, stretch);
22052 case T_MNEM_add_sp:
22053 case T_MNEM_add_pc:
22054 newsize = relax_immediate (fragp, 8, 2);
22056 case T_MNEM_inc_sp:
22057 case T_MNEM_dec_sp:
22058 newsize = relax_immediate (fragp, 7, 2);
22064 newsize = relax_addsub (fragp, sec);
22070 fragp->fr_var = newsize;
22071 /* Freeze wide instructions that are at or before the same location as
22072 in the previous pass. This avoids infinite loops.
22073 Don't freeze them unconditionally because targets may be artificially
22074 misaligned by the expansion of preceding frags. */
22075 if (stretch <= 0 && newsize > 2)
22077 md_convert_frag (sec->owner, sec, fragp);
22081 return newsize - oldsize;
22084 /* Round up a section size to the appropriate boundary. */
22087 md_section_align (segT segment ATTRIBUTE_UNUSED,
22093 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
22094 of an rs_align_code fragment. */
22097 arm_handle_align (fragS * fragP)
22099 static unsigned char const arm_noop[2][2][4] =
22102 {0x00, 0x00, 0xa0, 0xe1}, /* LE */
22103 {0xe1, 0xa0, 0x00, 0x00}, /* BE */
22106 {0x00, 0xf0, 0x20, 0xe3}, /* LE */
22107 {0xe3, 0x20, 0xf0, 0x00}, /* BE */
22110 static unsigned char const thumb_noop[2][2][2] =
22113 {0xc0, 0x46}, /* LE */
22114 {0x46, 0xc0}, /* BE */
22117 {0x00, 0xbf}, /* LE */
22118 {0xbf, 0x00} /* BE */
22121 static unsigned char const wide_thumb_noop[2][4] =
22122 { /* Wide Thumb-2 */
22123 {0xaf, 0xf3, 0x00, 0x80}, /* LE */
22124 {0xf3, 0xaf, 0x80, 0x00}, /* BE */
22127 unsigned bytes, fix, noop_size;
22129 const unsigned char * noop;
22130 const unsigned char *narrow_noop = NULL;
22135 if (fragP->fr_type != rs_align_code)
22138 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
22139 p = fragP->fr_literal + fragP->fr_fix;
22142 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
22143 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
22145 gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);
22147 if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
22149 if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
22150 ? selected_cpu : arm_arch_none, arm_ext_v6t2))
22152 narrow_noop = thumb_noop[1][target_big_endian];
22153 noop = wide_thumb_noop[target_big_endian];
22156 noop = thumb_noop[0][target_big_endian];
22164 noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
22165 ? selected_cpu : arm_arch_none,
22167 [target_big_endian];
22174 fragP->fr_var = noop_size;
22176 if (bytes & (noop_size - 1))
22178 fix = bytes & (noop_size - 1);
22180 insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
22182 memset (p, 0, fix);
22189 if (bytes & noop_size)
22191 /* Insert a narrow noop. */
22192 memcpy (p, narrow_noop, noop_size);
22194 bytes -= noop_size;
22198 /* Use wide noops for the remainder */
22202 while (bytes >= noop_size)
22204 memcpy (p, noop, noop_size);
22206 bytes -= noop_size;
22210 fragP->fr_fix += fix;
22213 /* Called from md_do_align. Used to create an alignment
22214 frag in a code section. */
22217 arm_frag_align_code (int n, int max)
22221 /* We assume that there will never be a requirement
22222 to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes. */
22223 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
22228 _("alignments greater than %d bytes not supported in .text sections."),
22229 MAX_MEM_FOR_RS_ALIGN_CODE + 1);
22230 as_fatal ("%s", err_msg);
22233 p = frag_var (rs_align_code,
22234 MAX_MEM_FOR_RS_ALIGN_CODE,
22236 (relax_substateT) max,
22243 /* Perform target specific initialisation of a frag.
22244 Note - despite the name this initialisation is not done when the frag
22245 is created, but only when its type is assigned. A frag can be created
22246 and used a long time before its type is set, so beware of assuming that
22247 this initialisation is performed first. */
22251 arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
22253 /* Record whether this frag is in an ARM or a THUMB area. */
22254 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
22257 #else /* OBJ_ELF is defined. */
22259 arm_init_frag (fragS * fragP, int max_chars)
22261 bfd_boolean frag_thumb_mode;
22263 /* If the current ARM vs THUMB mode has not already
22264 been recorded into this frag then do so now. */
22265 if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
22266 fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
22268 /* PR 21809: Do not set a mapping state for debug sections
22269 - it just confuses other tools. */
22270 if (bfd_get_section_flags (NULL, now_seg) & SEC_DEBUGGING)
22273 frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
22275 /* Record a mapping symbol for alignment frags. We will delete this
22276 later if the alignment ends up empty. */
22277 switch (fragP->fr_type)
22280 case rs_align_test:
22282 mapping_state_2 (MAP_DATA, max_chars);
22284 case rs_align_code:
22285 mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
22292 /* When we change sections we need to issue a new mapping symbol. */
22295 arm_elf_change_section (void)
22297 /* Link an unlinked unwind index table section to the .text section. */
22298 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
22299 && elf_linked_to_section (now_seg) == NULL)
22300 elf_linked_to_section (now_seg) = text_section;
22304 arm_elf_section_type (const char * str, size_t len)
22306 if (len == 5 && strncmp (str, "exidx", 5) == 0)
22307 return SHT_ARM_EXIDX;
22312 /* Code to deal with unwinding tables. */
22314 static void add_unwind_adjustsp (offsetT);
22316 /* Generate any deferred unwind frame offset. */
22319 flush_pending_unwind (void)
22323 offset = unwind.pending_offset;
22324 unwind.pending_offset = 0;
22326 add_unwind_adjustsp (offset);
22329 /* Add an opcode to this list for this function. Two-byte opcodes should
22330 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
22334 add_unwind_opcode (valueT op, int length)
22336 /* Add any deferred stack adjustment. */
22337 if (unwind.pending_offset)
22338 flush_pending_unwind ();
22340 unwind.sp_restored = 0;
22342 if (unwind.opcode_count + length > unwind.opcode_alloc)
22344 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
22345 if (unwind.opcodes)
22346 unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
22347 unwind.opcode_alloc);
22349 unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
22354 unwind.opcodes[unwind.opcode_count] = op & 0xff;
22356 unwind.opcode_count++;
22360 /* Add unwind opcodes to adjust the stack pointer. */
22363 add_unwind_adjustsp (offsetT offset)
22367 if (offset > 0x200)
22369 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
22374 /* Long form: 0xb2, uleb128. */
22375 /* This might not fit in a word so add the individual bytes,
22376 remembering the list is built in reverse order. */
22377 o = (valueT) ((offset - 0x204) >> 2);
22379 add_unwind_opcode (0, 1);
22381 /* Calculate the uleb128 encoding of the offset. */
22385 bytes[n] = o & 0x7f;
22391 /* Add the insn. */
22393 add_unwind_opcode (bytes[n - 1], 1);
22394 add_unwind_opcode (0xb2, 1);
22396 else if (offset > 0x100)
22398 /* Two short opcodes. */
22399 add_unwind_opcode (0x3f, 1);
22400 op = (offset - 0x104) >> 2;
22401 add_unwind_opcode (op, 1);
22403 else if (offset > 0)
22405 /* Short opcode. */
22406 op = (offset - 4) >> 2;
22407 add_unwind_opcode (op, 1);
22409 else if (offset < 0)
22412 while (offset > 0x100)
22414 add_unwind_opcode (0x7f, 1);
22417 op = ((offset - 4) >> 2) | 0x40;
22418 add_unwind_opcode (op, 1);
22422 /* Finish the list of unwind opcodes for this function. */
22425 finish_unwind_opcodes (void)
22429 if (unwind.fp_used)
22431 /* Adjust sp as necessary. */
22432 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
22433 flush_pending_unwind ();
22435 /* After restoring sp from the frame pointer. */
22436 op = 0x90 | unwind.fp_reg;
22437 add_unwind_opcode (op, 1);
22440 flush_pending_unwind ();
22444 /* Start an exception table entry. If idx is nonzero this is an index table
22448 start_unwind_section (const segT text_seg, int idx)
22450 const char * text_name;
22451 const char * prefix;
22452 const char * prefix_once;
22453 const char * group_name;
22461 prefix = ELF_STRING_ARM_unwind;
22462 prefix_once = ELF_STRING_ARM_unwind_once;
22463 type = SHT_ARM_EXIDX;
22467 prefix = ELF_STRING_ARM_unwind_info;
22468 prefix_once = ELF_STRING_ARM_unwind_info_once;
22469 type = SHT_PROGBITS;
22472 text_name = segment_name (text_seg);
22473 if (streq (text_name, ".text"))
22476 if (strncmp (text_name, ".gnu.linkonce.t.",
22477 strlen (".gnu.linkonce.t.")) == 0)
22479 prefix = prefix_once;
22480 text_name += strlen (".gnu.linkonce.t.");
22483 sec_name = concat (prefix, text_name, (char *) NULL);
22489 /* Handle COMDAT group. */
22490 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
22492 group_name = elf_group_name (text_seg);
22493 if (group_name == NULL)
22495 as_bad (_("Group section `%s' has no group signature"),
22496 segment_name (text_seg));
22497 ignore_rest_of_line ();
22500 flags |= SHF_GROUP;
22504 obj_elf_change_section (sec_name, type, 0, flags, 0, group_name,
22507 /* Set the section link for index tables. */
22509 elf_linked_to_section (now_seg) = text_seg;
22513 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
22514 personality routine data. Returns zero, or the index table value for
22515 an inline entry. */
22518 create_unwind_entry (int have_data)
22523 /* The current word of data. */
22525 /* The number of bytes left in this word. */
22528 finish_unwind_opcodes ();
22530 /* Remember the current text section. */
22531 unwind.saved_seg = now_seg;
22532 unwind.saved_subseg = now_subseg;
22534 start_unwind_section (now_seg, 0);
22536 if (unwind.personality_routine == NULL)
22538 if (unwind.personality_index == -2)
22541 as_bad (_("handlerdata in cantunwind frame"));
22542 return 1; /* EXIDX_CANTUNWIND. */
22545 /* Use a default personality routine if none is specified. */
22546 if (unwind.personality_index == -1)
22548 if (unwind.opcode_count > 3)
22549 unwind.personality_index = 1;
22551 unwind.personality_index = 0;
22554 /* Space for the personality routine entry. */
22555 if (unwind.personality_index == 0)
22557 if (unwind.opcode_count > 3)
22558 as_bad (_("too many unwind opcodes for personality routine 0"));
22562 /* All the data is inline in the index table. */
22565 while (unwind.opcode_count > 0)
22567 unwind.opcode_count--;
22568 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
22572 /* Pad with "finish" opcodes. */
22574 data = (data << 8) | 0xb0;
22581 /* We get two opcodes "free" in the first word. */
22582 size = unwind.opcode_count - 2;
22586 /* PR 16765: Missing or misplaced unwind directives can trigger this. */
22587 if (unwind.personality_index != -1)
22589 as_bad (_("attempt to recreate an unwind entry"));
22593 /* An extra byte is required for the opcode count. */
22594 size = unwind.opcode_count + 1;
22597 size = (size + 3) >> 2;
22599 as_bad (_("too many unwind opcodes"));
22601 frag_align (2, 0, 0);
22602 record_alignment (now_seg, 2);
22603 unwind.table_entry = expr_build_dot ();
22605 /* Allocate the table entry. */
22606 ptr = frag_more ((size << 2) + 4);
22607 /* PR 13449: Zero the table entries in case some of them are not used. */
22608 memset (ptr, 0, (size << 2) + 4);
22609 where = frag_now_fix () - ((size << 2) + 4);
22611 switch (unwind.personality_index)
22614 /* ??? Should this be a PLT generating relocation? */
22615 /* Custom personality routine. */
22616 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
22617 BFD_RELOC_ARM_PREL31);
22622 /* Set the first byte to the number of additional words. */
22623 data = size > 0 ? size - 1 : 0;
22627 /* ABI defined personality routines. */
22629 /* Three opcodes bytes are packed into the first word. */
22636 /* The size and first two opcode bytes go in the first word. */
22637 data = ((0x80 + unwind.personality_index) << 8) | size;
22642 /* Should never happen. */
22646 /* Pack the opcodes into words (MSB first), reversing the list at the same
22648 while (unwind.opcode_count > 0)
22652 md_number_to_chars (ptr, data, 4);
22657 unwind.opcode_count--;
22659 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
22662 /* Finish off the last word. */
22665 /* Pad with "finish" opcodes. */
22667 data = (data << 8) | 0xb0;
22669 md_number_to_chars (ptr, data, 4);
22674 /* Add an empty descriptor if there is no user-specified data. */
22675 ptr = frag_more (4);
22676 md_number_to_chars (ptr, 0, 4);
22683 /* Initialize the DWARF-2 unwind information for this procedure. */
22686 tc_arm_frame_initial_instructions (void)
22688 cfi_add_CFA_def_cfa (REG_SP, 0);
22690 #endif /* OBJ_ELF */
22692 /* Convert REGNAME to a DWARF-2 register number. */
22695 tc_arm_regname_to_dw2regnum (char *regname)
22697 int reg = arm_reg_parse (®name, REG_TYPE_RN);
22701 /* PR 16694: Allow VFP registers as well. */
22702 reg = arm_reg_parse (®name, REG_TYPE_VFS);
22706 reg = arm_reg_parse (®name, REG_TYPE_VFD);
22715 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
22719 exp.X_op = O_secrel;
22720 exp.X_add_symbol = symbol;
22721 exp.X_add_number = 0;
22722 emit_expr (&exp, size);
22726 /* MD interface: Symbol and relocation handling. */
22728 /* Return the address within the segment that a PC-relative fixup is
22729 relative to. For ARM, PC-relative fixups applied to instructions
22730 are generally relative to the location of the fixup plus 8 bytes.
22731 Thumb branches are offset by 4, and Thumb loads relative to PC
22732 require special handling. */
22735 md_pcrel_from_section (fixS * fixP, segT seg)
22737 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
22739 /* If this is pc-relative and we are going to emit a relocation
22740 then we just want to put out any pipeline compensation that the linker
22741 will need. Otherwise we want to use the calculated base.
22742 For WinCE we skip the bias for externals as well, since this
22743 is how the MS ARM-CE assembler behaves and we want to be compatible. */
22745 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
22746 || (arm_force_relocation (fixP)
22748 && !S_IS_EXTERNAL (fixP->fx_addsy)
22754 switch (fixP->fx_r_type)
22756 /* PC relative addressing on the Thumb is slightly odd as the
22757 bottom two bits of the PC are forced to zero for the
22758 calculation. This happens *after* application of the
22759 pipeline offset. However, Thumb adrl already adjusts for
22760 this, so we need not do it again. */
22761 case BFD_RELOC_ARM_THUMB_ADD:
22764 case BFD_RELOC_ARM_THUMB_OFFSET:
22765 case BFD_RELOC_ARM_T32_OFFSET_IMM:
22766 case BFD_RELOC_ARM_T32_ADD_PC12:
22767 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
22768 return (base + 4) & ~3;
22770 /* Thumb branches are simply offset by +4. */
22771 case BFD_RELOC_THUMB_PCREL_BRANCH7:
22772 case BFD_RELOC_THUMB_PCREL_BRANCH9:
22773 case BFD_RELOC_THUMB_PCREL_BRANCH12:
22774 case BFD_RELOC_THUMB_PCREL_BRANCH20:
22775 case BFD_RELOC_THUMB_PCREL_BRANCH25:
22778 case BFD_RELOC_THUMB_PCREL_BRANCH23:
22780 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22781 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22782 && ARM_IS_FUNC (fixP->fx_addsy)
22783 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22784 base = fixP->fx_where + fixP->fx_frag->fr_address;
22787 /* BLX is like branches above, but forces the low two bits of PC to
22789 case BFD_RELOC_THUMB_PCREL_BLX:
22791 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22792 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22793 && THUMB_IS_FUNC (fixP->fx_addsy)
22794 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22795 base = fixP->fx_where + fixP->fx_frag->fr_address;
22796 return (base + 4) & ~3;
22798 /* ARM mode branches are offset by +8. However, the Windows CE
22799 loader expects the relocation not to take this into account. */
22800 case BFD_RELOC_ARM_PCREL_BLX:
22802 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22803 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22804 && ARM_IS_FUNC (fixP->fx_addsy)
22805 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22806 base = fixP->fx_where + fixP->fx_frag->fr_address;
22809 case BFD_RELOC_ARM_PCREL_CALL:
22811 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22812 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
22813 && THUMB_IS_FUNC (fixP->fx_addsy)
22814 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
22815 base = fixP->fx_where + fixP->fx_frag->fr_address;
22818 case BFD_RELOC_ARM_PCREL_BRANCH:
22819 case BFD_RELOC_ARM_PCREL_JUMP:
22820 case BFD_RELOC_ARM_PLT32:
22822 /* When handling fixups immediately, because we have already
22823 discovered the value of a symbol, or the address of the frag involved
22824 we must account for the offset by +8, as the OS loader will never see the reloc.
22825 see fixup_segment() in write.c
22826 The S_IS_EXTERNAL test handles the case of global symbols.
22827 Those need the calculated base, not just the pipe compensation the linker will need. */
22829 && fixP->fx_addsy != NULL
22830 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
22831 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
22839 /* ARM mode loads relative to PC are also offset by +8. Unlike
22840 branches, the Windows CE loader *does* expect the relocation
22841 to take this into account. */
22842 case BFD_RELOC_ARM_OFFSET_IMM:
22843 case BFD_RELOC_ARM_OFFSET_IMM8:
22844 case BFD_RELOC_ARM_HWLITERAL:
22845 case BFD_RELOC_ARM_LITERAL:
22846 case BFD_RELOC_ARM_CP_OFF_IMM:
22850 /* Other PC-relative relocations are un-offset. */
22856 static bfd_boolean flag_warn_syms = TRUE;
22859 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
22861 /* PR 18347 - Warn if the user attempts to create a symbol with the same
22862 name as an ARM instruction. Whilst strictly speaking it is allowed, it
22863 does mean that the resulting code might be very confusing to the reader.
22864 Also this warning can be triggered if the user omits an operand before
22865 an immediate address, eg:
22869 GAS treats this as an assignment of the value of the symbol foo to a
22870 symbol LDR, and so (without this code) it will not issue any kind of
22871 warning or error message.
22873 Note - ARM instructions are case-insensitive but the strings in the hash
22874 table are all stored in lower case, so we must first ensure that name is
22876 if (flag_warn_syms && arm_ops_hsh)
22878 char * nbuf = strdup (name);
22881 for (p = nbuf; *p; p++)
22883 if (hash_find (arm_ops_hsh, nbuf) != NULL)
22885 static struct hash_control * already_warned = NULL;
22887 if (already_warned == NULL)
22888 already_warned = hash_new ();
22889 /* Only warn about the symbol once. To keep the code
22890 simple we let hash_insert do the lookup for us. */
22891 if (hash_insert (already_warned, name, NULL) == NULL)
22892 as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
22901 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
22902 Otherwise we have no need to default values of symbols. */
22905 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
22908 if (name[0] == '_' && name[1] == 'G'
22909 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
22913 if (symbol_find (name))
22914 as_bad (_("GOT already in the symbol table"));
22916 GOT_symbol = symbol_new (name, undefined_section,
22917 (valueT) 0, & zero_address_frag);
22927 /* Subroutine of md_apply_fix. Check to see if an immediate can be
22928 computed as two separate immediate values, added together. We
22929 already know that this value cannot be computed by just one ARM
22932 static unsigned int
22933 validate_immediate_twopart (unsigned int val,
22934 unsigned int * highpart)
22939 for (i = 0; i < 32; i += 2)
22940 if (((a = rotate_left (val, i)) & 0xff) != 0)
22946 * highpart = (a >> 8) | ((i + 24) << 7);
22948 else if (a & 0xff0000)
22950 if (a & 0xff000000)
22952 * highpart = (a >> 16) | ((i + 16) << 7);
22956 gas_assert (a & 0xff000000);
22957 * highpart = (a >> 24) | ((i + 8) << 7);
22960 return (a & 0xff) | (i << 7);
22967 validate_offset_imm (unsigned int val, int hwse)
22969 if ((hwse && val > 255) || val > 4095)
22974 /* Subroutine of md_apply_fix. Do those data_ops which can take a
22975 negative immediate constant by altering the instruction. A bit of
22980 by inverting the second operand, and
22983 by negating the second operand. */
22986 negate_data_op (unsigned long * instruction,
22987 unsigned long value)
22990 unsigned long negated, inverted;
22992 negated = encode_arm_immediate (-value);
22993 inverted = encode_arm_immediate (~value);
22995 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
22998 /* First negates. */
22999 case OPCODE_SUB: /* ADD <-> SUB */
23000 new_inst = OPCODE_ADD;
23005 new_inst = OPCODE_SUB;
23009 case OPCODE_CMP: /* CMP <-> CMN */
23010 new_inst = OPCODE_CMN;
23015 new_inst = OPCODE_CMP;
23019 /* Now Inverted ops. */
23020 case OPCODE_MOV: /* MOV <-> MVN */
23021 new_inst = OPCODE_MVN;
23026 new_inst = OPCODE_MOV;
23030 case OPCODE_AND: /* AND <-> BIC */
23031 new_inst = OPCODE_BIC;
23036 new_inst = OPCODE_AND;
23040 case OPCODE_ADC: /* ADC <-> SBC */
23041 new_inst = OPCODE_SBC;
23046 new_inst = OPCODE_ADC;
23050 /* We cannot do anything. */
23055 if (value == (unsigned) FAIL)
23058 *instruction &= OPCODE_MASK;
23059 *instruction |= new_inst << DATA_OP_SHIFT;
23063 /* Like negate_data_op, but for Thumb-2. */
23065 static unsigned int
23066 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
23070 unsigned int negated, inverted;
23072 negated = encode_thumb32_immediate (-value);
23073 inverted = encode_thumb32_immediate (~value);
23075 rd = (*instruction >> 8) & 0xf;
23076 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
23079 /* ADD <-> SUB. Includes CMP <-> CMN. */
23080 case T2_OPCODE_SUB:
23081 new_inst = T2_OPCODE_ADD;
23085 case T2_OPCODE_ADD:
23086 new_inst = T2_OPCODE_SUB;
23090 /* ORR <-> ORN. Includes MOV <-> MVN. */
23091 case T2_OPCODE_ORR:
23092 new_inst = T2_OPCODE_ORN;
23096 case T2_OPCODE_ORN:
23097 new_inst = T2_OPCODE_ORR;
23101 /* AND <-> BIC. TST has no inverted equivalent. */
23102 case T2_OPCODE_AND:
23103 new_inst = T2_OPCODE_BIC;
23110 case T2_OPCODE_BIC:
23111 new_inst = T2_OPCODE_AND;
23116 case T2_OPCODE_ADC:
23117 new_inst = T2_OPCODE_SBC;
23121 case T2_OPCODE_SBC:
23122 new_inst = T2_OPCODE_ADC;
23126 /* We cannot do anything. */
23131 if (value == (unsigned int)FAIL)
23134 *instruction &= T2_OPCODE_MASK;
23135 *instruction |= new_inst << T2_DATA_OP_SHIFT;
23139 /* Read a 32-bit thumb instruction from buf. */
23141 static unsigned long
23142 get_thumb32_insn (char * buf)
23144 unsigned long insn;
23145 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
23146 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23151 /* We usually want to set the low bit on the address of thumb function
23152 symbols. In particular .word foo - . should have the low bit set.
23153 Generic code tries to fold the difference of two symbols to
23154 a constant. Prevent this and force a relocation when the first symbols
23155 is a thumb function. */
23158 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
23160 if (op == O_subtract
23161 && l->X_op == O_symbol
23162 && r->X_op == O_symbol
23163 && THUMB_IS_FUNC (l->X_add_symbol))
23165 l->X_op = O_subtract;
23166 l->X_op_symbol = r->X_add_symbol;
23167 l->X_add_number -= r->X_add_number;
23171 /* Process as normal. */
23175 /* Encode Thumb2 unconditional branches and calls. The encoding
23176 for the 2 are identical for the immediate values. */
23179 encode_thumb2_b_bl_offset (char * buf, offsetT value)
23181 #define T2I1I2MASK ((1 << 13) | (1 << 11))
23184 addressT S, I1, I2, lo, hi;
23186 S = (value >> 24) & 0x01;
23187 I1 = (value >> 23) & 0x01;
23188 I2 = (value >> 22) & 0x01;
23189 hi = (value >> 12) & 0x3ff;
23190 lo = (value >> 1) & 0x7ff;
23191 newval = md_chars_to_number (buf, THUMB_SIZE);
23192 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23193 newval |= (S << 10) | hi;
23194 newval2 &= ~T2I1I2MASK;
23195 newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
23196 md_number_to_chars (buf, newval, THUMB_SIZE);
23197 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23201 md_apply_fix (fixS * fixP,
23205 offsetT value = * valP;
23207 unsigned int newimm;
23208 unsigned long temp;
23210 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
23212 gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
23214 /* Note whether this will delete the relocation. */
23216 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
23219 /* On a 64-bit host, silently truncate 'value' to 32 bits for
23220 consistency with the behaviour on 32-bit hosts. Remember value
23222 value &= 0xffffffff;
23223 value ^= 0x80000000;
23224 value -= 0x80000000;
23227 fixP->fx_addnumber = value;
23229 /* Same treatment for fixP->fx_offset. */
23230 fixP->fx_offset &= 0xffffffff;
23231 fixP->fx_offset ^= 0x80000000;
23232 fixP->fx_offset -= 0x80000000;
23234 switch (fixP->fx_r_type)
23236 case BFD_RELOC_NONE:
23237 /* This will need to go in the object file. */
23241 case BFD_RELOC_ARM_IMMEDIATE:
23242 /* We claim that this fixup has been processed here,
23243 even if in fact we generate an error because we do
23244 not have a reloc for it, so tc_gen_reloc will reject it. */
23247 if (fixP->fx_addsy)
23249 const char *msg = 0;
23251 if (! S_IS_DEFINED (fixP->fx_addsy))
23252 msg = _("undefined symbol %s used as an immediate value");
23253 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23254 msg = _("symbol %s is in a different section");
23255 else if (S_IS_WEAK (fixP->fx_addsy))
23256 msg = _("symbol %s is weak and may be overridden later");
23260 as_bad_where (fixP->fx_file, fixP->fx_line,
23261 msg, S_GET_NAME (fixP->fx_addsy));
23266 temp = md_chars_to_number (buf, INSN_SIZE);
23268 /* If the offset is negative, we should use encoding A2 for ADR. */
23269 if ((temp & 0xfff0000) == 0x28f0000 && value < 0)
23270 newimm = negate_data_op (&temp, value);
23273 newimm = encode_arm_immediate (value);
23275 /* If the instruction will fail, see if we can fix things up by
23276 changing the opcode. */
23277 if (newimm == (unsigned int) FAIL)
23278 newimm = negate_data_op (&temp, value);
23279 /* MOV accepts both ARM modified immediate (A1 encoding) and
23280 UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
23281 When disassembling, MOV is preferred when there is no encoding
23283 if (newimm == (unsigned int) FAIL
23284 && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
23285 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
23286 && !((temp >> SBIT_SHIFT) & 0x1)
23287 && value >= 0 && value <= 0xffff)
23289 /* Clear bits[23:20] to change encoding from A1 to A2. */
23290 temp &= 0xff0fffff;
23291 /* Encoding high 4bits imm. Code below will encode the remaining
23293 temp |= (value & 0x0000f000) << 4;
23294 newimm = value & 0x00000fff;
23298 if (newimm == (unsigned int) FAIL)
23300 as_bad_where (fixP->fx_file, fixP->fx_line,
23301 _("invalid constant (%lx) after fixup"),
23302 (unsigned long) value);
23306 newimm |= (temp & 0xfffff000);
23307 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23310 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
23312 unsigned int highpart = 0;
23313 unsigned int newinsn = 0xe1a00000; /* nop. */
23315 if (fixP->fx_addsy)
23317 const char *msg = 0;
23319 if (! S_IS_DEFINED (fixP->fx_addsy))
23320 msg = _("undefined symbol %s used as an immediate value");
23321 else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
23322 msg = _("symbol %s is in a different section");
23323 else if (S_IS_WEAK (fixP->fx_addsy))
23324 msg = _("symbol %s is weak and may be overridden later");
23328 as_bad_where (fixP->fx_file, fixP->fx_line,
23329 msg, S_GET_NAME (fixP->fx_addsy));
23334 newimm = encode_arm_immediate (value);
23335 temp = md_chars_to_number (buf, INSN_SIZE);
23337 /* If the instruction will fail, see if we can fix things up by
23338 changing the opcode. */
23339 if (newimm == (unsigned int) FAIL
23340 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
23342 /* No ? OK - try using two ADD instructions to generate
23344 newimm = validate_immediate_twopart (value, & highpart);
23346 /* Yes - then make sure that the second instruction is
23348 if (newimm != (unsigned int) FAIL)
23350 /* Still No ? Try using a negated value. */
23351 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
23352 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
23353 /* Otherwise - give up. */
23356 as_bad_where (fixP->fx_file, fixP->fx_line,
23357 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
23362 /* Replace the first operand in the 2nd instruction (which
23363 is the PC) with the destination register. We have
23364 already added in the PC in the first instruction and we
23365 do not want to do it again. */
23366 newinsn &= ~ 0xf0000;
23367 newinsn |= ((newinsn & 0x0f000) << 4);
23370 newimm |= (temp & 0xfffff000);
23371 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
23373 highpart |= (newinsn & 0xfffff000);
23374 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
23378 case BFD_RELOC_ARM_OFFSET_IMM:
23379 if (!fixP->fx_done && seg->use_rela_p)
23381 /* Fall through. */
23383 case BFD_RELOC_ARM_LITERAL:
23389 if (validate_offset_imm (value, 0) == FAIL)
23391 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
23392 as_bad_where (fixP->fx_file, fixP->fx_line,
23393 _("invalid literal constant: pool needs to be closer"));
23395 as_bad_where (fixP->fx_file, fixP->fx_line,
23396 _("bad immediate value for offset (%ld)"),
23401 newval = md_chars_to_number (buf, INSN_SIZE);
23403 newval &= 0xfffff000;
23406 newval &= 0xff7ff000;
23407 newval |= value | (sign ? INDEX_UP : 0);
23409 md_number_to_chars (buf, newval, INSN_SIZE);
23412 case BFD_RELOC_ARM_OFFSET_IMM8:
23413 case BFD_RELOC_ARM_HWLITERAL:
23419 if (validate_offset_imm (value, 1) == FAIL)
23421 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
23422 as_bad_where (fixP->fx_file, fixP->fx_line,
23423 _("invalid literal constant: pool needs to be closer"));
23425 as_bad_where (fixP->fx_file, fixP->fx_line,
23426 _("bad immediate value for 8-bit offset (%ld)"),
23431 newval = md_chars_to_number (buf, INSN_SIZE);
23433 newval &= 0xfffff0f0;
23436 newval &= 0xff7ff0f0;
23437 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
23439 md_number_to_chars (buf, newval, INSN_SIZE);
23442 case BFD_RELOC_ARM_T32_OFFSET_U8:
23443 if (value < 0 || value > 1020 || value % 4 != 0)
23444 as_bad_where (fixP->fx_file, fixP->fx_line,
23445 _("bad immediate value for offset (%ld)"), (long) value);
23448 newval = md_chars_to_number (buf+2, THUMB_SIZE);
23450 md_number_to_chars (buf+2, newval, THUMB_SIZE);
23453 case BFD_RELOC_ARM_T32_OFFSET_IMM:
23454 /* This is a complicated relocation used for all varieties of Thumb32
23455 load/store instruction with immediate offset:
23457 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
23458 *4, optional writeback(W)
23459 (doubleword load/store)
23461 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
23462 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
23463 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
23464 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
23465 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
23467 Uppercase letters indicate bits that are already encoded at
23468 this point. Lowercase letters are our problem. For the
23469 second block of instructions, the secondary opcode nybble
23470 (bits 8..11) is present, and bit 23 is zero, even if this is
23471 a PC-relative operation. */
23472 newval = md_chars_to_number (buf, THUMB_SIZE);
23474 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
23476 if ((newval & 0xf0000000) == 0xe0000000)
23478 /* Doubleword load/store: 8-bit offset, scaled by 4. */
23480 newval |= (1 << 23);
23483 if (value % 4 != 0)
23485 as_bad_where (fixP->fx_file, fixP->fx_line,
23486 _("offset not a multiple of 4"));
23492 as_bad_where (fixP->fx_file, fixP->fx_line,
23493 _("offset out of range"));
23498 else if ((newval & 0x000f0000) == 0x000f0000)
23500 /* PC-relative, 12-bit offset. */
23502 newval |= (1 << 23);
23507 as_bad_where (fixP->fx_file, fixP->fx_line,
23508 _("offset out of range"));
23513 else if ((newval & 0x00000100) == 0x00000100)
23515 /* Writeback: 8-bit, +/- offset. */
23517 newval |= (1 << 9);
23522 as_bad_where (fixP->fx_file, fixP->fx_line,
23523 _("offset out of range"));
23528 else if ((newval & 0x00000f00) == 0x00000e00)
23530 /* T-instruction: positive 8-bit offset. */
23531 if (value < 0 || value > 0xff)
23533 as_bad_where (fixP->fx_file, fixP->fx_line,
23534 _("offset out of range"));
23542 /* Positive 12-bit or negative 8-bit offset. */
23546 newval |= (1 << 23);
23556 as_bad_where (fixP->fx_file, fixP->fx_line,
23557 _("offset out of range"));
23564 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
23565 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
23568 case BFD_RELOC_ARM_SHIFT_IMM:
23569 newval = md_chars_to_number (buf, INSN_SIZE);
23570 if (((unsigned long) value) > 32
23572 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
23574 as_bad_where (fixP->fx_file, fixP->fx_line,
23575 _("shift expression is too large"));
23580 /* Shifts of zero must be done as lsl. */
23582 else if (value == 32)
23584 newval &= 0xfffff07f;
23585 newval |= (value & 0x1f) << 7;
23586 md_number_to_chars (buf, newval, INSN_SIZE);
23589 case BFD_RELOC_ARM_T32_IMMEDIATE:
23590 case BFD_RELOC_ARM_T32_ADD_IMM:
23591 case BFD_RELOC_ARM_T32_IMM12:
23592 case BFD_RELOC_ARM_T32_ADD_PC12:
23593 /* We claim that this fixup has been processed here,
23594 even if in fact we generate an error because we do
23595 not have a reloc for it, so tc_gen_reloc will reject it. */
23599 && ! S_IS_DEFINED (fixP->fx_addsy))
23601 as_bad_where (fixP->fx_file, fixP->fx_line,
23602 _("undefined symbol %s used as an immediate value"),
23603 S_GET_NAME (fixP->fx_addsy));
23607 newval = md_chars_to_number (buf, THUMB_SIZE);
23609 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
23612 if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
23613 /* ARMv8-M Baseline MOV will reach here, but it doesn't support
23614 Thumb2 modified immediate encoding (T2). */
23615 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
23616 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23618 newimm = encode_thumb32_immediate (value);
23619 if (newimm == (unsigned int) FAIL)
23620 newimm = thumb32_negate_data_op (&newval, value);
23622 if (newimm == (unsigned int) FAIL)
23624 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
23626 /* Turn add/sum into addw/subw. */
23627 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
23628 newval = (newval & 0xfeffffff) | 0x02000000;
23629 /* No flat 12-bit imm encoding for addsw/subsw. */
23630 if ((newval & 0x00100000) == 0)
23632 /* 12 bit immediate for addw/subw. */
23636 newval ^= 0x00a00000;
23639 newimm = (unsigned int) FAIL;
23646 /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
23647 UINT16 (T3 encoding), MOVW only accepts UINT16. When
23648 disassembling, MOV is preferred when there is no encoding
23650 if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
23651 /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
23652 but with the Rn field [19:16] set to 1111. */
23653 && (((newval >> 16) & 0xf) == 0xf)
23654 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
23655 && !((newval >> T2_SBIT_SHIFT) & 0x1)
23656 && value >= 0 && value <= 0xffff)
23658 /* Toggle bit[25] to change encoding from T2 to T3. */
23660 /* Clear bits[19:16]. */
23661 newval &= 0xfff0ffff;
23662 /* Encoding high 4bits imm. Code below will encode the
23663 remaining low 12bits. */
23664 newval |= (value & 0x0000f000) << 4;
23665 newimm = value & 0x00000fff;
23670 if (newimm == (unsigned int)FAIL)
23672 as_bad_where (fixP->fx_file, fixP->fx_line,
23673 _("invalid constant (%lx) after fixup"),
23674 (unsigned long) value);
23678 newval |= (newimm & 0x800) << 15;
23679 newval |= (newimm & 0x700) << 4;
23680 newval |= (newimm & 0x0ff);
23682 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
23683 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
23686 case BFD_RELOC_ARM_SMC:
23687 if (((unsigned long) value) > 0xffff)
23688 as_bad_where (fixP->fx_file, fixP->fx_line,
23689 _("invalid smc expression"));
23690 newval = md_chars_to_number (buf, INSN_SIZE);
23691 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23692 md_number_to_chars (buf, newval, INSN_SIZE);
23695 case BFD_RELOC_ARM_HVC:
23696 if (((unsigned long) value) > 0xffff)
23697 as_bad_where (fixP->fx_file, fixP->fx_line,
23698 _("invalid hvc expression"));
23699 newval = md_chars_to_number (buf, INSN_SIZE);
23700 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
23701 md_number_to_chars (buf, newval, INSN_SIZE);
23704 case BFD_RELOC_ARM_SWI:
23705 if (fixP->tc_fix_data != 0)
23707 if (((unsigned long) value) > 0xff)
23708 as_bad_where (fixP->fx_file, fixP->fx_line,
23709 _("invalid swi expression"));
23710 newval = md_chars_to_number (buf, THUMB_SIZE);
23712 md_number_to_chars (buf, newval, THUMB_SIZE);
23716 if (((unsigned long) value) > 0x00ffffff)
23717 as_bad_where (fixP->fx_file, fixP->fx_line,
23718 _("invalid swi expression"));
23719 newval = md_chars_to_number (buf, INSN_SIZE);
23721 md_number_to_chars (buf, newval, INSN_SIZE);
23725 case BFD_RELOC_ARM_MULTI:
23726 if (((unsigned long) value) > 0xffff)
23727 as_bad_where (fixP->fx_file, fixP->fx_line,
23728 _("invalid expression in load/store multiple"));
23729 newval = value | md_chars_to_number (buf, INSN_SIZE);
23730 md_number_to_chars (buf, newval, INSN_SIZE);
23734 case BFD_RELOC_ARM_PCREL_CALL:
23736 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23738 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23739 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23740 && THUMB_IS_FUNC (fixP->fx_addsy))
23741 /* Flip the bl to blx. This is a simple flip
23742 bit here because we generate PCREL_CALL for
23743 unconditional bls. */
23745 newval = md_chars_to_number (buf, INSN_SIZE);
23746 newval = newval | 0x10000000;
23747 md_number_to_chars (buf, newval, INSN_SIZE);
23753 goto arm_branch_common;
23755 case BFD_RELOC_ARM_PCREL_JUMP:
23756 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23758 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23759 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23760 && THUMB_IS_FUNC (fixP->fx_addsy))
23762 /* This would map to a bl<cond>, b<cond>,
23763 b<always> to a Thumb function. We
23764 need to force a relocation for this particular
23766 newval = md_chars_to_number (buf, INSN_SIZE);
23769 /* Fall through. */
23771 case BFD_RELOC_ARM_PLT32:
23773 case BFD_RELOC_ARM_PCREL_BRANCH:
23775 goto arm_branch_common;
23777 case BFD_RELOC_ARM_PCREL_BLX:
23780 if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
23782 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23783 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23784 && ARM_IS_FUNC (fixP->fx_addsy))
23786 /* Flip the blx to a bl and warn. */
23787 const char *name = S_GET_NAME (fixP->fx_addsy);
23788 newval = 0xeb000000;
23789 as_warn_where (fixP->fx_file, fixP->fx_line,
23790 _("blx to '%s' an ARM ISA state function changed to bl"),
23792 md_number_to_chars (buf, newval, INSN_SIZE);
23798 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
23799 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
23803 /* We are going to store value (shifted right by two) in the
23804 instruction, in a 24 bit, signed field. Bits 26 through 32 either
23805 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
23808 as_bad_where (fixP->fx_file, fixP->fx_line,
23809 _("misaligned branch destination"));
23810 if ((value & (offsetT)0xfe000000) != (offsetT)0
23811 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
23812 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23814 if (fixP->fx_done || !seg->use_rela_p)
23816 newval = md_chars_to_number (buf, INSN_SIZE);
23817 newval |= (value >> 2) & 0x00ffffff;
23818 /* Set the H bit on BLX instructions. */
23822 newval |= 0x01000000;
23824 newval &= ~0x01000000;
23826 md_number_to_chars (buf, newval, INSN_SIZE);
23830 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
23831 /* CBZ can only branch forward. */
23833 /* Attempts to use CBZ to branch to the next instruction
23834 (which, strictly speaking, are prohibited) will be turned into
23837 FIXME: It may be better to remove the instruction completely and
23838 perform relaxation. */
23841 newval = md_chars_to_number (buf, THUMB_SIZE);
23842 newval = 0xbf00; /* NOP encoding T1 */
23843 md_number_to_chars (buf, newval, THUMB_SIZE);
23848 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23850 if (fixP->fx_done || !seg->use_rela_p)
23852 newval = md_chars_to_number (buf, THUMB_SIZE);
23853 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
23854 md_number_to_chars (buf, newval, THUMB_SIZE);
23859 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
23860 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
23861 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23863 if (fixP->fx_done || !seg->use_rela_p)
23865 newval = md_chars_to_number (buf, THUMB_SIZE);
23866 newval |= (value & 0x1ff) >> 1;
23867 md_number_to_chars (buf, newval, THUMB_SIZE);
23871 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
23872 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
23873 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23875 if (fixP->fx_done || !seg->use_rela_p)
23877 newval = md_chars_to_number (buf, THUMB_SIZE);
23878 newval |= (value & 0xfff) >> 1;
23879 md_number_to_chars (buf, newval, THUMB_SIZE);
23883 case BFD_RELOC_THUMB_PCREL_BRANCH20:
23885 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23886 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23887 && ARM_IS_FUNC (fixP->fx_addsy)
23888 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23890 /* Force a relocation for a branch 20 bits wide. */
23893 if ((value & ~0x1fffff) && ((value & ~0x0fffff) != ~0x0fffff))
23894 as_bad_where (fixP->fx_file, fixP->fx_line,
23895 _("conditional branch out of range"));
23897 if (fixP->fx_done || !seg->use_rela_p)
23900 addressT S, J1, J2, lo, hi;
23902 S = (value & 0x00100000) >> 20;
23903 J2 = (value & 0x00080000) >> 19;
23904 J1 = (value & 0x00040000) >> 18;
23905 hi = (value & 0x0003f000) >> 12;
23906 lo = (value & 0x00000ffe) >> 1;
23908 newval = md_chars_to_number (buf, THUMB_SIZE);
23909 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23910 newval |= (S << 10) | hi;
23911 newval2 |= (J1 << 13) | (J2 << 11) | lo;
23912 md_number_to_chars (buf, newval, THUMB_SIZE);
23913 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
23917 case BFD_RELOC_THUMB_PCREL_BLX:
23918 /* If there is a blx from a thumb state function to
23919 another thumb function flip this to a bl and warn
23923 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23924 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23925 && THUMB_IS_FUNC (fixP->fx_addsy))
23927 const char *name = S_GET_NAME (fixP->fx_addsy);
23928 as_warn_where (fixP->fx_file, fixP->fx_line,
23929 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
23931 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23932 newval = newval | 0x1000;
23933 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23934 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23939 goto thumb_bl_common;
23941 case BFD_RELOC_THUMB_PCREL_BRANCH23:
23942 /* A bl from Thumb state ISA to an internal ARM state function
23943 is converted to a blx. */
23945 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
23946 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
23947 && ARM_IS_FUNC (fixP->fx_addsy)
23948 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
23950 newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
23951 newval = newval & ~0x1000;
23952 md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
23953 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
23959 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23960 /* For a BLX instruction, make sure that the relocation is rounded up
23961 to a word boundary. This follows the semantics of the instruction
23962 which specifies that bit 1 of the target address will come from bit
23963 1 of the base address. */
23964 value = (value + 3) & ~ 3;
23967 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
23968 && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
23969 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
23972 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
23974 if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
23975 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23976 else if ((value & ~0x1ffffff)
23977 && ((value & ~0x1ffffff) != ~0x1ffffff))
23978 as_bad_where (fixP->fx_file, fixP->fx_line,
23979 _("Thumb2 branch out of range"));
23982 if (fixP->fx_done || !seg->use_rela_p)
23983 encode_thumb2_b_bl_offset (buf, value);
23987 case BFD_RELOC_THUMB_PCREL_BRANCH25:
23988 if ((value & ~0x0ffffff) && ((value & ~0x0ffffff) != ~0x0ffffff))
23989 as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
23991 if (fixP->fx_done || !seg->use_rela_p)
23992 encode_thumb2_b_bl_offset (buf, value);
23997 if (fixP->fx_done || !seg->use_rela_p)
24002 if (fixP->fx_done || !seg->use_rela_p)
24003 md_number_to_chars (buf, value, 2);
24007 case BFD_RELOC_ARM_TLS_CALL:
24008 case BFD_RELOC_ARM_THM_TLS_CALL:
24009 case BFD_RELOC_ARM_TLS_DESCSEQ:
24010 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
24011 case BFD_RELOC_ARM_TLS_GOTDESC:
24012 case BFD_RELOC_ARM_TLS_GD32:
24013 case BFD_RELOC_ARM_TLS_LE32:
24014 case BFD_RELOC_ARM_TLS_IE32:
24015 case BFD_RELOC_ARM_TLS_LDM32:
24016 case BFD_RELOC_ARM_TLS_LDO32:
24017 S_SET_THREAD_LOCAL (fixP->fx_addsy);
24020 /* Same handling as above, but with the arm_fdpic guard. */
24021 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
24022 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
24023 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
24026 S_SET_THREAD_LOCAL (fixP->fx_addsy);
24030 as_bad_where (fixP->fx_file, fixP->fx_line,
24031 _("Relocation supported only in FDPIC mode"));
24035 case BFD_RELOC_ARM_GOT32:
24036 case BFD_RELOC_ARM_GOTOFF:
24039 case BFD_RELOC_ARM_GOT_PREL:
24040 if (fixP->fx_done || !seg->use_rela_p)
24041 md_number_to_chars (buf, value, 4);
24044 case BFD_RELOC_ARM_TARGET2:
24045 /* TARGET2 is not partial-inplace, so we need to write the
24046 addend here for REL targets, because it won't be written out
24047 during reloc processing later. */
24048 if (fixP->fx_done || !seg->use_rela_p)
24049 md_number_to_chars (buf, fixP->fx_offset, 4);
24052 /* Relocations for FDPIC. */
24053 case BFD_RELOC_ARM_GOTFUNCDESC:
24054 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
24055 case BFD_RELOC_ARM_FUNCDESC:
24058 if (fixP->fx_done || !seg->use_rela_p)
24059 md_number_to_chars (buf, 0, 4);
24063 as_bad_where (fixP->fx_file, fixP->fx_line,
24064 _("Relocation supported only in FDPIC mode"));
24069 case BFD_RELOC_RVA:
24071 case BFD_RELOC_ARM_TARGET1:
24072 case BFD_RELOC_ARM_ROSEGREL32:
24073 case BFD_RELOC_ARM_SBREL32:
24074 case BFD_RELOC_32_PCREL:
24076 case BFD_RELOC_32_SECREL:
24078 if (fixP->fx_done || !seg->use_rela_p)
24080 /* For WinCE we only do this for pcrel fixups. */
24081 if (fixP->fx_done || fixP->fx_pcrel)
24083 md_number_to_chars (buf, value, 4);
24087 case BFD_RELOC_ARM_PREL31:
24088 if (fixP->fx_done || !seg->use_rela_p)
24090 newval = md_chars_to_number (buf, 4) & 0x80000000;
24091 if ((value ^ (value >> 1)) & 0x40000000)
24093 as_bad_where (fixP->fx_file, fixP->fx_line,
24094 _("rel31 relocation overflow"));
24096 newval |= value & 0x7fffffff;
24097 md_number_to_chars (buf, newval, 4);
24102 case BFD_RELOC_ARM_CP_OFF_IMM:
24103 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
24104 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
24105 newval = md_chars_to_number (buf, INSN_SIZE);
24107 newval = get_thumb32_insn (buf);
24108 if ((newval & 0x0f200f00) == 0x0d000900)
24110 /* This is a fp16 vstr/vldr. The immediate offset in the mnemonic
24111 has permitted values that are multiples of 2, in the range 0
24113 if (value < -510 || value > 510 || (value & 1))
24114 as_bad_where (fixP->fx_file, fixP->fx_line,
24115 _("co-processor offset out of range"));
24117 else if (value < -1023 || value > 1023 || (value & 3))
24118 as_bad_where (fixP->fx_file, fixP->fx_line,
24119 _("co-processor offset out of range"));
24124 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24125 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
24126 newval = md_chars_to_number (buf, INSN_SIZE);
24128 newval = get_thumb32_insn (buf);
24130 newval &= 0xffffff00;
24133 newval &= 0xff7fff00;
24134 if ((newval & 0x0f200f00) == 0x0d000900)
24136 /* This is a fp16 vstr/vldr.
24138 It requires the immediate offset in the instruction is shifted
24139 left by 1 to be a half-word offset.
24141 Here, left shift by 1 first, and later right shift by 2
24142 should get the right offset. */
24145 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
24147 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
24148 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
24149 md_number_to_chars (buf, newval, INSN_SIZE);
24151 put_thumb32_insn (buf, newval);
24154 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
24155 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
24156 if (value < -255 || value > 255)
24157 as_bad_where (fixP->fx_file, fixP->fx_line,
24158 _("co-processor offset out of range"));
24160 goto cp_off_common;
24162 case BFD_RELOC_ARM_THUMB_OFFSET:
24163 newval = md_chars_to_number (buf, THUMB_SIZE);
24164 /* Exactly what ranges, and where the offset is inserted depends
24165 on the type of instruction, we can establish this from the
24167 switch (newval >> 12)
24169 case 4: /* PC load. */
24170 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
24171 forced to zero for these loads; md_pcrel_from has already
24172 compensated for this. */
24174 as_bad_where (fixP->fx_file, fixP->fx_line,
24175 _("invalid offset, target not word aligned (0x%08lX)"),
24176 (((unsigned long) fixP->fx_frag->fr_address
24177 + (unsigned long) fixP->fx_where) & ~3)
24178 + (unsigned long) value);
24180 if (value & ~0x3fc)
24181 as_bad_where (fixP->fx_file, fixP->fx_line,
24182 _("invalid offset, value too big (0x%08lX)"),
24185 newval |= value >> 2;
24188 case 9: /* SP load/store. */
24189 if (value & ~0x3fc)
24190 as_bad_where (fixP->fx_file, fixP->fx_line,
24191 _("invalid offset, value too big (0x%08lX)"),
24193 newval |= value >> 2;
24196 case 6: /* Word load/store. */
24198 as_bad_where (fixP->fx_file, fixP->fx_line,
24199 _("invalid offset, value too big (0x%08lX)"),
24201 newval |= value << 4; /* 6 - 2. */
24204 case 7: /* Byte load/store. */
24206 as_bad_where (fixP->fx_file, fixP->fx_line,
24207 _("invalid offset, value too big (0x%08lX)"),
24209 newval |= value << 6;
24212 case 8: /* Halfword load/store. */
24214 as_bad_where (fixP->fx_file, fixP->fx_line,
24215 _("invalid offset, value too big (0x%08lX)"),
24217 newval |= value << 5; /* 6 - 1. */
24221 as_bad_where (fixP->fx_file, fixP->fx_line,
24222 "Unable to process relocation for thumb opcode: %lx",
24223 (unsigned long) newval);
24226 md_number_to_chars (buf, newval, THUMB_SIZE);
24229 case BFD_RELOC_ARM_THUMB_ADD:
24230 /* This is a complicated relocation, since we use it for all of
24231 the following immediate relocations:
24235 9bit ADD/SUB SP word-aligned
24236 10bit ADD PC/SP word-aligned
24238 The type of instruction being processed is encoded in the
24245 newval = md_chars_to_number (buf, THUMB_SIZE);
24247 int rd = (newval >> 4) & 0xf;
24248 int rs = newval & 0xf;
24249 int subtract = !!(newval & 0x8000);
24251 /* Check for HI regs, only very restricted cases allowed:
24252 Adjusting SP, and using PC or SP to get an address. */
24253 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
24254 || (rs > 7 && rs != REG_SP && rs != REG_PC))
24255 as_bad_where (fixP->fx_file, fixP->fx_line,
24256 _("invalid Hi register with immediate"));
24258 /* If value is negative, choose the opposite instruction. */
24262 subtract = !subtract;
24264 as_bad_where (fixP->fx_file, fixP->fx_line,
24265 _("immediate value out of range"));
24270 if (value & ~0x1fc)
24271 as_bad_where (fixP->fx_file, fixP->fx_line,
24272 _("invalid immediate for stack address calculation"));
24273 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
24274 newval |= value >> 2;
24276 else if (rs == REG_PC || rs == REG_SP)
24278 /* PR gas/18541. If the addition is for a defined symbol
24279 within range of an ADR instruction then accept it. */
24282 && fixP->fx_addsy != NULL)
24286 if (! S_IS_DEFINED (fixP->fx_addsy)
24287 || S_GET_SEGMENT (fixP->fx_addsy) != seg
24288 || S_IS_WEAK (fixP->fx_addsy))
24290 as_bad_where (fixP->fx_file, fixP->fx_line,
24291 _("address calculation needs a strongly defined nearby symbol"));
24295 offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
24297 /* Round up to the next 4-byte boundary. */
24302 v = S_GET_VALUE (fixP->fx_addsy) - v;
24306 as_bad_where (fixP->fx_file, fixP->fx_line,
24307 _("symbol too far away"));
24317 if (subtract || value & ~0x3fc)
24318 as_bad_where (fixP->fx_file, fixP->fx_line,
24319 _("invalid immediate for address calculation (value = 0x%08lX)"),
24320 (unsigned long) (subtract ? - value : value));
24321 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
24323 newval |= value >> 2;
24328 as_bad_where (fixP->fx_file, fixP->fx_line,
24329 _("immediate value out of range"));
24330 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
24331 newval |= (rd << 8) | value;
24336 as_bad_where (fixP->fx_file, fixP->fx_line,
24337 _("immediate value out of range"));
24338 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
24339 newval |= rd | (rs << 3) | (value << 6);
24342 md_number_to_chars (buf, newval, THUMB_SIZE);
24345 case BFD_RELOC_ARM_THUMB_IMM:
24346 newval = md_chars_to_number (buf, THUMB_SIZE);
24347 if (value < 0 || value > 255)
24348 as_bad_where (fixP->fx_file, fixP->fx_line,
24349 _("invalid immediate: %ld is out of range"),
24352 md_number_to_chars (buf, newval, THUMB_SIZE);
24355 case BFD_RELOC_ARM_THUMB_SHIFT:
24356 /* 5bit shift value (0..32). LSL cannot take 32. */
24357 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
24358 temp = newval & 0xf800;
24359 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
24360 as_bad_where (fixP->fx_file, fixP->fx_line,
24361 _("invalid shift value: %ld"), (long) value);
24362 /* Shifts of zero must be encoded as LSL. */
24364 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
24365 /* Shifts of 32 are encoded as zero. */
24366 else if (value == 32)
24368 newval |= value << 6;
24369 md_number_to_chars (buf, newval, THUMB_SIZE);
24372 case BFD_RELOC_VTABLE_INHERIT:
24373 case BFD_RELOC_VTABLE_ENTRY:
24377 case BFD_RELOC_ARM_MOVW:
24378 case BFD_RELOC_ARM_MOVT:
24379 case BFD_RELOC_ARM_THUMB_MOVW:
24380 case BFD_RELOC_ARM_THUMB_MOVT:
24381 if (fixP->fx_done || !seg->use_rela_p)
24383 /* REL format relocations are limited to a 16-bit addend. */
24384 if (!fixP->fx_done)
24386 if (value < -0x8000 || value > 0x7fff)
24387 as_bad_where (fixP->fx_file, fixP->fx_line,
24388 _("offset out of range"));
24390 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
24391 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24396 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
24397 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
24399 newval = get_thumb32_insn (buf);
24400 newval &= 0xfbf08f00;
24401 newval |= (value & 0xf000) << 4;
24402 newval |= (value & 0x0800) << 15;
24403 newval |= (value & 0x0700) << 4;
24404 newval |= (value & 0x00ff);
24405 put_thumb32_insn (buf, newval);
24409 newval = md_chars_to_number (buf, 4);
24410 newval &= 0xfff0f000;
24411 newval |= value & 0x0fff;
24412 newval |= (value & 0xf000) << 4;
24413 md_number_to_chars (buf, newval, 4);
24418 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
24419 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
24420 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
24421 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
24422 gas_assert (!fixP->fx_done);
24425 bfd_boolean is_mov;
24426 bfd_vma encoded_addend = value;
24428 /* Check that addend can be encoded in instruction. */
24429 if (!seg->use_rela_p && (value < 0 || value > 255))
24430 as_bad_where (fixP->fx_file, fixP->fx_line,
24431 _("the offset 0x%08lX is not representable"),
24432 (unsigned long) encoded_addend);
24434 /* Extract the instruction. */
24435 insn = md_chars_to_number (buf, THUMB_SIZE);
24436 is_mov = (insn & 0xf800) == 0x2000;
24441 if (!seg->use_rela_p)
24442 insn |= encoded_addend;
24448 /* Extract the instruction. */
24449 /* Encoding is the following
24454 /* The following conditions must be true :
24459 rd = (insn >> 4) & 0xf;
24461 if ((insn & 0x8000) || (rd != rs) || rd > 7)
24462 as_bad_where (fixP->fx_file, fixP->fx_line,
24463 _("Unable to process relocation for thumb opcode: %lx"),
24464 (unsigned long) insn);
24466 /* Encode as ADD immediate8 thumb 1 code. */
24467 insn = 0x3000 | (rd << 8);
24469 /* Place the encoded addend into the first 8 bits of the
24471 if (!seg->use_rela_p)
24472 insn |= encoded_addend;
24475 /* Update the instruction. */
24476 md_number_to_chars (buf, insn, THUMB_SIZE);
24480 case BFD_RELOC_ARM_ALU_PC_G0_NC:
24481 case BFD_RELOC_ARM_ALU_PC_G0:
24482 case BFD_RELOC_ARM_ALU_PC_G1_NC:
24483 case BFD_RELOC_ARM_ALU_PC_G1:
24484 case BFD_RELOC_ARM_ALU_PC_G2:
24485 case BFD_RELOC_ARM_ALU_SB_G0_NC:
24486 case BFD_RELOC_ARM_ALU_SB_G0:
24487 case BFD_RELOC_ARM_ALU_SB_G1_NC:
24488 case BFD_RELOC_ARM_ALU_SB_G1:
24489 case BFD_RELOC_ARM_ALU_SB_G2:
24490 gas_assert (!fixP->fx_done);
24491 if (!seg->use_rela_p)
24494 bfd_vma encoded_addend;
24495 bfd_vma addend_abs = abs (value);
24497 /* Check that the absolute value of the addend can be
24498 expressed as an 8-bit constant plus a rotation. */
24499 encoded_addend = encode_arm_immediate (addend_abs);
24500 if (encoded_addend == (unsigned int) FAIL)
24501 as_bad_where (fixP->fx_file, fixP->fx_line,
24502 _("the offset 0x%08lX is not representable"),
24503 (unsigned long) addend_abs);
24505 /* Extract the instruction. */
24506 insn = md_chars_to_number (buf, INSN_SIZE);
24508 /* If the addend is positive, use an ADD instruction.
24509 Otherwise use a SUB. Take care not to destroy the S bit. */
24510 insn &= 0xff1fffff;
24516 /* Place the encoded addend into the first 12 bits of the
24518 insn &= 0xfffff000;
24519 insn |= encoded_addend;
24521 /* Update the instruction. */
24522 md_number_to_chars (buf, insn, INSN_SIZE);
24526 case BFD_RELOC_ARM_LDR_PC_G0:
24527 case BFD_RELOC_ARM_LDR_PC_G1:
24528 case BFD_RELOC_ARM_LDR_PC_G2:
24529 case BFD_RELOC_ARM_LDR_SB_G0:
24530 case BFD_RELOC_ARM_LDR_SB_G1:
24531 case BFD_RELOC_ARM_LDR_SB_G2:
24532 gas_assert (!fixP->fx_done);
24533 if (!seg->use_rela_p)
24536 bfd_vma addend_abs = abs (value);
24538 /* Check that the absolute value of the addend can be
24539 encoded in 12 bits. */
24540 if (addend_abs >= 0x1000)
24541 as_bad_where (fixP->fx_file, fixP->fx_line,
24542 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
24543 (unsigned long) addend_abs);
24545 /* Extract the instruction. */
24546 insn = md_chars_to_number (buf, INSN_SIZE);
24548 /* If the addend is negative, clear bit 23 of the instruction.
24549 Otherwise set it. */
24551 insn &= ~(1 << 23);
24555 /* Place the absolute value of the addend into the first 12 bits
24556 of the instruction. */
24557 insn &= 0xfffff000;
24558 insn |= addend_abs;
24560 /* Update the instruction. */
24561 md_number_to_chars (buf, insn, INSN_SIZE);
24565 case BFD_RELOC_ARM_LDRS_PC_G0:
24566 case BFD_RELOC_ARM_LDRS_PC_G1:
24567 case BFD_RELOC_ARM_LDRS_PC_G2:
24568 case BFD_RELOC_ARM_LDRS_SB_G0:
24569 case BFD_RELOC_ARM_LDRS_SB_G1:
24570 case BFD_RELOC_ARM_LDRS_SB_G2:
24571 gas_assert (!fixP->fx_done);
24572 if (!seg->use_rela_p)
24575 bfd_vma addend_abs = abs (value);
24577 /* Check that the absolute value of the addend can be
24578 encoded in 8 bits. */
24579 if (addend_abs >= 0x100)
24580 as_bad_where (fixP->fx_file, fixP->fx_line,
24581 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
24582 (unsigned long) addend_abs);
24584 /* Extract the instruction. */
24585 insn = md_chars_to_number (buf, INSN_SIZE);
24587 /* If the addend is negative, clear bit 23 of the instruction.
24588 Otherwise set it. */
24590 insn &= ~(1 << 23);
24594 /* Place the first four bits of the absolute value of the addend
24595 into the first 4 bits of the instruction, and the remaining
24596 four into bits 8 .. 11. */
24597 insn &= 0xfffff0f0;
24598 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
24600 /* Update the instruction. */
24601 md_number_to_chars (buf, insn, INSN_SIZE);
24605 case BFD_RELOC_ARM_LDC_PC_G0:
24606 case BFD_RELOC_ARM_LDC_PC_G1:
24607 case BFD_RELOC_ARM_LDC_PC_G2:
24608 case BFD_RELOC_ARM_LDC_SB_G0:
24609 case BFD_RELOC_ARM_LDC_SB_G1:
24610 case BFD_RELOC_ARM_LDC_SB_G2:
24611 gas_assert (!fixP->fx_done);
24612 if (!seg->use_rela_p)
24615 bfd_vma addend_abs = abs (value);
24617 /* Check that the absolute value of the addend is a multiple of
24618 four and, when divided by four, fits in 8 bits. */
24619 if (addend_abs & 0x3)
24620 as_bad_where (fixP->fx_file, fixP->fx_line,
24621 _("bad offset 0x%08lX (must be word-aligned)"),
24622 (unsigned long) addend_abs);
24624 if ((addend_abs >> 2) > 0xff)
24625 as_bad_where (fixP->fx_file, fixP->fx_line,
24626 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
24627 (unsigned long) addend_abs);
24629 /* Extract the instruction. */
24630 insn = md_chars_to_number (buf, INSN_SIZE);
24632 /* If the addend is negative, clear bit 23 of the instruction.
24633 Otherwise set it. */
24635 insn &= ~(1 << 23);
24639 /* Place the addend (divided by four) into the first eight
24640 bits of the instruction. */
24641 insn &= 0xfffffff0;
24642 insn |= addend_abs >> 2;
24644 /* Update the instruction. */
24645 md_number_to_chars (buf, insn, INSN_SIZE);
24649 case BFD_RELOC_ARM_V4BX:
24650 /* This will need to go in the object file. */
24654 case BFD_RELOC_UNUSED:
24656 as_bad_where (fixP->fx_file, fixP->fx_line,
24657 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
24661 /* Translate internal representation of relocation info to BFD target
24665 tc_gen_reloc (asection *section, fixS *fixp)
24668 bfd_reloc_code_real_type code;
24670 reloc = XNEW (arelent);
24672 reloc->sym_ptr_ptr = XNEW (asymbol *);
24673 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
24674 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
24676 if (fixp->fx_pcrel)
24678 if (section->use_rela_p)
24679 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
24681 fixp->fx_offset = reloc->address;
24683 reloc->addend = fixp->fx_offset;
24685 switch (fixp->fx_r_type)
24688 if (fixp->fx_pcrel)
24690 code = BFD_RELOC_8_PCREL;
24693 /* Fall through. */
24696 if (fixp->fx_pcrel)
24698 code = BFD_RELOC_16_PCREL;
24701 /* Fall through. */
24704 if (fixp->fx_pcrel)
24706 code = BFD_RELOC_32_PCREL;
24709 /* Fall through. */
24711 case BFD_RELOC_ARM_MOVW:
24712 if (fixp->fx_pcrel)
24714 code = BFD_RELOC_ARM_MOVW_PCREL;
24717 /* Fall through. */
24719 case BFD_RELOC_ARM_MOVT:
24720 if (fixp->fx_pcrel)
24722 code = BFD_RELOC_ARM_MOVT_PCREL;
24725 /* Fall through. */
24727 case BFD_RELOC_ARM_THUMB_MOVW:
24728 if (fixp->fx_pcrel)
24730 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
24733 /* Fall through. */
24735 case BFD_RELOC_ARM_THUMB_MOVT:
24736 if (fixp->fx_pcrel)
24738 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
24741 /* Fall through. */
24743 case BFD_RELOC_NONE:
24744 case BFD_RELOC_ARM_PCREL_BRANCH:
24745 case BFD_RELOC_ARM_PCREL_BLX:
24746 case BFD_RELOC_RVA:
24747 case BFD_RELOC_THUMB_PCREL_BRANCH7:
24748 case BFD_RELOC_THUMB_PCREL_BRANCH9:
24749 case BFD_RELOC_THUMB_PCREL_BRANCH12:
24750 case BFD_RELOC_THUMB_PCREL_BRANCH20:
24751 case BFD_RELOC_THUMB_PCREL_BRANCH23:
24752 case BFD_RELOC_THUMB_PCREL_BRANCH25:
24753 case BFD_RELOC_VTABLE_ENTRY:
24754 case BFD_RELOC_VTABLE_INHERIT:
24756 case BFD_RELOC_32_SECREL:
24758 code = fixp->fx_r_type;
24761 case BFD_RELOC_THUMB_PCREL_BLX:
24763 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
24764 code = BFD_RELOC_THUMB_PCREL_BRANCH23;
24767 code = BFD_RELOC_THUMB_PCREL_BLX;
24770 case BFD_RELOC_ARM_LITERAL:
24771 case BFD_RELOC_ARM_HWLITERAL:
24772 /* If this is called then the a literal has
24773 been referenced across a section boundary. */
24774 as_bad_where (fixp->fx_file, fixp->fx_line,
24775 _("literal referenced across section boundary"));
24779 case BFD_RELOC_ARM_TLS_CALL:
24780 case BFD_RELOC_ARM_THM_TLS_CALL:
24781 case BFD_RELOC_ARM_TLS_DESCSEQ:
24782 case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
24783 case BFD_RELOC_ARM_GOT32:
24784 case BFD_RELOC_ARM_GOTOFF:
24785 case BFD_RELOC_ARM_GOT_PREL:
24786 case BFD_RELOC_ARM_PLT32:
24787 case BFD_RELOC_ARM_TARGET1:
24788 case BFD_RELOC_ARM_ROSEGREL32:
24789 case BFD_RELOC_ARM_SBREL32:
24790 case BFD_RELOC_ARM_PREL31:
24791 case BFD_RELOC_ARM_TARGET2:
24792 case BFD_RELOC_ARM_TLS_LDO32:
24793 case BFD_RELOC_ARM_PCREL_CALL:
24794 case BFD_RELOC_ARM_PCREL_JUMP:
24795 case BFD_RELOC_ARM_ALU_PC_G0_NC:
24796 case BFD_RELOC_ARM_ALU_PC_G0:
24797 case BFD_RELOC_ARM_ALU_PC_G1_NC:
24798 case BFD_RELOC_ARM_ALU_PC_G1:
24799 case BFD_RELOC_ARM_ALU_PC_G2:
24800 case BFD_RELOC_ARM_LDR_PC_G0:
24801 case BFD_RELOC_ARM_LDR_PC_G1:
24802 case BFD_RELOC_ARM_LDR_PC_G2:
24803 case BFD_RELOC_ARM_LDRS_PC_G0:
24804 case BFD_RELOC_ARM_LDRS_PC_G1:
24805 case BFD_RELOC_ARM_LDRS_PC_G2:
24806 case BFD_RELOC_ARM_LDC_PC_G0:
24807 case BFD_RELOC_ARM_LDC_PC_G1:
24808 case BFD_RELOC_ARM_LDC_PC_G2:
24809 case BFD_RELOC_ARM_ALU_SB_G0_NC:
24810 case BFD_RELOC_ARM_ALU_SB_G0:
24811 case BFD_RELOC_ARM_ALU_SB_G1_NC:
24812 case BFD_RELOC_ARM_ALU_SB_G1:
24813 case BFD_RELOC_ARM_ALU_SB_G2:
24814 case BFD_RELOC_ARM_LDR_SB_G0:
24815 case BFD_RELOC_ARM_LDR_SB_G1:
24816 case BFD_RELOC_ARM_LDR_SB_G2:
24817 case BFD_RELOC_ARM_LDRS_SB_G0:
24818 case BFD_RELOC_ARM_LDRS_SB_G1:
24819 case BFD_RELOC_ARM_LDRS_SB_G2:
24820 case BFD_RELOC_ARM_LDC_SB_G0:
24821 case BFD_RELOC_ARM_LDC_SB_G1:
24822 case BFD_RELOC_ARM_LDC_SB_G2:
24823 case BFD_RELOC_ARM_V4BX:
24824 case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
24825 case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
24826 case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
24827 case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
24828 case BFD_RELOC_ARM_GOTFUNCDESC:
24829 case BFD_RELOC_ARM_GOTOFFFUNCDESC:
24830 case BFD_RELOC_ARM_FUNCDESC:
24831 code = fixp->fx_r_type;
24834 case BFD_RELOC_ARM_TLS_GOTDESC:
24835 case BFD_RELOC_ARM_TLS_GD32:
24836 case BFD_RELOC_ARM_TLS_GD32_FDPIC:
24837 case BFD_RELOC_ARM_TLS_LE32:
24838 case BFD_RELOC_ARM_TLS_IE32:
24839 case BFD_RELOC_ARM_TLS_IE32_FDPIC:
24840 case BFD_RELOC_ARM_TLS_LDM32:
24841 case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
24842 /* BFD will include the symbol's address in the addend.
24843 But we don't want that, so subtract it out again here. */
24844 if (!S_IS_COMMON (fixp->fx_addsy))
24845 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
24846 code = fixp->fx_r_type;
24850 case BFD_RELOC_ARM_IMMEDIATE:
24851 as_bad_where (fixp->fx_file, fixp->fx_line,
24852 _("internal relocation (type: IMMEDIATE) not fixed up"));
24855 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
24856 as_bad_where (fixp->fx_file, fixp->fx_line,
24857 _("ADRL used for a symbol not defined in the same file"));
24860 case BFD_RELOC_ARM_OFFSET_IMM:
24861 if (section->use_rela_p)
24863 code = fixp->fx_r_type;
24867 if (fixp->fx_addsy != NULL
24868 && !S_IS_DEFINED (fixp->fx_addsy)
24869 && S_IS_LOCAL (fixp->fx_addsy))
24871 as_bad_where (fixp->fx_file, fixp->fx_line,
24872 _("undefined local label `%s'"),
24873 S_GET_NAME (fixp->fx_addsy));
24877 as_bad_where (fixp->fx_file, fixp->fx_line,
24878 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
24885 switch (fixp->fx_r_type)
24887 case BFD_RELOC_NONE: type = "NONE"; break;
24888 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
24889 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
24890 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
24891 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
24892 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
24893 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
24894 case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
24895 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
24896 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
24897 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
24898 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
24899 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
24900 default: type = _("<unknown>"); break;
24902 as_bad_where (fixp->fx_file, fixp->fx_line,
24903 _("cannot represent %s relocation in this object file format"),
24910 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
24912 && fixp->fx_addsy == GOT_symbol)
24914 code = BFD_RELOC_ARM_GOTPC;
24915 reloc->addend = fixp->fx_offset = reloc->address;
24919 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
24921 if (reloc->howto == NULL)
24923 as_bad_where (fixp->fx_file, fixp->fx_line,
24924 _("cannot represent %s relocation in this object file format"),
24925 bfd_get_reloc_code_name (code));
24929 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
24930 vtable entry to be used in the relocation's section offset. */
24931 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
24932 reloc->address = fixp->fx_offset;
24937 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
24940 cons_fix_new_arm (fragS * frag,
24944 bfd_reloc_code_real_type reloc)
24949 FIXME: @@ Should look at CPU word size. */
24953 reloc = BFD_RELOC_8;
24956 reloc = BFD_RELOC_16;
24960 reloc = BFD_RELOC_32;
24963 reloc = BFD_RELOC_64;
24968 if (exp->X_op == O_secrel)
24970 exp->X_op = O_symbol;
24971 reloc = BFD_RELOC_32_SECREL;
24975 fix_new_exp (frag, where, size, exp, pcrel, reloc);
24978 #if defined (OBJ_COFF)
24980 arm_validate_fix (fixS * fixP)
24982 /* If the destination of the branch is a defined symbol which does not have
24983 the THUMB_FUNC attribute, then we must be calling a function which has
24984 the (interfacearm) attribute. We look for the Thumb entry point to that
24985 function and change the branch to refer to that function instead. */
24986 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
24987 && fixP->fx_addsy != NULL
24988 && S_IS_DEFINED (fixP->fx_addsy)
24989 && ! THUMB_IS_FUNC (fixP->fx_addsy))
24991 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
24998 arm_force_relocation (struct fix * fixp)
25000 #if defined (OBJ_COFF) && defined (TE_PE)
25001 if (fixp->fx_r_type == BFD_RELOC_RVA)
25005 /* In case we have a call or a branch to a function in ARM ISA mode from
25006 a thumb function or vice-versa force the relocation. These relocations
25007 are cleared off for some cores that might have blx and simple transformations
25011 switch (fixp->fx_r_type)
25013 case BFD_RELOC_ARM_PCREL_JUMP:
25014 case BFD_RELOC_ARM_PCREL_CALL:
25015 case BFD_RELOC_THUMB_PCREL_BLX:
25016 if (THUMB_IS_FUNC (fixp->fx_addsy))
25020 case BFD_RELOC_ARM_PCREL_BLX:
25021 case BFD_RELOC_THUMB_PCREL_BRANCH25:
25022 case BFD_RELOC_THUMB_PCREL_BRANCH20:
25023 case BFD_RELOC_THUMB_PCREL_BRANCH23:
25024 if (ARM_IS_FUNC (fixp->fx_addsy))
25033 /* Resolve these relocations even if the symbol is extern or weak.
25034 Technically this is probably wrong due to symbol preemption.
25035 In practice these relocations do not have enough range to be useful
25036 at dynamic link time, and some code (e.g. in the Linux kernel)
25037 expects these references to be resolved. */
25038 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
25039 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
25040 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
25041 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
25042 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
25043 || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
25044 || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
25045 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
25046 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
25047 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
25048 || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
25049 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
25050 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
25051 || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
25054 /* Always leave these relocations for the linker. */
25055 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
25056 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
25057 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
25060 /* Always generate relocations against function symbols. */
25061 if (fixp->fx_r_type == BFD_RELOC_32
25063 && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
25066 return generic_force_reloc (fixp);
25069 #if defined (OBJ_ELF) || defined (OBJ_COFF)
25070 /* Relocations against function names must be left unadjusted,
25071 so that the linker can use this information to generate interworking
25072 stubs. The MIPS version of this function
25073 also prevents relocations that are mips-16 specific, but I do not
25074 know why it does this.
25077 There is one other problem that ought to be addressed here, but
25078 which currently is not: Taking the address of a label (rather
25079 than a function) and then later jumping to that address. Such
25080 addresses also ought to have their bottom bit set (assuming that
25081 they reside in Thumb code), but at the moment they will not. */
25084 arm_fix_adjustable (fixS * fixP)
25086 if (fixP->fx_addsy == NULL)
25089 /* Preserve relocations against symbols with function type. */
25090 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
25093 if (THUMB_IS_FUNC (fixP->fx_addsy)
25094 && fixP->fx_subsy == NULL)
25097 /* We need the symbol name for the VTABLE entries. */
25098 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
25099 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
25102 /* Don't allow symbols to be discarded on GOT related relocs. */
25103 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
25104 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
25105 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
25106 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
25107 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
25108 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
25109 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
25110 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
25111 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
25112 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
25113 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
25114 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
25115 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
25116 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
25117 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
25118 || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
25119 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
25122 /* Similarly for group relocations. */
25123 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
25124 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
25125 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
25128 /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols. */
25129 if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
25130 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
25131 || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
25132 || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
25133 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
25134 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
25135 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
25136 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
25139 /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
25140 offsets, so keep these symbols. */
25141 if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
25142 && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
25147 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
25151 elf32_arm_target_format (void)
25154 return (target_big_endian
25155 ? "elf32-bigarm-symbian"
25156 : "elf32-littlearm-symbian");
25157 #elif defined (TE_VXWORKS)
25158 return (target_big_endian
25159 ? "elf32-bigarm-vxworks"
25160 : "elf32-littlearm-vxworks");
25161 #elif defined (TE_NACL)
25162 return (target_big_endian
25163 ? "elf32-bigarm-nacl"
25164 : "elf32-littlearm-nacl");
25168 if (target_big_endian)
25169 return "elf32-bigarm-fdpic";
25171 return "elf32-littlearm-fdpic";
25175 if (target_big_endian)
25176 return "elf32-bigarm";
25178 return "elf32-littlearm";
25184 armelf_frob_symbol (symbolS * symp,
25187 elf_frob_symbol (symp, puntp);
25191 /* MD interface: Finalization. */
25196 literal_pool * pool;
25198 /* Ensure that all the IT blocks are properly closed. */
25199 check_it_blocks_finished ();
25201 for (pool = list_of_pools; pool; pool = pool->next)
25203 /* Put it at the end of the relevant section. */
25204 subseg_set (pool->section, pool->sub_section);
25206 arm_elf_change_section ();
25213 /* Remove any excess mapping symbols generated for alignment frags in
25214 SEC. We may have created a mapping symbol before a zero byte
25215 alignment; remove it if there's a mapping symbol after the
25218 check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
25219 void *dummy ATTRIBUTE_UNUSED)
25221 segment_info_type *seginfo = seg_info (sec);
25224 if (seginfo == NULL || seginfo->frchainP == NULL)
25227 for (fragp = seginfo->frchainP->frch_root;
25229 fragp = fragp->fr_next)
25231 symbolS *sym = fragp->tc_frag_data.last_map;
25232 fragS *next = fragp->fr_next;
25234 /* Variable-sized frags have been converted to fixed size by
25235 this point. But if this was variable-sized to start with,
25236 there will be a fixed-size frag after it. So don't handle
25238 if (sym == NULL || next == NULL)
25241 if (S_GET_VALUE (sym) < next->fr_address)
25242 /* Not at the end of this frag. */
25244 know (S_GET_VALUE (sym) == next->fr_address);
25248 if (next->tc_frag_data.first_map != NULL)
25250 /* Next frag starts with a mapping symbol. Discard this
25252 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
25256 if (next->fr_next == NULL)
25258 /* This mapping symbol is at the end of the section. Discard
25260 know (next->fr_fix == 0 && next->fr_var == 0);
25261 symbol_remove (sym, &symbol_rootP, &symbol_lastP);
25265 /* As long as we have empty frags without any mapping symbols,
25267 /* If the next frag is non-empty and does not start with a
25268 mapping symbol, then this mapping symbol is required. */
25269 if (next->fr_address != next->fr_next->fr_address)
25272 next = next->fr_next;
25274 while (next != NULL);
25279 /* Adjust the symbol table. This marks Thumb symbols as distinct from
25283 arm_adjust_symtab (void)
25288 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
25290 if (ARM_IS_THUMB (sym))
25292 if (THUMB_IS_FUNC (sym))
25294 /* Mark the symbol as a Thumb function. */
25295 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
25296 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
25297 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
25299 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
25300 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
25302 as_bad (_("%s: unexpected function type: %d"),
25303 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
25305 else switch (S_GET_STORAGE_CLASS (sym))
25308 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
25311 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
25314 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
25322 if (ARM_IS_INTERWORK (sym))
25323 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
25330 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
25332 if (ARM_IS_THUMB (sym))
25334 elf_symbol_type * elf_sym;
25336 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
25337 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
25339 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
25340 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
25342 /* If it's a .thumb_func, declare it as so,
25343 otherwise tag label as .code 16. */
25344 if (THUMB_IS_FUNC (sym))
25345 ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
25346 ST_BRANCH_TO_THUMB);
25347 else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
25348 elf_sym->internal_elf_sym.st_info =
25349 ELF_ST_INFO (bind, STT_ARM_16BIT);
25354 /* Remove any overlapping mapping symbols generated by alignment frags. */
25355 bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
25356 /* Now do generic ELF adjustments. */
25357 elf_adjust_symtab ();
25361 /* MD interface: Initialization. */
25364 set_constant_flonums (void)
25368 for (i = 0; i < NUM_FLOAT_VALS; i++)
25369 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
25373 /* Auto-select Thumb mode if it's the only available instruction set for the
25374 given architecture. */
25377 autoselect_thumb_from_cpu_variant (void)
25379 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
25380 opcode_select (16);
25389 if ( (arm_ops_hsh = hash_new ()) == NULL
25390 || (arm_cond_hsh = hash_new ()) == NULL
25391 || (arm_shift_hsh = hash_new ()) == NULL
25392 || (arm_psr_hsh = hash_new ()) == NULL
25393 || (arm_v7m_psr_hsh = hash_new ()) == NULL
25394 || (arm_reg_hsh = hash_new ()) == NULL
25395 || (arm_reloc_hsh = hash_new ()) == NULL
25396 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
25397 as_fatal (_("virtual memory exhausted"));
25399 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
25400 hash_insert (arm_ops_hsh, insns[i].template_name, (void *) (insns + i));
25401 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
25402 hash_insert (arm_cond_hsh, conds[i].template_name, (void *) (conds + i));
25403 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
25404 hash_insert (arm_shift_hsh, shift_names[i].name, (void *) (shift_names + i));
25405 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
25406 hash_insert (arm_psr_hsh, psrs[i].template_name, (void *) (psrs + i));
25407 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
25408 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
25409 (void *) (v7m_psrs + i));
25410 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
25411 hash_insert (arm_reg_hsh, reg_names[i].name, (void *) (reg_names + i));
25413 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
25415 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
25416 (void *) (barrier_opt_names + i));
25418 for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
25420 struct reloc_entry * entry = reloc_names + i;
25422 if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
25423 /* This makes encode_branch() use the EABI versions of this relocation. */
25424 entry->reloc = BFD_RELOC_UNUSED;
25426 hash_insert (arm_reloc_hsh, entry->name, (void *) entry);
25430 set_constant_flonums ();
25432 /* Set the cpu variant based on the command-line options. We prefer
25433 -mcpu= over -march= if both are set (as for GCC); and we prefer
25434 -mfpu= over any other way of setting the floating point unit.
25435 Use of legacy options with new options are faulted. */
25438 if (mcpu_cpu_opt || march_cpu_opt)
25439 as_bad (_("use of old and new-style options to set CPU type"));
25441 selected_arch = *legacy_cpu;
25443 else if (mcpu_cpu_opt)
25445 selected_arch = *mcpu_cpu_opt;
25446 selected_ext = *mcpu_ext_opt;
25448 else if (march_cpu_opt)
25450 selected_arch = *march_cpu_opt;
25451 selected_ext = *march_ext_opt;
25453 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
25458 as_bad (_("use of old and new-style options to set FPU type"));
25460 selected_fpu = *legacy_fpu;
25463 selected_fpu = *mfpu_opt;
25466 #if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
25467 || defined (TE_NetBSD) || defined (TE_VXWORKS))
25468 /* Some environments specify a default FPU. If they don't, infer it
25469 from the processor. */
25471 selected_fpu = *mcpu_fpu_opt;
25472 else if (march_fpu_opt)
25473 selected_fpu = *march_fpu_opt;
25475 selected_fpu = fpu_default;
25479 if (ARM_FEATURE_ZERO (selected_fpu))
25481 if (!no_cpu_selected ())
25482 selected_fpu = fpu_default;
25484 selected_fpu = fpu_arch_fpa;
25488 if (ARM_FEATURE_ZERO (selected_arch))
25490 selected_arch = cpu_default;
25491 selected_cpu = selected_arch;
25493 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
25495 /* Autodection of feature mode: allow all features in cpu_variant but leave
25496 selected_cpu unset. It will be set in aeabi_set_public_attributes ()
25497 after all instruction have been processed and we can decide what CPU
25498 should be selected. */
25499 if (ARM_FEATURE_ZERO (selected_arch))
25500 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
25502 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
25505 autoselect_thumb_from_cpu_variant ();
25507 arm_arch_used = thumb_arch_used = arm_arch_none;
25509 #if defined OBJ_COFF || defined OBJ_ELF
25511 unsigned int flags = 0;
25513 #if defined OBJ_ELF
25514 flags = meabi_flags;
25516 switch (meabi_flags)
25518 case EF_ARM_EABI_UNKNOWN:
25520 /* Set the flags in the private structure. */
25521 if (uses_apcs_26) flags |= F_APCS26;
25522 if (support_interwork) flags |= F_INTERWORK;
25523 if (uses_apcs_float) flags |= F_APCS_FLOAT;
25524 if (pic_code) flags |= F_PIC;
25525 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
25526 flags |= F_SOFT_FLOAT;
25528 switch (mfloat_abi_opt)
25530 case ARM_FLOAT_ABI_SOFT:
25531 case ARM_FLOAT_ABI_SOFTFP:
25532 flags |= F_SOFT_FLOAT;
25535 case ARM_FLOAT_ABI_HARD:
25536 if (flags & F_SOFT_FLOAT)
25537 as_bad (_("hard-float conflicts with specified fpu"));
25541 /* Using pure-endian doubles (even if soft-float). */
25542 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
25543 flags |= F_VFP_FLOAT;
25545 #if defined OBJ_ELF
25546 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
25547 flags |= EF_ARM_MAVERICK_FLOAT;
25550 case EF_ARM_EABI_VER4:
25551 case EF_ARM_EABI_VER5:
25552 /* No additional flags to set. */
25559 bfd_set_private_flags (stdoutput, flags);
25561 /* We have run out flags in the COFF header to encode the
25562 status of ATPCS support, so instead we create a dummy,
25563 empty, debug section called .arm.atpcs. */
25568 sec = bfd_make_section (stdoutput, ".arm.atpcs");
25572 bfd_set_section_flags
25573 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
25574 bfd_set_section_size (stdoutput, sec, 0);
25575 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
25581 /* Record the CPU type as well. */
25582 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
25583 mach = bfd_mach_arm_iWMMXt2;
25584 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
25585 mach = bfd_mach_arm_iWMMXt;
25586 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
25587 mach = bfd_mach_arm_XScale;
25588 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
25589 mach = bfd_mach_arm_ep9312;
25590 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
25591 mach = bfd_mach_arm_5TE;
25592 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
25594 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
25595 mach = bfd_mach_arm_5T;
25597 mach = bfd_mach_arm_5;
25599 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
25601 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
25602 mach = bfd_mach_arm_4T;
25604 mach = bfd_mach_arm_4;
25606 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
25607 mach = bfd_mach_arm_3M;
25608 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
25609 mach = bfd_mach_arm_3;
25610 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
25611 mach = bfd_mach_arm_2a;
25612 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
25613 mach = bfd_mach_arm_2;
25615 mach = bfd_mach_arm_unknown;
25617 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
25620 /* Command line processing. */
25623 Invocation line includes a switch not recognized by the base assembler.
25624 See if it's a processor-specific option.
25626 This routine is somewhat complicated by the need for backwards
25627 compatibility (since older releases of gcc can't be changed).
25628 The new options try to make the interface as compatible as
25631 New options (supported) are:
25633 -mcpu=<cpu name> Assemble for selected processor
25634 -march=<architecture name> Assemble for selected architecture
25635 -mfpu=<fpu architecture> Assemble for selected FPU.
25636 -EB/-mbig-endian Big-endian
25637 -EL/-mlittle-endian Little-endian
25638 -k Generate PIC code
25639 -mthumb Start in Thumb mode
25640 -mthumb-interwork Code supports ARM/Thumb interworking
25642 -m[no-]warn-deprecated Warn about deprecated features
25643 -m[no-]warn-syms Warn when symbols match instructions
25645 For now we will also provide support for:
25647 -mapcs-32 32-bit Program counter
25648 -mapcs-26 26-bit Program counter
25649 -macps-float Floats passed in FP registers
25650 -mapcs-reentrant Reentrant code
25652 (sometime these will probably be replaced with -mapcs=<list of options>
25653 and -matpcs=<list of options>)
25655 The remaining options are only supported for back-wards compatibility.
25656 Cpu variants, the arm part is optional:
25657 -m[arm]1 Currently not supported.
25658 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
25659 -m[arm]3 Arm 3 processor
25660 -m[arm]6[xx], Arm 6 processors
25661 -m[arm]7[xx][t][[d]m] Arm 7 processors
25662 -m[arm]8[10] Arm 8 processors
25663 -m[arm]9[20][tdmi] Arm 9 processors
25664 -mstrongarm[110[0]] StrongARM processors
25665 -mxscale XScale processors
25666 -m[arm]v[2345[t[e]]] Arm architectures
25667 -mall All (except the ARM1)
25669 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
25670 -mfpe-old (No float load/store multiples)
25671 -mvfpxd VFP Single precision
25673 -mno-fpu Disable all floating point instructions
25675 The following CPU names are recognized:
25676 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
25677 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
25678 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
25679 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
25680 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
25681 arm10t arm10e, arm1020t, arm1020e, arm10200e,
25682 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
25686 const char * md_shortopts = "m:k";
25688 #ifdef ARM_BI_ENDIAN
25689 #define OPTION_EB (OPTION_MD_BASE + 0)
25690 #define OPTION_EL (OPTION_MD_BASE + 1)
25692 #if TARGET_BYTES_BIG_ENDIAN
25693 #define OPTION_EB (OPTION_MD_BASE + 0)
25695 #define OPTION_EL (OPTION_MD_BASE + 1)
25698 #define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
25699 #define OPTION_FDPIC (OPTION_MD_BASE + 3)
25701 struct option md_longopts[] =
25704 {"EB", no_argument, NULL, OPTION_EB},
25707 {"EL", no_argument, NULL, OPTION_EL},
25709 {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
25711 {"fdpic", no_argument, NULL, OPTION_FDPIC},
25713 {NULL, no_argument, NULL, 0}
25716 size_t md_longopts_size = sizeof (md_longopts);
25718 struct arm_option_table
25720 const char * option; /* Option name to match. */
25721 const char * help; /* Help information. */
25722 int * var; /* Variable to change. */
25723 int value; /* What to change it to. */
25724 const char * deprecated; /* If non-null, print this message. */
25727 struct arm_option_table arm_opts[] =
25729 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
25730 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
25731 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
25732 &support_interwork, 1, NULL},
25733 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
25734 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
25735 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
25737 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
25738 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
25739 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
25740 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
25743 /* These are recognized by the assembler, but have no affect on code. */
25744 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
25745 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
25747 {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
25748 {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
25749 &warn_on_deprecated, 0, NULL},
25750 {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
25751 {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
25752 {NULL, NULL, NULL, 0, NULL}
25755 struct arm_legacy_option_table
25757 const char * option; /* Option name to match. */
25758 const arm_feature_set ** var; /* Variable to change. */
25759 const arm_feature_set value; /* What to change it to. */
25760 const char * deprecated; /* If non-null, print this message. */
25763 const struct arm_legacy_option_table arm_legacy_opts[] =
25765 /* DON'T add any new processors to this list -- we want the whole list
25766 to go away... Add them to the processors table instead. */
25767 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
25768 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
25769 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
25770 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
25771 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
25772 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
25773 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
25774 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
25775 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
25776 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
25777 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
25778 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
25779 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
25780 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
25781 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
25782 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
25783 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
25784 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
25785 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
25786 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
25787 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
25788 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
25789 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
25790 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
25791 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
25792 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
25793 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
25794 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
25795 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
25796 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
25797 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
25798 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
25799 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
25800 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
25801 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
25802 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
25803 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
25804 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
25805 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
25806 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
25807 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
25808 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
25809 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
25810 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
25811 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
25812 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
25813 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25814 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25815 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25816 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
25817 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
25818 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
25819 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
25820 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
25821 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
25822 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
25823 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
25824 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
25825 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
25826 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
25827 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
25828 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
25829 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
25830 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
25831 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
25832 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
25833 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
25834 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
25835 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
25836 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
25837 N_("use -mcpu=strongarm110")},
25838 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
25839 N_("use -mcpu=strongarm1100")},
25840 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
25841 N_("use -mcpu=strongarm1110")},
25842 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
25843 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
25844 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
25846 /* Architecture variants -- don't add any more to this list either. */
25847 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
25848 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
25849 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
25850 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
25851 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
25852 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
25853 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
25854 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
25855 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
25856 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
25857 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
25858 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
25859 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
25860 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
25861 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
25862 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
25863 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
25864 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
25866 /* Floating point variants -- don't add any more to this list either. */
25867 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
25868 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
25869 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
25870 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
25871 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
25873 {NULL, NULL, ARM_ARCH_NONE, NULL}
25876 struct arm_cpu_option_table
25880 const arm_feature_set value;
25881 const arm_feature_set ext;
25882 /* For some CPUs we assume an FPU unless the user explicitly sets
25884 const arm_feature_set default_fpu;
25885 /* The canonical name of the CPU, or NULL to use NAME converted to upper
25887 const char * canonical_name;
25890 /* This list should, at a minimum, contain all the cpu names
25891 recognized by GCC. */
25892 #define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
25894 static const struct arm_cpu_option_table arm_cpus[] =
25896 ARM_CPU_OPT ("all", NULL, ARM_ANY,
25899 ARM_CPU_OPT ("arm1", NULL, ARM_ARCH_V1,
25902 ARM_CPU_OPT ("arm2", NULL, ARM_ARCH_V2,
25905 ARM_CPU_OPT ("arm250", NULL, ARM_ARCH_V2S,
25908 ARM_CPU_OPT ("arm3", NULL, ARM_ARCH_V2S,
25911 ARM_CPU_OPT ("arm6", NULL, ARM_ARCH_V3,
25914 ARM_CPU_OPT ("arm60", NULL, ARM_ARCH_V3,
25917 ARM_CPU_OPT ("arm600", NULL, ARM_ARCH_V3,
25920 ARM_CPU_OPT ("arm610", NULL, ARM_ARCH_V3,
25923 ARM_CPU_OPT ("arm620", NULL, ARM_ARCH_V3,
25926 ARM_CPU_OPT ("arm7", NULL, ARM_ARCH_V3,
25929 ARM_CPU_OPT ("arm7m", NULL, ARM_ARCH_V3M,
25932 ARM_CPU_OPT ("arm7d", NULL, ARM_ARCH_V3,
25935 ARM_CPU_OPT ("arm7dm", NULL, ARM_ARCH_V3M,
25938 ARM_CPU_OPT ("arm7di", NULL, ARM_ARCH_V3,
25941 ARM_CPU_OPT ("arm7dmi", NULL, ARM_ARCH_V3M,
25944 ARM_CPU_OPT ("arm70", NULL, ARM_ARCH_V3,
25947 ARM_CPU_OPT ("arm700", NULL, ARM_ARCH_V3,
25950 ARM_CPU_OPT ("arm700i", NULL, ARM_ARCH_V3,
25953 ARM_CPU_OPT ("arm710", NULL, ARM_ARCH_V3,
25956 ARM_CPU_OPT ("arm710t", NULL, ARM_ARCH_V4T,
25959 ARM_CPU_OPT ("arm720", NULL, ARM_ARCH_V3,
25962 ARM_CPU_OPT ("arm720t", NULL, ARM_ARCH_V4T,
25965 ARM_CPU_OPT ("arm740t", NULL, ARM_ARCH_V4T,
25968 ARM_CPU_OPT ("arm710c", NULL, ARM_ARCH_V3,
25971 ARM_CPU_OPT ("arm7100", NULL, ARM_ARCH_V3,
25974 ARM_CPU_OPT ("arm7500", NULL, ARM_ARCH_V3,
25977 ARM_CPU_OPT ("arm7500fe", NULL, ARM_ARCH_V3,
25980 ARM_CPU_OPT ("arm7t", NULL, ARM_ARCH_V4T,
25983 ARM_CPU_OPT ("arm7tdmi", NULL, ARM_ARCH_V4T,
25986 ARM_CPU_OPT ("arm7tdmi-s", NULL, ARM_ARCH_V4T,
25989 ARM_CPU_OPT ("arm8", NULL, ARM_ARCH_V4,
25992 ARM_CPU_OPT ("arm810", NULL, ARM_ARCH_V4,
25995 ARM_CPU_OPT ("strongarm", NULL, ARM_ARCH_V4,
25998 ARM_CPU_OPT ("strongarm1", NULL, ARM_ARCH_V4,
26001 ARM_CPU_OPT ("strongarm110", NULL, ARM_ARCH_V4,
26004 ARM_CPU_OPT ("strongarm1100", NULL, ARM_ARCH_V4,
26007 ARM_CPU_OPT ("strongarm1110", NULL, ARM_ARCH_V4,
26010 ARM_CPU_OPT ("arm9", NULL, ARM_ARCH_V4T,
26013 ARM_CPU_OPT ("arm920", "ARM920T", ARM_ARCH_V4T,
26016 ARM_CPU_OPT ("arm920t", NULL, ARM_ARCH_V4T,
26019 ARM_CPU_OPT ("arm922t", NULL, ARM_ARCH_V4T,
26022 ARM_CPU_OPT ("arm940t", NULL, ARM_ARCH_V4T,
26025 ARM_CPU_OPT ("arm9tdmi", NULL, ARM_ARCH_V4T,
26028 ARM_CPU_OPT ("fa526", NULL, ARM_ARCH_V4,
26031 ARM_CPU_OPT ("fa626", NULL, ARM_ARCH_V4,
26035 /* For V5 or later processors we default to using VFP; but the user
26036 should really set the FPU type explicitly. */
26037 ARM_CPU_OPT ("arm9e-r0", NULL, ARM_ARCH_V5TExP,
26040 ARM_CPU_OPT ("arm9e", NULL, ARM_ARCH_V5TE,
26043 ARM_CPU_OPT ("arm926ej", "ARM926EJ-S", ARM_ARCH_V5TEJ,
26046 ARM_CPU_OPT ("arm926ejs", "ARM926EJ-S", ARM_ARCH_V5TEJ,
26049 ARM_CPU_OPT ("arm926ej-s", NULL, ARM_ARCH_V5TEJ,
26052 ARM_CPU_OPT ("arm946e-r0", NULL, ARM_ARCH_V5TExP,
26055 ARM_CPU_OPT ("arm946e", "ARM946E-S", ARM_ARCH_V5TE,
26058 ARM_CPU_OPT ("arm946e-s", NULL, ARM_ARCH_V5TE,
26061 ARM_CPU_OPT ("arm966e-r0", NULL, ARM_ARCH_V5TExP,
26064 ARM_CPU_OPT ("arm966e", "ARM966E-S", ARM_ARCH_V5TE,
26067 ARM_CPU_OPT ("arm966e-s", NULL, ARM_ARCH_V5TE,
26070 ARM_CPU_OPT ("arm968e-s", NULL, ARM_ARCH_V5TE,
26073 ARM_CPU_OPT ("arm10t", NULL, ARM_ARCH_V5T,
26076 ARM_CPU_OPT ("arm10tdmi", NULL, ARM_ARCH_V5T,
26079 ARM_CPU_OPT ("arm10e", NULL, ARM_ARCH_V5TE,
26082 ARM_CPU_OPT ("arm1020", "ARM1020E", ARM_ARCH_V5TE,
26085 ARM_CPU_OPT ("arm1020t", NULL, ARM_ARCH_V5T,
26088 ARM_CPU_OPT ("arm1020e", NULL, ARM_ARCH_V5TE,
26091 ARM_CPU_OPT ("arm1022e", NULL, ARM_ARCH_V5TE,
26094 ARM_CPU_OPT ("arm1026ejs", "ARM1026EJ-S", ARM_ARCH_V5TEJ,
26097 ARM_CPU_OPT ("arm1026ej-s", NULL, ARM_ARCH_V5TEJ,
26100 ARM_CPU_OPT ("fa606te", NULL, ARM_ARCH_V5TE,
26103 ARM_CPU_OPT ("fa616te", NULL, ARM_ARCH_V5TE,
26106 ARM_CPU_OPT ("fa626te", NULL, ARM_ARCH_V5TE,
26109 ARM_CPU_OPT ("fmp626", NULL, ARM_ARCH_V5TE,
26112 ARM_CPU_OPT ("fa726te", NULL, ARM_ARCH_V5TE,
26115 ARM_CPU_OPT ("arm1136js", "ARM1136J-S", ARM_ARCH_V6,
26118 ARM_CPU_OPT ("arm1136j-s", NULL, ARM_ARCH_V6,
26121 ARM_CPU_OPT ("arm1136jfs", "ARM1136JF-S", ARM_ARCH_V6,
26124 ARM_CPU_OPT ("arm1136jf-s", NULL, ARM_ARCH_V6,
26127 ARM_CPU_OPT ("mpcore", "MPCore", ARM_ARCH_V6K,
26130 ARM_CPU_OPT ("mpcorenovfp", "MPCore", ARM_ARCH_V6K,
26133 ARM_CPU_OPT ("arm1156t2-s", NULL, ARM_ARCH_V6T2,
26136 ARM_CPU_OPT ("arm1156t2f-s", NULL, ARM_ARCH_V6T2,
26139 ARM_CPU_OPT ("arm1176jz-s", NULL, ARM_ARCH_V6KZ,
26142 ARM_CPU_OPT ("arm1176jzf-s", NULL, ARM_ARCH_V6KZ,
26145 ARM_CPU_OPT ("cortex-a5", "Cortex-A5", ARM_ARCH_V7A,
26146 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
26148 ARM_CPU_OPT ("cortex-a7", "Cortex-A7", ARM_ARCH_V7VE,
26150 FPU_ARCH_NEON_VFP_V4),
26151 ARM_CPU_OPT ("cortex-a8", "Cortex-A8", ARM_ARCH_V7A,
26152 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
26153 ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
26154 ARM_CPU_OPT ("cortex-a9", "Cortex-A9", ARM_ARCH_V7A,
26155 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
26156 ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
26157 ARM_CPU_OPT ("cortex-a12", "Cortex-A12", ARM_ARCH_V7VE,
26159 FPU_ARCH_NEON_VFP_V4),
26160 ARM_CPU_OPT ("cortex-a15", "Cortex-A15", ARM_ARCH_V7VE,
26162 FPU_ARCH_NEON_VFP_V4),
26163 ARM_CPU_OPT ("cortex-a17", "Cortex-A17", ARM_ARCH_V7VE,
26165 FPU_ARCH_NEON_VFP_V4),
26166 ARM_CPU_OPT ("cortex-a32", "Cortex-A32", ARM_ARCH_V8A,
26167 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26168 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26169 ARM_CPU_OPT ("cortex-a35", "Cortex-A35", ARM_ARCH_V8A,
26170 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26171 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26172 ARM_CPU_OPT ("cortex-a53", "Cortex-A53", ARM_ARCH_V8A,
26173 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26174 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26175 ARM_CPU_OPT ("cortex-a55", "Cortex-A55", ARM_ARCH_V8_2A,
26176 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
26177 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
26178 ARM_CPU_OPT ("cortex-a57", "Cortex-A57", ARM_ARCH_V8A,
26179 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26180 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26181 ARM_CPU_OPT ("cortex-a72", "Cortex-A72", ARM_ARCH_V8A,
26182 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26183 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26184 ARM_CPU_OPT ("cortex-a73", "Cortex-A73", ARM_ARCH_V8A,
26185 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26186 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26187 ARM_CPU_OPT ("cortex-a75", "Cortex-A75", ARM_ARCH_V8_2A,
26188 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
26189 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
26190 ARM_CPU_OPT ("cortex-a76", "Cortex-A76", ARM_ARCH_V8_2A,
26191 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
26192 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
26193 ARM_CPU_OPT ("cortex-r4", "Cortex-R4", ARM_ARCH_V7R,
26196 ARM_CPU_OPT ("cortex-r4f", "Cortex-R4F", ARM_ARCH_V7R,
26198 FPU_ARCH_VFP_V3D16),
26199 ARM_CPU_OPT ("cortex-r5", "Cortex-R5", ARM_ARCH_V7R,
26200 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
26202 ARM_CPU_OPT ("cortex-r7", "Cortex-R7", ARM_ARCH_V7R,
26203 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
26204 FPU_ARCH_VFP_V3D16),
26205 ARM_CPU_OPT ("cortex-r8", "Cortex-R8", ARM_ARCH_V7R,
26206 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
26207 FPU_ARCH_VFP_V3D16),
26208 ARM_CPU_OPT ("cortex-r52", "Cortex-R52", ARM_ARCH_V8R,
26209 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26210 FPU_ARCH_NEON_VFP_ARMV8),
26211 ARM_CPU_OPT ("cortex-m33", "Cortex-M33", ARM_ARCH_V8M_MAIN,
26212 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
26214 ARM_CPU_OPT ("cortex-m23", "Cortex-M23", ARM_ARCH_V8M_BASE,
26217 ARM_CPU_OPT ("cortex-m7", "Cortex-M7", ARM_ARCH_V7EM,
26220 ARM_CPU_OPT ("cortex-m4", "Cortex-M4", ARM_ARCH_V7EM,
26223 ARM_CPU_OPT ("cortex-m3", "Cortex-M3", ARM_ARCH_V7M,
26226 ARM_CPU_OPT ("cortex-m1", "Cortex-M1", ARM_ARCH_V6SM,
26229 ARM_CPU_OPT ("cortex-m0", "Cortex-M0", ARM_ARCH_V6SM,
26232 ARM_CPU_OPT ("cortex-m0plus", "Cortex-M0+", ARM_ARCH_V6SM,
26235 ARM_CPU_OPT ("exynos-m1", "Samsung Exynos M1", ARM_ARCH_V8A,
26236 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26237 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26239 /* ??? XSCALE is really an architecture. */
26240 ARM_CPU_OPT ("xscale", NULL, ARM_ARCH_XSCALE,
26244 /* ??? iwmmxt is not a processor. */
26245 ARM_CPU_OPT ("iwmmxt", NULL, ARM_ARCH_IWMMXT,
26248 ARM_CPU_OPT ("iwmmxt2", NULL, ARM_ARCH_IWMMXT2,
26251 ARM_CPU_OPT ("i80200", NULL, ARM_ARCH_XSCALE,
26256 ARM_CPU_OPT ("ep9312", "ARM920T",
26257 ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
26258 ARM_ARCH_NONE, FPU_ARCH_MAVERICK),
26260 /* Marvell processors. */
26261 ARM_CPU_OPT ("marvell-pj4", NULL, ARM_ARCH_V7A,
26262 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
26263 FPU_ARCH_VFP_V3D16),
26264 ARM_CPU_OPT ("marvell-whitney", NULL, ARM_ARCH_V7A,
26265 ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
26266 FPU_ARCH_NEON_VFP_V4),
26268 /* APM X-Gene family. */
26269 ARM_CPU_OPT ("xgene1", "APM X-Gene 1", ARM_ARCH_V8A,
26271 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26272 ARM_CPU_OPT ("xgene2", "APM X-Gene 2", ARM_ARCH_V8A,
26273 ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26274 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
26276 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
26280 struct arm_arch_option_table
26284 const arm_feature_set value;
26285 const arm_feature_set default_fpu;
26288 /* This list should, at a minimum, contain all the architecture names
26289 recognized by GCC. */
26290 #define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF }
26292 static const struct arm_arch_option_table arm_archs[] =
26294 ARM_ARCH_OPT ("all", ARM_ANY, FPU_ARCH_FPA),
26295 ARM_ARCH_OPT ("armv1", ARM_ARCH_V1, FPU_ARCH_FPA),
26296 ARM_ARCH_OPT ("armv2", ARM_ARCH_V2, FPU_ARCH_FPA),
26297 ARM_ARCH_OPT ("armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA),
26298 ARM_ARCH_OPT ("armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA),
26299 ARM_ARCH_OPT ("armv3", ARM_ARCH_V3, FPU_ARCH_FPA),
26300 ARM_ARCH_OPT ("armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA),
26301 ARM_ARCH_OPT ("armv4", ARM_ARCH_V4, FPU_ARCH_FPA),
26302 ARM_ARCH_OPT ("armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA),
26303 ARM_ARCH_OPT ("armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA),
26304 ARM_ARCH_OPT ("armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA),
26305 ARM_ARCH_OPT ("armv5", ARM_ARCH_V5, FPU_ARCH_VFP),
26306 ARM_ARCH_OPT ("armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP),
26307 ARM_ARCH_OPT ("armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP),
26308 ARM_ARCH_OPT ("armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP),
26309 ARM_ARCH_OPT ("armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP),
26310 ARM_ARCH_OPT ("armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP),
26311 ARM_ARCH_OPT ("armv6", ARM_ARCH_V6, FPU_ARCH_VFP),
26312 ARM_ARCH_OPT ("armv6j", ARM_ARCH_V6, FPU_ARCH_VFP),
26313 ARM_ARCH_OPT ("armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP),
26314 ARM_ARCH_OPT ("armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP),
26315 /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
26316 kept to preserve existing behaviour. */
26317 ARM_ARCH_OPT ("armv6kz", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
26318 ARM_ARCH_OPT ("armv6zk", ARM_ARCH_V6KZ, FPU_ARCH_VFP),
26319 ARM_ARCH_OPT ("armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP),
26320 ARM_ARCH_OPT ("armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP),
26321 ARM_ARCH_OPT ("armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP),
26322 /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
26323 kept to preserve existing behaviour. */
26324 ARM_ARCH_OPT ("armv6kzt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
26325 ARM_ARCH_OPT ("armv6zkt2", ARM_ARCH_V6KZT2, FPU_ARCH_VFP),
26326 ARM_ARCH_OPT ("armv6-m", ARM_ARCH_V6M, FPU_ARCH_VFP),
26327 ARM_ARCH_OPT ("armv6s-m", ARM_ARCH_V6SM, FPU_ARCH_VFP),
26328 ARM_ARCH_OPT ("armv7", ARM_ARCH_V7, FPU_ARCH_VFP),
26329 /* The official spelling of the ARMv7 profile variants is the dashed form.
26330 Accept the non-dashed form for compatibility with old toolchains. */
26331 ARM_ARCH_OPT ("armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP),
26332 ARM_ARCH_OPT ("armv7ve", ARM_ARCH_V7VE, FPU_ARCH_VFP),
26333 ARM_ARCH_OPT ("armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP),
26334 ARM_ARCH_OPT ("armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP),
26335 ARM_ARCH_OPT ("armv7-a", ARM_ARCH_V7A, FPU_ARCH_VFP),
26336 ARM_ARCH_OPT ("armv7-r", ARM_ARCH_V7R, FPU_ARCH_VFP),
26337 ARM_ARCH_OPT ("armv7-m", ARM_ARCH_V7M, FPU_ARCH_VFP),
26338 ARM_ARCH_OPT ("armv7e-m", ARM_ARCH_V7EM, FPU_ARCH_VFP),
26339 ARM_ARCH_OPT ("armv8-m.base", ARM_ARCH_V8M_BASE, FPU_ARCH_VFP),
26340 ARM_ARCH_OPT ("armv8-m.main", ARM_ARCH_V8M_MAIN, FPU_ARCH_VFP),
26341 ARM_ARCH_OPT ("armv8-a", ARM_ARCH_V8A, FPU_ARCH_VFP),
26342 ARM_ARCH_OPT ("armv8.1-a", ARM_ARCH_V8_1A, FPU_ARCH_VFP),
26343 ARM_ARCH_OPT ("armv8.2-a", ARM_ARCH_V8_2A, FPU_ARCH_VFP),
26344 ARM_ARCH_OPT ("armv8.3-a", ARM_ARCH_V8_3A, FPU_ARCH_VFP),
26345 ARM_ARCH_OPT ("armv8-r", ARM_ARCH_V8R, FPU_ARCH_VFP),
26346 ARM_ARCH_OPT ("armv8.4-a", ARM_ARCH_V8_4A, FPU_ARCH_VFP),
26347 ARM_ARCH_OPT ("armv8.5-a", ARM_ARCH_V8_5A, FPU_ARCH_VFP),
26348 ARM_ARCH_OPT ("xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP),
26349 ARM_ARCH_OPT ("iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP),
26350 ARM_ARCH_OPT ("iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP),
26351 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
26353 #undef ARM_ARCH_OPT
26355 /* ISA extensions in the co-processor and main instruction set space. */
26357 struct arm_option_extension_value_table
26361 const arm_feature_set merge_value;
26362 const arm_feature_set clear_value;
26363 /* List of architectures for which an extension is available. ARM_ARCH_NONE
26364 indicates that an extension is available for all architectures while
26365 ARM_ANY marks an empty entry. */
26366 const arm_feature_set allowed_archs[2];
26369 /* The following table must be in alphabetical order with a NULL last entry. */
26371 #define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
26372 #define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
26374 static const struct arm_option_extension_value_table arm_extensions[] =
26376 ARM_EXT_OPT ("crc", ARCH_CRC_ARMV8, ARM_FEATURE_COPROC (CRC_EXT_ARMV8),
26377 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
26378 ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
26379 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
26380 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
26381 ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
26382 ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
26384 ARM_EXT_OPT ("dsp", ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
26385 ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
26386 ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
26387 ARM_EXT_OPT ("fp", FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
26388 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
26389 ARM_EXT_OPT ("fp16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
26390 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
26392 ARM_EXT_OPT ("fp16fml", ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
26393 | ARM_EXT2_FP16_FML),
26394 ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
26395 | ARM_EXT2_FP16_FML),
26397 ARM_EXT_OPT2 ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
26398 ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
26399 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
26400 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
26401 /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
26402 Thumb divide instruction. Due to this having the same name as the
26403 previous entry, this will be ignored when doing command-line parsing and
26404 only considered by build attribute selection code. */
26405 ARM_EXT_OPT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
26406 ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
26407 ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
26408 ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
26409 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
26410 ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
26411 ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
26412 ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
26413 ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
26414 ARM_EXT_OPT2 ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
26415 ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
26416 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
26417 ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
26418 ARM_EXT_OPT ("os", ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
26419 ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
26420 ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
26421 ARM_EXT_OPT ("pan", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
26422 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
26423 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
26424 ARM_EXT_OPT ("ras", ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
26425 ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
26426 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
26427 ARM_EXT_OPT ("rdma", FPU_ARCH_NEON_VFP_ARMV8_1,
26428 ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
26429 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
26430 ARM_EXT_OPT ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
26431 ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
26433 ARM_EXT_OPT2 ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
26434 ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
26435 ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
26436 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
26437 ARM_EXT_OPT ("simd", FPU_ARCH_NEON_VFP_ARMV8,
26438 ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
26439 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
26440 ARM_EXT_OPT ("virt", ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
26442 ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
26443 ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
26444 ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
26445 ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
26446 { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
26450 /* ISA floating-point and Advanced SIMD extensions. */
26451 struct arm_option_fpu_value_table
26454 const arm_feature_set value;
26457 /* This list should, at a minimum, contain all the fpu names
26458 recognized by GCC. */
26459 static const struct arm_option_fpu_value_table arm_fpus[] =
26461 {"softfpa", FPU_NONE},
26462 {"fpe", FPU_ARCH_FPE},
26463 {"fpe2", FPU_ARCH_FPE},
26464 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
26465 {"fpa", FPU_ARCH_FPA},
26466 {"fpa10", FPU_ARCH_FPA},
26467 {"fpa11", FPU_ARCH_FPA},
26468 {"arm7500fe", FPU_ARCH_FPA},
26469 {"softvfp", FPU_ARCH_VFP},
26470 {"softvfp+vfp", FPU_ARCH_VFP_V2},
26471 {"vfp", FPU_ARCH_VFP_V2},
26472 {"vfp9", FPU_ARCH_VFP_V2},
26473 {"vfp3", FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3. */
26474 {"vfp10", FPU_ARCH_VFP_V2},
26475 {"vfp10-r0", FPU_ARCH_VFP_V1},
26476 {"vfpxd", FPU_ARCH_VFP_V1xD},
26477 {"vfpv2", FPU_ARCH_VFP_V2},
26478 {"vfpv3", FPU_ARCH_VFP_V3},
26479 {"vfpv3-fp16", FPU_ARCH_VFP_V3_FP16},
26480 {"vfpv3-d16", FPU_ARCH_VFP_V3D16},
26481 {"vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16},
26482 {"vfpv3xd", FPU_ARCH_VFP_V3xD},
26483 {"vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16},
26484 {"arm1020t", FPU_ARCH_VFP_V1},
26485 {"arm1020e", FPU_ARCH_VFP_V2},
26486 {"arm1136jfs", FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s. */
26487 {"arm1136jf-s", FPU_ARCH_VFP_V2},
26488 {"maverick", FPU_ARCH_MAVERICK},
26489 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
26490 {"neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
26491 {"neon-fp16", FPU_ARCH_NEON_FP16},
26492 {"vfpv4", FPU_ARCH_VFP_V4},
26493 {"vfpv4-d16", FPU_ARCH_VFP_V4D16},
26494 {"fpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16},
26495 {"fpv5-d16", FPU_ARCH_VFP_V5D16},
26496 {"fpv5-sp-d16", FPU_ARCH_VFP_V5_SP_D16},
26497 {"neon-vfpv4", FPU_ARCH_NEON_VFP_V4},
26498 {"fp-armv8", FPU_ARCH_VFP_ARMV8},
26499 {"neon-fp-armv8", FPU_ARCH_NEON_VFP_ARMV8},
26500 {"crypto-neon-fp-armv8",
26501 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
26502 {"neon-fp-armv8.1", FPU_ARCH_NEON_VFP_ARMV8_1},
26503 {"crypto-neon-fp-armv8.1",
26504 FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
26505 {NULL, ARM_ARCH_NONE}
26508 struct arm_option_value_table
26514 static const struct arm_option_value_table arm_float_abis[] =
26516 {"hard", ARM_FLOAT_ABI_HARD},
26517 {"softfp", ARM_FLOAT_ABI_SOFTFP},
26518 {"soft", ARM_FLOAT_ABI_SOFT},
26523 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
26524 static const struct arm_option_value_table arm_eabis[] =
26526 {"gnu", EF_ARM_EABI_UNKNOWN},
26527 {"4", EF_ARM_EABI_VER4},
26528 {"5", EF_ARM_EABI_VER5},
26533 struct arm_long_option_table
26535 const char * option; /* Substring to match. */
26536 const char * help; /* Help information. */
26537 int (* func) (const char * subopt); /* Function to decode sub-option. */
26538 const char * deprecated; /* If non-null, print this message. */
26542 arm_parse_extension (const char *str, const arm_feature_set *opt_set,
26543 arm_feature_set *ext_set)
26545 /* We insist on extensions being specified in alphabetical order, and with
26546 extensions being added before being removed. We achieve this by having
26547 the global ARM_EXTENSIONS table in alphabetical order, and using the
26548 ADDING_VALUE variable to indicate whether we are adding an extension (1)
26549 or removing it (0) and only allowing it to change in the order
26551 const struct arm_option_extension_value_table * opt = NULL;
26552 const arm_feature_set arm_any = ARM_ANY;
26553 int adding_value = -1;
26555 while (str != NULL && *str != 0)
26562 as_bad (_("invalid architectural extension"));
26567 ext = strchr (str, '+');
26572 len = strlen (str);
26574 if (len >= 2 && strncmp (str, "no", 2) == 0)
26576 if (adding_value != 0)
26579 opt = arm_extensions;
26587 if (adding_value == -1)
26590 opt = arm_extensions;
26592 else if (adding_value != 1)
26594 as_bad (_("must specify extensions to add before specifying "
26595 "those to remove"));
26602 as_bad (_("missing architectural extension"));
26606 gas_assert (adding_value != -1);
26607 gas_assert (opt != NULL);
26609 /* Scan over the options table trying to find an exact match. */
26610 for (; opt->name != NULL; opt++)
26611 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26613 int i, nb_allowed_archs =
26614 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
26615 /* Check we can apply the extension to this architecture. */
26616 for (i = 0; i < nb_allowed_archs; i++)
26619 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
26621 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
26624 if (i == nb_allowed_archs)
26626 as_bad (_("extension does not apply to the base architecture"));
26630 /* Add or remove the extension. */
26632 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
26634 ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);
26636 /* Allowing Thumb division instructions for ARMv7 in autodetection
26637 rely on this break so that duplicate extensions (extensions
26638 with the same name as a previous extension in the list) are not
26639 considered for command-line parsing. */
26643 if (opt->name == NULL)
26645 /* Did we fail to find an extension because it wasn't specified in
26646 alphabetical order, or because it does not exist? */
26648 for (opt = arm_extensions; opt->name != NULL; opt++)
26649 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26652 if (opt->name == NULL)
26653 as_bad (_("unknown architectural extension `%s'"), str);
26655 as_bad (_("architectural extensions must be specified in "
26656 "alphabetical order"));
26662 /* We should skip the extension we've just matched the next time
26674 arm_parse_cpu (const char *str)
26676 const struct arm_cpu_option_table *opt;
26677 const char *ext = strchr (str, '+');
26683 len = strlen (str);
26687 as_bad (_("missing cpu name `%s'"), str);
26691 for (opt = arm_cpus; opt->name != NULL; opt++)
26692 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26694 mcpu_cpu_opt = &opt->value;
26695 if (mcpu_ext_opt == NULL)
26696 mcpu_ext_opt = XNEW (arm_feature_set);
26697 *mcpu_ext_opt = opt->ext;
26698 mcpu_fpu_opt = &opt->default_fpu;
26699 if (opt->canonical_name)
26701 gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
26702 strcpy (selected_cpu_name, opt->canonical_name);
26708 if (len >= sizeof selected_cpu_name)
26709 len = (sizeof selected_cpu_name) - 1;
26711 for (i = 0; i < len; i++)
26712 selected_cpu_name[i] = TOUPPER (opt->name[i]);
26713 selected_cpu_name[i] = 0;
26717 return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt);
26722 as_bad (_("unknown cpu `%s'"), str);
26727 arm_parse_arch (const char *str)
26729 const struct arm_arch_option_table *opt;
26730 const char *ext = strchr (str, '+');
26736 len = strlen (str);
26740 as_bad (_("missing architecture name `%s'"), str);
26744 for (opt = arm_archs; opt->name != NULL; opt++)
26745 if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
26747 march_cpu_opt = &opt->value;
26748 if (march_ext_opt == NULL)
26749 march_ext_opt = XNEW (arm_feature_set);
26750 *march_ext_opt = arm_arch_none;
26751 march_fpu_opt = &opt->default_fpu;
26752 strcpy (selected_cpu_name, opt->name);
26755 return arm_parse_extension (ext, march_cpu_opt, march_ext_opt);
26760 as_bad (_("unknown architecture `%s'\n"), str);
26765 arm_parse_fpu (const char * str)
26767 const struct arm_option_fpu_value_table * opt;
26769 for (opt = arm_fpus; opt->name != NULL; opt++)
26770 if (streq (opt->name, str))
26772 mfpu_opt = &opt->value;
26776 as_bad (_("unknown floating point format `%s'\n"), str);
26781 arm_parse_float_abi (const char * str)
26783 const struct arm_option_value_table * opt;
26785 for (opt = arm_float_abis; opt->name != NULL; opt++)
26786 if (streq (opt->name, str))
26788 mfloat_abi_opt = opt->value;
26792 as_bad (_("unknown floating point abi `%s'\n"), str);
26798 arm_parse_eabi (const char * str)
26800 const struct arm_option_value_table *opt;
26802 for (opt = arm_eabis; opt->name != NULL; opt++)
26803 if (streq (opt->name, str))
26805 meabi_flags = opt->value;
26808 as_bad (_("unknown EABI `%s'\n"), str);
26814 arm_parse_it_mode (const char * str)
26816 bfd_boolean ret = TRUE;
26818 if (streq ("arm", str))
26819 implicit_it_mode = IMPLICIT_IT_MODE_ARM;
26820 else if (streq ("thumb", str))
26821 implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
26822 else if (streq ("always", str))
26823 implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
26824 else if (streq ("never", str))
26825 implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
26828 as_bad (_("unknown implicit IT mode `%s', should be "\
26829 "arm, thumb, always, or never."), str);
26837 arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
26839 codecomposer_syntax = TRUE;
26840 arm_comment_chars[0] = ';';
26841 arm_line_separator_chars[0] = 0;
26845 struct arm_long_option_table arm_long_opts[] =
26847 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
26848 arm_parse_cpu, NULL},
26849 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
26850 arm_parse_arch, NULL},
26851 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
26852 arm_parse_fpu, NULL},
26853 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
26854 arm_parse_float_abi, NULL},
26856 {"meabi=", N_("<ver>\t\t assemble for eabi version <ver>"),
26857 arm_parse_eabi, NULL},
26859 {"mimplicit-it=", N_("<mode>\t controls implicit insertion of IT instructions"),
26860 arm_parse_it_mode, NULL},
26861 {"mccs", N_("\t\t\t TI CodeComposer Studio syntax compatibility mode"),
26862 arm_ccs_mode, NULL},
26863 {NULL, NULL, 0, NULL}
26867 md_parse_option (int c, const char * arg)
26869 struct arm_option_table *opt;
26870 const struct arm_legacy_option_table *fopt;
26871 struct arm_long_option_table *lopt;
26877 target_big_endian = 1;
26883 target_big_endian = 0;
26887 case OPTION_FIX_V4BX:
26895 #endif /* OBJ_ELF */
26898 /* Listing option. Just ignore these, we don't support additional
26903 for (opt = arm_opts; opt->option != NULL; opt++)
26905 if (c == opt->option[0]
26906 && ((arg == NULL && opt->option[1] == 0)
26907 || streq (arg, opt->option + 1)))
26909 /* If the option is deprecated, tell the user. */
26910 if (warn_on_deprecated && opt->deprecated != NULL)
26911 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
26912 arg ? arg : "", _(opt->deprecated));
26914 if (opt->var != NULL)
26915 *opt->var = opt->value;
26921 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
26923 if (c == fopt->option[0]
26924 && ((arg == NULL && fopt->option[1] == 0)
26925 || streq (arg, fopt->option + 1)))
26927 /* If the option is deprecated, tell the user. */
26928 if (warn_on_deprecated && fopt->deprecated != NULL)
26929 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
26930 arg ? arg : "", _(fopt->deprecated));
26932 if (fopt->var != NULL)
26933 *fopt->var = &fopt->value;
26939 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
26941 /* These options are expected to have an argument. */
26942 if (c == lopt->option[0]
26944 && strncmp (arg, lopt->option + 1,
26945 strlen (lopt->option + 1)) == 0)
26947 /* If the option is deprecated, tell the user. */
26948 if (warn_on_deprecated && lopt->deprecated != NULL)
26949 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
26950 _(lopt->deprecated));
26952 /* Call the sup-option parser. */
26953 return lopt->func (arg + strlen (lopt->option) - 1);
26964 md_show_usage (FILE * fp)
26966 struct arm_option_table *opt;
26967 struct arm_long_option_table *lopt;
26969 fprintf (fp, _(" ARM-specific assembler options:\n"));
26971 for (opt = arm_opts; opt->option != NULL; opt++)
26972 if (opt->help != NULL)
26973 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
26975 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
26976 if (lopt->help != NULL)
26977 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
26981 -EB assemble code for a big-endian cpu\n"));
26986 -EL assemble code for a little-endian cpu\n"));
26990 --fix-v4bx Allow BX in ARMv4 code\n"));
26994 --fdpic generate an FDPIC object file\n"));
26995 #endif /* OBJ_ELF */
27003 arm_feature_set flags;
27004 } cpu_arch_ver_table;
27006 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
27007 chronologically for architectures, with an exception for ARMv6-M and
27008 ARMv6S-M due to legacy reasons. No new architecture should have a
27009 special case. This allows for build attribute selection results to be
27010 stable when new architectures are added. */
27011 static const cpu_arch_ver_table cpu_arch_ver[] =
27013 {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V1},
27014 {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V2},
27015 {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V2S},
27016 {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V3},
27017 {TAG_CPU_ARCH_PRE_V4, ARM_ARCH_V3M},
27018 {TAG_CPU_ARCH_V4, ARM_ARCH_V4xM},
27019 {TAG_CPU_ARCH_V4, ARM_ARCH_V4},
27020 {TAG_CPU_ARCH_V4T, ARM_ARCH_V4TxM},
27021 {TAG_CPU_ARCH_V4T, ARM_ARCH_V4T},
27022 {TAG_CPU_ARCH_V5T, ARM_ARCH_V5xM},
27023 {TAG_CPU_ARCH_V5T, ARM_ARCH_V5},
27024 {TAG_CPU_ARCH_V5T, ARM_ARCH_V5TxM},
27025 {TAG_CPU_ARCH_V5T, ARM_ARCH_V5T},
27026 {TAG_CPU_ARCH_V5TE, ARM_ARCH_V5TExP},
27027 {TAG_CPU_ARCH_V5TE, ARM_ARCH_V5TE},
27028 {TAG_CPU_ARCH_V5TEJ, ARM_ARCH_V5TEJ},
27029 {TAG_CPU_ARCH_V6, ARM_ARCH_V6},
27030 {TAG_CPU_ARCH_V6KZ, ARM_ARCH_V6Z},
27031 {TAG_CPU_ARCH_V6KZ, ARM_ARCH_V6KZ},
27032 {TAG_CPU_ARCH_V6K, ARM_ARCH_V6K},
27033 {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6T2},
27034 {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6KT2},
27035 {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6ZT2},
27036 {TAG_CPU_ARCH_V6T2, ARM_ARCH_V6KZT2},
27038 /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
27039 always selected build attributes to match those of ARMv6-M
27040 (resp. ARMv6S-M). However, due to these architectures being a strict
27041 subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
27042 would be selected when fully respecting chronology of architectures.
27043 It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
27044 move them before ARMv7 architectures. */
27045 {TAG_CPU_ARCH_V6_M, ARM_ARCH_V6M},
27046 {TAG_CPU_ARCH_V6S_M, ARM_ARCH_V6SM},
27048 {TAG_CPU_ARCH_V7, ARM_ARCH_V7},
27049 {TAG_CPU_ARCH_V7, ARM_ARCH_V7A},
27050 {TAG_CPU_ARCH_V7, ARM_ARCH_V7R},
27051 {TAG_CPU_ARCH_V7, ARM_ARCH_V7M},
27052 {TAG_CPU_ARCH_V7, ARM_ARCH_V7VE},
27053 {TAG_CPU_ARCH_V7E_M, ARM_ARCH_V7EM},
27054 {TAG_CPU_ARCH_V8, ARM_ARCH_V8A},
27055 {TAG_CPU_ARCH_V8, ARM_ARCH_V8_1A},
27056 {TAG_CPU_ARCH_V8, ARM_ARCH_V8_2A},
27057 {TAG_CPU_ARCH_V8, ARM_ARCH_V8_3A},
27058 {TAG_CPU_ARCH_V8M_BASE, ARM_ARCH_V8M_BASE},
27059 {TAG_CPU_ARCH_V8M_MAIN, ARM_ARCH_V8M_MAIN},
27060 {TAG_CPU_ARCH_V8R, ARM_ARCH_V8R},
27061 {TAG_CPU_ARCH_V8, ARM_ARCH_V8_4A},
27062 {TAG_CPU_ARCH_V8, ARM_ARCH_V8_5A},
27063 {-1, ARM_ARCH_NONE}
27066 /* Set an attribute if it has not already been set by the user. */
27069 aeabi_set_attribute_int (int tag, int value)
27072 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
27073 || !attributes_set_explicitly[tag])
27074 bfd_elf_add_proc_attr_int (stdoutput, tag, value);
27078 aeabi_set_attribute_string (int tag, const char *value)
27081 || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
27082 || !attributes_set_explicitly[tag])
27083 bfd_elf_add_proc_attr_string (stdoutput, tag, value);
27086 /* Return whether features in the *NEEDED feature set are available via
27087 extensions for the architecture whose feature set is *ARCH_FSET. */
27090 have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
27091 const arm_feature_set *needed)
27093 int i, nb_allowed_archs;
27094 arm_feature_set ext_fset;
27095 const struct arm_option_extension_value_table *opt;
27097 ext_fset = arm_arch_none;
27098 for (opt = arm_extensions; opt->name != NULL; opt++)
27100 /* Extension does not provide any feature we need. */
27101 if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
27105 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
27106 for (i = 0; i < nb_allowed_archs; i++)
27109 if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
27112 /* Extension is available, add it. */
27113 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
27114 ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
27118 /* Can we enable all features in *needed? */
27119 return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
27122 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
27123 a given architecture feature set *ARCH_EXT_FSET including extension feature
27124 set *EXT_FSET. Selection logic used depend on EXACT_MATCH:
27125 - if true, check for an exact match of the architecture modulo extensions;
27126 - otherwise, select build attribute value of the first superset
27127 architecture released so that results remains stable when new architectures
27129 For -march/-mcpu=all the build attribute value of the most featureful
27130 architecture is returned. Tag_CPU_arch_profile result is returned in
27134 get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
27135 const arm_feature_set *ext_fset,
27136 char *profile, int exact_match)
27138 arm_feature_set arch_fset;
27139 const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;
27141 /* Select most featureful architecture with all its extensions if building
27142 for -march=all as the feature sets used to set build attributes. */
27143 if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
27145 /* Force revisiting of decision for each new architecture. */
27146 gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8M_MAIN);
27148 return TAG_CPU_ARCH_V8;
27151 ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);
27153 for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
27155 arm_feature_set known_arch_fset;
27157 ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
27160 /* Base architecture match user-specified architecture and
27161 extensions, eg. ARMv6S-M matching -march=armv6-m+os. */
27162 if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
27167 /* Base architecture match user-specified architecture only
27168 (eg. ARMv6-M in the same case as above). Record it in case we
27169 find a match with above condition. */
27170 else if (p_ver_ret == NULL
27171 && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
27177 /* Architecture has all features wanted. */
27178 if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
27180 arm_feature_set added_fset;
27182 /* Compute features added by this architecture over the one
27183 recorded in p_ver_ret. */
27184 if (p_ver_ret != NULL)
27185 ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
27187 /* First architecture that match incl. with extensions, or the
27188 only difference in features over the recorded match is
27189 features that were optional and are now mandatory. */
27190 if (p_ver_ret == NULL
27191 || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
27197 else if (p_ver_ret == NULL)
27199 arm_feature_set needed_ext_fset;
27201 ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);
27203 /* Architecture has all features needed when using some
27204 extensions. Record it and continue searching in case there
27205 exist an architecture providing all needed features without
27206 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
27208 if (have_ext_for_needed_feat_p (&known_arch_fset,
27215 if (p_ver_ret == NULL)
27219 /* Tag_CPU_arch_profile. */
27220 if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
27221 || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
27222 || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
27223 && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only)))
27225 else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r))
27227 else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
27231 return p_ver_ret->val;
27234 /* Set the public EABI object attributes. */
27237 aeabi_set_public_attributes (void)
27239 char profile = '\0';
27242 int fp16_optional = 0;
27243 int skip_exact_match = 0;
27244 arm_feature_set flags, flags_arch, flags_ext;
27246 /* Autodetection mode, choose the architecture based the instructions
27248 if (no_cpu_selected ())
27250 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
27252 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
27253 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
27255 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
27256 ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
27258 /* Code run during relaxation relies on selected_cpu being set. */
27259 ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
27260 flags_ext = arm_arch_none;
27261 ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
27262 selected_ext = flags_ext;
27263 selected_cpu = flags;
27265 /* Otherwise, choose the architecture based on the capabilities of the
27269 ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
27270 ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
27271 flags_ext = selected_ext;
27272 flags = selected_cpu;
27274 ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);
27276 /* Allow the user to override the reported architecture. */
27277 if (!ARM_FEATURE_ZERO (selected_object_arch))
27279 ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
27280 flags_ext = arm_arch_none;
27283 skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);
27285 /* When this function is run again after relaxation has happened there is no
27286 way to determine whether an architecture or CPU was specified by the user:
27287 - selected_cpu is set above for relaxation to work;
27288 - march_cpu_opt is not set if only -mcpu or .cpu is used;
27289 - mcpu_cpu_opt is set to arm_arch_any for autodetection.
27290 Therefore, if not in -march=all case we first try an exact match and fall
27291 back to autodetection. */
27292 if (!skip_exact_match)
27293 arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
27295 arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
27297 as_bad (_("no architecture contains all the instructions used\n"));
27299 /* Tag_CPU_name. */
27300 if (selected_cpu_name[0])
27304 q = selected_cpu_name;
27305 if (strncmp (q, "armv", 4) == 0)
27310 for (i = 0; q[i]; i++)
27311 q[i] = TOUPPER (q[i]);
27313 aeabi_set_attribute_string (Tag_CPU_name, q);
27316 /* Tag_CPU_arch. */
27317 aeabi_set_attribute_int (Tag_CPU_arch, arch);
27319 /* Tag_CPU_arch_profile. */
27320 if (profile != '\0')
27321 aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
27323 /* Tag_DSP_extension. */
27324 if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
27325 aeabi_set_attribute_int (Tag_DSP_extension, 1);
27327 ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
27328 /* Tag_ARM_ISA_use. */
27329 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
27330 || ARM_FEATURE_ZERO (flags_arch))
27331 aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
27333 /* Tag_THUMB_ISA_use. */
27334 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
27335 || ARM_FEATURE_ZERO (flags_arch))
27339 if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
27340 && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
27342 else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
27346 aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
27349 /* Tag_VFP_arch. */
27350 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
27351 aeabi_set_attribute_int (Tag_VFP_arch,
27352 ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
27354 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
27355 aeabi_set_attribute_int (Tag_VFP_arch,
27356 ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
27358 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
27361 aeabi_set_attribute_int (Tag_VFP_arch, 3);
27363 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
27365 aeabi_set_attribute_int (Tag_VFP_arch, 4);
27368 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
27369 aeabi_set_attribute_int (Tag_VFP_arch, 2);
27370 else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
27371 || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
27372 aeabi_set_attribute_int (Tag_VFP_arch, 1);
27374 /* Tag_ABI_HardFP_use. */
27375 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
27376 && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
27377 aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
27379 /* Tag_WMMX_arch. */
27380 if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
27381 aeabi_set_attribute_int (Tag_WMMX_arch, 2);
27382 else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
27383 aeabi_set_attribute_int (Tag_WMMX_arch, 1);
27385 /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch). */
27386 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
27387 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
27388 else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
27389 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
27390 else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
27392 if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
27394 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
27398 aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
27403 /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch). */
27404 if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
27405 aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
27409 We set Tag_DIV_use to two when integer divide instructions have been used
27410 in ARM state, or when Thumb integer divide instructions have been used,
27411 but we have no architecture profile set, nor have we any ARM instructions.
27413 For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
27414 by the base architecture.
27416 For new architectures we will have to check these tests. */
27417 gas_assert (arch <= TAG_CPU_ARCH_V8M_MAIN);
27418 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
27419 || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
27420 aeabi_set_attribute_int (Tag_DIV_use, 0);
27421 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
27422 || (profile == '\0'
27423 && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
27424 && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
27425 aeabi_set_attribute_int (Tag_DIV_use, 2);
27427 /* Tag_MP_extension_use. */
27428 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
27429 aeabi_set_attribute_int (Tag_MPextension_use, 1);
27431 /* Tag Virtualization_use. */
27432 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
27434 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
27437 aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
27440 /* Post relaxation hook. Recompute ARM attributes now that relaxation is
27441 finished and free extension feature bits which will not be used anymore. */
27444 arm_md_post_relax (void)
27446 aeabi_set_public_attributes ();
27447 XDELETE (mcpu_ext_opt);
27448 mcpu_ext_opt = NULL;
27449 XDELETE (march_ext_opt);
27450 march_ext_opt = NULL;
27453 /* Add the default contents for the .ARM.attributes section. */
27458 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
27461 aeabi_set_public_attributes ();
27463 #endif /* OBJ_ELF */
27465 /* Parse a .cpu directive. */
27468 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
27470 const struct arm_cpu_option_table *opt;
27474 name = input_line_pointer;
27475 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27476 input_line_pointer++;
27477 saved_char = *input_line_pointer;
27478 *input_line_pointer = 0;
27480 /* Skip the first "all" entry. */
27481 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
27482 if (streq (opt->name, name))
27484 selected_arch = opt->value;
27485 selected_ext = opt->ext;
27486 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
27487 if (opt->canonical_name)
27488 strcpy (selected_cpu_name, opt->canonical_name);
27492 for (i = 0; opt->name[i]; i++)
27493 selected_cpu_name[i] = TOUPPER (opt->name[i]);
27495 selected_cpu_name[i] = 0;
27497 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27499 *input_line_pointer = saved_char;
27500 demand_empty_rest_of_line ();
27503 as_bad (_("unknown cpu `%s'"), name);
27504 *input_line_pointer = saved_char;
27505 ignore_rest_of_line ();
27508 /* Parse a .arch directive. */
27511 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
27513 const struct arm_arch_option_table *opt;
27517 name = input_line_pointer;
27518 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27519 input_line_pointer++;
27520 saved_char = *input_line_pointer;
27521 *input_line_pointer = 0;
27523 /* Skip the first "all" entry. */
27524 for (opt = arm_archs + 1; opt->name != NULL; opt++)
27525 if (streq (opt->name, name))
27527 selected_arch = opt->value;
27528 selected_ext = arm_arch_none;
27529 selected_cpu = selected_arch;
27530 strcpy (selected_cpu_name, opt->name);
27531 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27532 *input_line_pointer = saved_char;
27533 demand_empty_rest_of_line ();
27537 as_bad (_("unknown architecture `%s'\n"), name);
27538 *input_line_pointer = saved_char;
27539 ignore_rest_of_line ();
27542 /* Parse a .object_arch directive. */
27545 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
27547 const struct arm_arch_option_table *opt;
27551 name = input_line_pointer;
27552 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27553 input_line_pointer++;
27554 saved_char = *input_line_pointer;
27555 *input_line_pointer = 0;
27557 /* Skip the first "all" entry. */
27558 for (opt = arm_archs + 1; opt->name != NULL; opt++)
27559 if (streq (opt->name, name))
27561 selected_object_arch = opt->value;
27562 *input_line_pointer = saved_char;
27563 demand_empty_rest_of_line ();
27567 as_bad (_("unknown architecture `%s'\n"), name);
27568 *input_line_pointer = saved_char;
27569 ignore_rest_of_line ();
27572 /* Parse a .arch_extension directive. */
27575 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
27577 const struct arm_option_extension_value_table *opt;
27580 int adding_value = 1;
27582 name = input_line_pointer;
27583 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27584 input_line_pointer++;
27585 saved_char = *input_line_pointer;
27586 *input_line_pointer = 0;
27588 if (strlen (name) >= 2
27589 && strncmp (name, "no", 2) == 0)
27595 for (opt = arm_extensions; opt->name != NULL; opt++)
27596 if (streq (opt->name, name))
27598 int i, nb_allowed_archs =
27599 sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
27600 for (i = 0; i < nb_allowed_archs; i++)
27603 if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
27605 if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
27609 if (i == nb_allowed_archs)
27611 as_bad (_("architectural extension `%s' is not allowed for the "
27612 "current base architecture"), name);
27617 ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
27620 ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
27622 ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
27623 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27624 *input_line_pointer = saved_char;
27625 demand_empty_rest_of_line ();
27626 /* Allowing Thumb division instructions for ARMv7 in autodetection rely
27627 on this return so that duplicate extensions (extensions with the
27628 same name as a previous extension in the list) are not considered
27629 for command-line parsing. */
27633 if (opt->name == NULL)
27634 as_bad (_("unknown architecture extension `%s'\n"), name);
27636 *input_line_pointer = saved_char;
27637 ignore_rest_of_line ();
27640 /* Parse a .fpu directive. */
27643 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
27645 const struct arm_option_fpu_value_table *opt;
27649 name = input_line_pointer;
27650 while (*input_line_pointer && !ISSPACE (*input_line_pointer))
27651 input_line_pointer++;
27652 saved_char = *input_line_pointer;
27653 *input_line_pointer = 0;
27655 for (opt = arm_fpus; opt->name != NULL; opt++)
27656 if (streq (opt->name, name))
27658 selected_fpu = opt->value;
27659 #ifndef CPU_DEFAULT
27660 if (no_cpu_selected ())
27661 ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
27664 ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
27665 *input_line_pointer = saved_char;
27666 demand_empty_rest_of_line ();
27670 as_bad (_("unknown floating point format `%s'\n"), name);
27671 *input_line_pointer = saved_char;
27672 ignore_rest_of_line ();
27675 /* Copy symbol information. */
27678 arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
27680 ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
27684 /* Given a symbolic attribute NAME, return the proper integer value.
27685 Returns -1 if the attribute is not known. */
27688 arm_convert_symbolic_attribute (const char *name)
27690 static const struct
27695 attribute_table[] =
27697 /* When you modify this table you should
27698 also modify the list in doc/c-arm.texi. */
27699 #define T(tag) {#tag, tag}
27700 T (Tag_CPU_raw_name),
27703 T (Tag_CPU_arch_profile),
27704 T (Tag_ARM_ISA_use),
27705 T (Tag_THUMB_ISA_use),
27709 T (Tag_Advanced_SIMD_arch),
27710 T (Tag_PCS_config),
27711 T (Tag_ABI_PCS_R9_use),
27712 T (Tag_ABI_PCS_RW_data),
27713 T (Tag_ABI_PCS_RO_data),
27714 T (Tag_ABI_PCS_GOT_use),
27715 T (Tag_ABI_PCS_wchar_t),
27716 T (Tag_ABI_FP_rounding),
27717 T (Tag_ABI_FP_denormal),
27718 T (Tag_ABI_FP_exceptions),
27719 T (Tag_ABI_FP_user_exceptions),
27720 T (Tag_ABI_FP_number_model),
27721 T (Tag_ABI_align_needed),
27722 T (Tag_ABI_align8_needed),
27723 T (Tag_ABI_align_preserved),
27724 T (Tag_ABI_align8_preserved),
27725 T (Tag_ABI_enum_size),
27726 T (Tag_ABI_HardFP_use),
27727 T (Tag_ABI_VFP_args),
27728 T (Tag_ABI_WMMX_args),
27729 T (Tag_ABI_optimization_goals),
27730 T (Tag_ABI_FP_optimization_goals),
27731 T (Tag_compatibility),
27732 T (Tag_CPU_unaligned_access),
27733 T (Tag_FP_HP_extension),
27734 T (Tag_VFP_HP_extension),
27735 T (Tag_ABI_FP_16bit_format),
27736 T (Tag_MPextension_use),
27738 T (Tag_nodefaults),
27739 T (Tag_also_compatible_with),
27740 T (Tag_conformance),
27742 T (Tag_Virtualization_use),
27743 T (Tag_DSP_extension),
27744 /* We deliberately do not include Tag_MPextension_use_legacy. */
27752 for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
27753 if (streq (name, attribute_table[i].name))
27754 return attribute_table[i].tag;
27759 /* Apply sym value for relocations only in the case that they are for
27760 local symbols in the same segment as the fixup and you have the
27761 respective architectural feature for blx and simple switches. */
27764 arm_apply_sym_value (struct fix * fixP, segT this_seg)
27767 && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
27768 /* PR 17444: If the local symbol is in a different section then a reloc
27769 will always be generated for it, so applying the symbol value now
27770 will result in a double offset being stored in the relocation. */
27771 && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
27772 && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
27774 switch (fixP->fx_r_type)
27776 case BFD_RELOC_ARM_PCREL_BLX:
27777 case BFD_RELOC_THUMB_PCREL_BRANCH23:
27778 if (ARM_IS_FUNC (fixP->fx_addsy))
27782 case BFD_RELOC_ARM_PCREL_CALL:
27783 case BFD_RELOC_THUMB_PCREL_BLX:
27784 if (THUMB_IS_FUNC (fixP->fx_addsy))
27795 #endif /* OBJ_ELF */