1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
33 #include "safe-ctype.h"
35 /* Need TARGET_CPU. */
42 #include "opcode/arm.h"
46 #include "dwarf2dbg.h"
47 #include "dw2gencfi.h"
50 /* XXX Set this to 1 after the next binutils release. */
51 #define WARN_DEPRECATED 0
54 /* Must be at least the size of the largest unwind opcode (currently two). */
55 #define ARM_OPCODE_CHUNK_SIZE 8
57 /* This structure holds the unwinding state. */
62 symbolS * table_entry;
63 symbolS * personality_routine;
64 int personality_index;
65 /* The segment containing the function. */
68 /* Opcodes generated from this function. */
69 unsigned char * opcodes;
72 /* The number of bytes pushed to the stack. */
74 /* We don't add stack adjustment opcodes immediately so that we can merge
75 multiple adjustments. We can also omit the final adjustment
76 when using a frame pointer. */
77 offsetT pending_offset;
78 /* These two fields are set by both unwind_movsp and unwind_setfp. They
79 hold the reg+offset to use when restoring sp from a frame pointer. */
82 /* Nonzero if an unwind_setfp directive has been seen. */
84 /* Nonzero if the last opcode restores sp from fp_reg. */
85 unsigned sp_restored:1;
88 /* Results from operand parsing worker functions. */
92 PARSE_OPERAND_SUCCESS,
94 PARSE_OPERAND_FAIL_NO_BACKTRACK
95 } parse_operand_result;
97 /* Bit N indicates that an R_ARM_NONE relocation has been output for
98 __aeabi_unwind_cpp_prN already if set. This enables dependencies to be
99 emitted only once per section, to save unnecessary bloat. */
100 static unsigned int marked_pr_dependency = 0;
107 ARM_FLOAT_ABI_SOFTFP,
111 /* Types of processor to assemble for. */
113 #if defined __XSCALE__
114 #define CPU_DEFAULT ARM_ARCH_XSCALE
116 #if defined __thumb__
117 #define CPU_DEFAULT ARM_ARCH_V5T
124 # define FPU_DEFAULT FPU_ARCH_FPA
125 # elif defined (TE_NetBSD)
127 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
129 /* Legacy a.out format. */
130 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
132 # elif defined (TE_VXWORKS)
133 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
135 /* For backwards compatibility, default to FPA. */
136 # define FPU_DEFAULT FPU_ARCH_FPA
138 #endif /* ifndef FPU_DEFAULT */
140 #define streq(a, b) (strcmp (a, b) == 0)
142 static arm_feature_set cpu_variant;
143 static arm_feature_set arm_arch_used;
144 static arm_feature_set thumb_arch_used;
146 /* Flags stored in private area of BFD structure. */
147 static int uses_apcs_26 = FALSE;
148 static int atpcs = FALSE;
149 static int support_interwork = FALSE;
150 static int uses_apcs_float = FALSE;
151 static int pic_code = FALSE;
153 /* Variables that we set while parsing command-line options. Once all
154 options have been read we re-process these values to set the real
156 static const arm_feature_set *legacy_cpu = NULL;
157 static const arm_feature_set *legacy_fpu = NULL;
159 static const arm_feature_set *mcpu_cpu_opt = NULL;
160 static const arm_feature_set *mcpu_fpu_opt = NULL;
161 static const arm_feature_set *march_cpu_opt = NULL;
162 static const arm_feature_set *march_fpu_opt = NULL;
163 static const arm_feature_set *mfpu_opt = NULL;
164 static const arm_feature_set *object_arch = NULL;
166 /* Constants for known architecture features. */
167 static const arm_feature_set fpu_default = FPU_DEFAULT;
168 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
169 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
170 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
171 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
172 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
173 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
174 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
175 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
178 static const arm_feature_set cpu_default = CPU_DEFAULT;
181 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
182 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
183 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
184 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
185 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
186 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
187 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
188 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
189 static const arm_feature_set arm_ext_v4t_5 =
190 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
191 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
192 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
193 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
194 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
195 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
196 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
197 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
198 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
199 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
200 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
201 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
202 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
203 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
204 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
206 static const arm_feature_set arm_arch_any = ARM_ANY;
207 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
208 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
209 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
211 static const arm_feature_set arm_cext_iwmmxt2 =
212 ARM_FEATURE (0, ARM_CEXT_IWMMXT2);
213 static const arm_feature_set arm_cext_iwmmxt =
214 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
215 static const arm_feature_set arm_cext_xscale =
216 ARM_FEATURE (0, ARM_CEXT_XSCALE);
217 static const arm_feature_set arm_cext_maverick =
218 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
219 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
220 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
221 static const arm_feature_set fpu_vfp_ext_v1xd =
222 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
223 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
224 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
225 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
226 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
227 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
228 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
230 static int mfloat_abi_opt = -1;
231 /* Record user cpu selection for object attributes. */
232 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
233 /* Must be long enough to hold any of the names in arm_cpus. */
234 static char selected_cpu_name[16];
237 static int meabi_flags = EABI_DEFAULT;
239 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
244 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
245 symbolS * GOT_symbol;
248 /* 0: assemble for ARM,
249 1: assemble for Thumb,
250 2: assemble for Thumb even though target CPU does not support thumb
252 static int thumb_mode = 0;
254 /* If unified_syntax is true, we are processing the new unified
255 ARM/Thumb syntax. Important differences from the old ARM mode:
257 - Immediate operands do not require a # prefix.
258 - Conditional affixes always appear at the end of the
259 instruction. (For backward compatibility, those instructions
260 that formerly had them in the middle, continue to accept them
262 - The IT instruction may appear, and if it does is validated
263 against subsequent conditional affixes. It does not generate
266 Important differences from the old Thumb mode:
268 - Immediate operands do not require a # prefix.
269 - Most of the V6T2 instructions are only available in unified mode.
270 - The .N and .W suffixes are recognized and honored (it is an error
271 if they cannot be honored).
272 - All instructions set the flags if and only if they have an 's' affix.
273 - Conditional affixes may be used. They are validated against
274 preceding IT instructions. Unlike ARM mode, you cannot use a
275 conditional affix except in the scope of an IT instruction. */
277 static bfd_boolean unified_syntax = FALSE;
292 enum neon_el_type type;
296 #define NEON_MAX_TYPE_ELS 4
300 struct neon_type_el el[NEON_MAX_TYPE_ELS];
307 unsigned long instruction;
311 /* "uncond_value" is set to the value in place of the conditional field in
312 unconditional versions of the instruction, or -1 if nothing is
315 struct neon_type vectype;
316 /* Set to the opcode if the instruction needs relaxation.
317 Zero if the instruction is not relaxed. */
321 bfd_reloc_code_real_type type;
330 struct neon_type_el vectype;
331 unsigned present : 1; /* Operand present. */
332 unsigned isreg : 1; /* Operand was a register. */
333 unsigned immisreg : 1; /* .imm field is a second register. */
334 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
335 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
336 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
337 instructions. This allows us to disambiguate ARM <-> vector insns. */
338 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
339 unsigned isvec : 1; /* Is a single, double or quad VFP/Neon reg. */
340 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
341 unsigned issingle : 1; /* Operand is VFP single-precision register. */
342 unsigned hasreloc : 1; /* Operand has relocation suffix. */
343 unsigned writeback : 1; /* Operand has trailing ! */
344 unsigned preind : 1; /* Preindexed address. */
345 unsigned postind : 1; /* Postindexed address. */
346 unsigned negative : 1; /* Index register was negated. */
347 unsigned shifted : 1; /* Shift applied to operation. */
348 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
352 static struct arm_it inst;
354 #define NUM_FLOAT_VALS 8
356 const char * fp_const[] =
358 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
361 /* Number of littlenums required to hold an extended precision number. */
362 #define MAX_LITTLENUMS 6
364 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
374 #define CP_T_X 0x00008000
375 #define CP_T_Y 0x00400000
377 #define CONDS_BIT 0x00100000
378 #define LOAD_BIT 0x00100000
380 #define DOUBLE_LOAD_FLAG 0x00000001
384 const char * template;
388 #define COND_ALWAYS 0xE
392 const char *template;
396 struct asm_barrier_opt
398 const char *template;
402 /* The bit that distinguishes CPSR and SPSR. */
403 #define SPSR_BIT (1 << 22)
405 /* The individual PSR flag bits. */
406 #define PSR_c (1 << 16)
407 #define PSR_x (1 << 17)
408 #define PSR_s (1 << 18)
409 #define PSR_f (1 << 19)
414 bfd_reloc_code_real_type reloc;
419 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
420 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
425 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
428 /* Bits for DEFINED field in neon_typed_alias. */
429 #define NTA_HASTYPE 1
430 #define NTA_HASINDEX 2
432 struct neon_typed_alias
434 unsigned char defined;
436 struct neon_type_el eltype;
439 /* ARM register categories. This includes coprocessor numbers and various
440 architecture extensions' registers. */
466 /* Structure for a hash table entry for a register.
467 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
468 information which states whether a vector type or index is specified (for a
469 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
473 unsigned char number;
475 unsigned char builtin;
476 struct neon_typed_alias *neon;
479 /* Diagnostics used when we don't get a register of the expected type. */
480 const char *const reg_expected_msgs[] =
482 N_("ARM register expected"),
483 N_("bad or missing co-processor number"),
484 N_("co-processor register expected"),
485 N_("FPA register expected"),
486 N_("VFP single precision register expected"),
487 N_("VFP/Neon double precision register expected"),
488 N_("Neon quad precision register expected"),
489 N_("VFP single or double precision register expected"),
490 N_("Neon double or quad precision register expected"),
491 N_("VFP single, double or Neon quad precision register expected"),
492 N_("VFP system register expected"),
493 N_("Maverick MVF register expected"),
494 N_("Maverick MVD register expected"),
495 N_("Maverick MVFX register expected"),
496 N_("Maverick MVDX register expected"),
497 N_("Maverick MVAX register expected"),
498 N_("Maverick DSPSC register expected"),
499 N_("iWMMXt data register expected"),
500 N_("iWMMXt control register expected"),
501 N_("iWMMXt scalar register expected"),
502 N_("XScale accumulator register expected"),
505 /* Some well known registers that we refer to directly elsewhere. */
510 /* ARM instructions take 4bytes in the object file, Thumb instructions
516 /* Basic string to match. */
517 const char *template;
519 /* Parameters to instruction. */
520 unsigned char operands[8];
522 /* Conditional tag - see opcode_lookup. */
523 unsigned int tag : 4;
525 /* Basic instruction code. */
526 unsigned int avalue : 28;
528 /* Thumb-format instruction code. */
531 /* Which architecture variant provides this instruction. */
532 const arm_feature_set *avariant;
533 const arm_feature_set *tvariant;
535 /* Function to call to encode instruction in ARM format. */
536 void (* aencode) (void);
538 /* Function to call to encode instruction in Thumb format. */
539 void (* tencode) (void);
542 /* Defines for various bits that we will want to toggle. */
543 #define INST_IMMEDIATE 0x02000000
544 #define OFFSET_REG 0x02000000
545 #define HWOFFSET_IMM 0x00400000
546 #define SHIFT_BY_REG 0x00000010
547 #define PRE_INDEX 0x01000000
548 #define INDEX_UP 0x00800000
549 #define WRITE_BACK 0x00200000
550 #define LDM_TYPE_2_OR_3 0x00400000
552 #define LITERAL_MASK 0xf000f000
553 #define OPCODE_MASK 0xfe1fffff
554 #define V4_STR_BIT 0x00000020
556 #define DATA_OP_SHIFT 21
558 #define T2_OPCODE_MASK 0xfe1fffff
559 #define T2_DATA_OP_SHIFT 21
561 /* Codes to distinguish the arithmetic instructions. */
572 #define OPCODE_CMP 10
573 #define OPCODE_CMN 11
574 #define OPCODE_ORR 12
575 #define OPCODE_MOV 13
576 #define OPCODE_BIC 14
577 #define OPCODE_MVN 15
579 #define T2_OPCODE_AND 0
580 #define T2_OPCODE_BIC 1
581 #define T2_OPCODE_ORR 2
582 #define T2_OPCODE_ORN 3
583 #define T2_OPCODE_EOR 4
584 #define T2_OPCODE_ADD 8
585 #define T2_OPCODE_ADC 10
586 #define T2_OPCODE_SBC 11
587 #define T2_OPCODE_SUB 13
588 #define T2_OPCODE_RSB 14
590 #define T_OPCODE_MUL 0x4340
591 #define T_OPCODE_TST 0x4200
592 #define T_OPCODE_CMN 0x42c0
593 #define T_OPCODE_NEG 0x4240
594 #define T_OPCODE_MVN 0x43c0
596 #define T_OPCODE_ADD_R3 0x1800
597 #define T_OPCODE_SUB_R3 0x1a00
598 #define T_OPCODE_ADD_HI 0x4400
599 #define T_OPCODE_ADD_ST 0xb000
600 #define T_OPCODE_SUB_ST 0xb080
601 #define T_OPCODE_ADD_SP 0xa800
602 #define T_OPCODE_ADD_PC 0xa000
603 #define T_OPCODE_ADD_I8 0x3000
604 #define T_OPCODE_SUB_I8 0x3800
605 #define T_OPCODE_ADD_I3 0x1c00
606 #define T_OPCODE_SUB_I3 0x1e00
608 #define T_OPCODE_ASR_R 0x4100
609 #define T_OPCODE_LSL_R 0x4080
610 #define T_OPCODE_LSR_R 0x40c0
611 #define T_OPCODE_ROR_R 0x41c0
612 #define T_OPCODE_ASR_I 0x1000
613 #define T_OPCODE_LSL_I 0x0000
614 #define T_OPCODE_LSR_I 0x0800
616 #define T_OPCODE_MOV_I8 0x2000
617 #define T_OPCODE_CMP_I8 0x2800
618 #define T_OPCODE_CMP_LR 0x4280
619 #define T_OPCODE_MOV_HR 0x4600
620 #define T_OPCODE_CMP_HR 0x4500
622 #define T_OPCODE_LDR_PC 0x4800
623 #define T_OPCODE_LDR_SP 0x9800
624 #define T_OPCODE_STR_SP 0x9000
625 #define T_OPCODE_LDR_IW 0x6800
626 #define T_OPCODE_STR_IW 0x6000
627 #define T_OPCODE_LDR_IH 0x8800
628 #define T_OPCODE_STR_IH 0x8000
629 #define T_OPCODE_LDR_IB 0x7800
630 #define T_OPCODE_STR_IB 0x7000
631 #define T_OPCODE_LDR_RW 0x5800
632 #define T_OPCODE_STR_RW 0x5000
633 #define T_OPCODE_LDR_RH 0x5a00
634 #define T_OPCODE_STR_RH 0x5200
635 #define T_OPCODE_LDR_RB 0x5c00
636 #define T_OPCODE_STR_RB 0x5400
638 #define T_OPCODE_PUSH 0xb400
639 #define T_OPCODE_POP 0xbc00
641 #define T_OPCODE_BRANCH 0xe000
643 #define THUMB_SIZE 2 /* Size of thumb instruction. */
644 #define THUMB_PP_PC_LR 0x0100
645 #define THUMB_LOAD_BIT 0x0800
646 #define THUMB2_LOAD_BIT 0x00100000
648 #define BAD_ARGS _("bad arguments to instruction")
649 #define BAD_PC _("r15 not allowed here")
650 #define BAD_COND _("instruction cannot be conditional")
651 #define BAD_OVERLAP _("registers may not be the same")
652 #define BAD_HIREG _("lo register required")
653 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
654 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode");
655 #define BAD_BRANCH _("branch must be last instruction in IT block")
656 #define BAD_NOT_IT _("instruction not allowed in IT block")
657 #define BAD_FPU _("selected FPU does not support instruction")
659 static struct hash_control *arm_ops_hsh;
660 static struct hash_control *arm_cond_hsh;
661 static struct hash_control *arm_shift_hsh;
662 static struct hash_control *arm_psr_hsh;
663 static struct hash_control *arm_v7m_psr_hsh;
664 static struct hash_control *arm_reg_hsh;
665 static struct hash_control *arm_reloc_hsh;
666 static struct hash_control *arm_barrier_opt_hsh;
668 /* Stuff needed to resolve the label ambiguity
678 symbolS * last_label_seen;
679 static int label_is_thumb_function_name = FALSE;
681 /* Literal pool structure. Held on a per-section
682 and per-sub-section basis. */
684 #define MAX_LITERAL_POOL_SIZE 1024
685 typedef struct literal_pool
687 expressionS literals [MAX_LITERAL_POOL_SIZE];
688 unsigned int next_free_entry;
693 struct literal_pool * next;
696 /* Pointer to a linked list of literal pools. */
697 literal_pool * list_of_pools = NULL;
699 /* State variables for IT block handling. */
700 static bfd_boolean current_it_mask = 0;
701 static int current_cc;
706 /* This array holds the chars that always start a comment. If the
707 pre-processor is disabled, these aren't very useful. */
708 const char comment_chars[] = "@";
710 /* This array holds the chars that only start a comment at the beginning of
711 a line. If the line seems to have the form '# 123 filename'
712 .line and .file directives will appear in the pre-processed output. */
713 /* Note that input_file.c hand checks for '#' at the beginning of the
714 first line of the input file. This is because the compiler outputs
715 #NO_APP at the beginning of its output. */
716 /* Also note that comments like this one will always work. */
717 const char line_comment_chars[] = "#";
719 const char line_separator_chars[] = ";";
721 /* Chars that can be used to separate mant
722 from exp in floating point numbers. */
723 const char EXP_CHARS[] = "eE";
725 /* Chars that mean this number is a floating point constant. */
729 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
731 /* Prefix characters that indicate the start of an immediate
733 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
735 /* Separator character handling. */
737 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
740 skip_past_char (char ** str, char c)
750 #define skip_past_comma(str) skip_past_char (str, ',')
752 /* Arithmetic expressions (possibly involving symbols). */
754 /* Return TRUE if anything in the expression is a bignum. */
757 walk_no_bignums (symbolS * sp)
759 if (symbol_get_value_expression (sp)->X_op == O_big)
762 if (symbol_get_value_expression (sp)->X_add_symbol)
764 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
765 || (symbol_get_value_expression (sp)->X_op_symbol
766 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
772 static int in_my_get_expression = 0;
774 /* Third argument to my_get_expression. */
775 #define GE_NO_PREFIX 0
776 #define GE_IMM_PREFIX 1
777 #define GE_OPT_PREFIX 2
778 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
779 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
780 #define GE_OPT_PREFIX_BIG 3
783 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
788 /* In unified syntax, all prefixes are optional. */
790 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
795 case GE_NO_PREFIX: break;
797 if (!is_immediate_prefix (**str))
799 inst.error = _("immediate expression requires a # prefix");
805 case GE_OPT_PREFIX_BIG:
806 if (is_immediate_prefix (**str))
812 memset (ep, 0, sizeof (expressionS));
814 save_in = input_line_pointer;
815 input_line_pointer = *str;
816 in_my_get_expression = 1;
817 seg = expression (ep);
818 in_my_get_expression = 0;
820 if (ep->X_op == O_illegal)
822 /* We found a bad expression in md_operand(). */
823 *str = input_line_pointer;
824 input_line_pointer = save_in;
825 if (inst.error == NULL)
826 inst.error = _("bad expression");
831 if (seg != absolute_section
832 && seg != text_section
833 && seg != data_section
834 && seg != bss_section
835 && seg != undefined_section)
837 inst.error = _("bad segment");
838 *str = input_line_pointer;
839 input_line_pointer = save_in;
844 /* Get rid of any bignums now, so that we don't generate an error for which
845 we can't establish a line number later on. Big numbers are never valid
846 in instructions, which is where this routine is always called. */
847 if (prefix_mode != GE_OPT_PREFIX_BIG
848 && (ep->X_op == O_big
850 && (walk_no_bignums (ep->X_add_symbol)
852 && walk_no_bignums (ep->X_op_symbol))))))
854 inst.error = _("invalid constant");
855 *str = input_line_pointer;
856 input_line_pointer = save_in;
860 *str = input_line_pointer;
861 input_line_pointer = save_in;
865 /* Turn a string in input_line_pointer into a floating point constant
866 of type TYPE, and store the appropriate bytes in *LITP. The number
867 of LITTLENUMS emitted is stored in *SIZEP. An error message is
868 returned, or NULL on OK.
870 Note that fp constants aren't represent in the normal way on the ARM.
871 In big endian mode, things are as expected. However, in little endian
872 mode fp constants are big-endian word-wise, and little-endian byte-wise
873 within the words. For example, (double) 1.1 in big endian mode is
874 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
875 the byte sequence 99 99 f1 3f 9a 99 99 99.
877 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
880 md_atof (int type, char * litP, int * sizeP)
883 LITTLENUM_TYPE words[MAX_LITTLENUMS];
915 return _("bad call to MD_ATOF()");
918 t = atof_ieee (input_line_pointer, type, words);
920 input_line_pointer = t;
923 if (target_big_endian)
925 for (i = 0; i < prec; i++)
927 md_number_to_chars (litP, (valueT) words[i], 2);
933 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
934 for (i = prec - 1; i >= 0; i--)
936 md_number_to_chars (litP, (valueT) words[i], 2);
940 /* For a 4 byte float the order of elements in `words' is 1 0.
941 For an 8 byte float the order is 1 0 3 2. */
942 for (i = 0; i < prec; i += 2)
944 md_number_to_chars (litP, (valueT) words[i + 1], 2);
945 md_number_to_chars (litP + 2, (valueT) words[i], 2);
953 /* We handle all bad expressions here, so that we can report the faulty
954 instruction in the error message. */
956 md_operand (expressionS * expr)
958 if (in_my_get_expression)
959 expr->X_op = O_illegal;
962 /* Immediate values. */
964 /* Generic immediate-value read function for use in directives.
965 Accepts anything that 'expression' can fold to a constant.
966 *val receives the number. */
969 immediate_for_directive (int *val)
972 exp.X_op = O_illegal;
974 if (is_immediate_prefix (*input_line_pointer))
976 input_line_pointer++;
980 if (exp.X_op != O_constant)
982 as_bad (_("expected #constant"));
983 ignore_rest_of_line ();
986 *val = exp.X_add_number;
991 /* Register parsing. */
993 /* Generic register parser. CCP points to what should be the
994 beginning of a register name. If it is indeed a valid register
995 name, advance CCP over it and return the reg_entry structure;
996 otherwise return NULL. Does not issue diagnostics. */
998 static struct reg_entry *
999 arm_reg_parse_multi (char **ccp)
1003 struct reg_entry *reg;
1005 #ifdef REGISTER_PREFIX
1006 if (*start != REGISTER_PREFIX)
1010 #ifdef OPTIONAL_REGISTER_PREFIX
1011 if (*start == OPTIONAL_REGISTER_PREFIX)
1016 if (!ISALPHA (*p) || !is_name_beginner (*p))
1021 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1023 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1033 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1034 enum arm_reg_type type)
1036 /* Alternative syntaxes are accepted for a few register classes. */
1043 /* Generic coprocessor register names are allowed for these. */
1044 if (reg && reg->type == REG_TYPE_CN)
1049 /* For backward compatibility, a bare number is valid here. */
1051 unsigned long processor = strtoul (start, ccp, 10);
1052 if (*ccp != start && processor <= 15)
1056 case REG_TYPE_MMXWC:
1057 /* WC includes WCG. ??? I'm not sure this is true for all
1058 instructions that take WC registers. */
1059 if (reg && reg->type == REG_TYPE_MMXWCG)
1070 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1071 return value is the register number or FAIL. */
1074 arm_reg_parse (char **ccp, enum arm_reg_type type)
1077 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1080 /* Do not allow a scalar (reg+index) to parse as a register. */
1081 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1084 if (reg && reg->type == type)
1087 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1094 /* Parse a Neon type specifier. *STR should point at the leading '.'
1095 character. Does no verification at this stage that the type fits the opcode
1102 Can all be legally parsed by this function.
1104 Fills in neon_type struct pointer with parsed information, and updates STR
1105 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1106 type, FAIL if not. */
1109 parse_neon_type (struct neon_type *type, char **str)
1116 while (type->elems < NEON_MAX_TYPE_ELS)
1118 enum neon_el_type thistype = NT_untyped;
1119 unsigned thissize = -1u;
1126 /* Just a size without an explicit type. */
1130 switch (TOLOWER (*ptr))
1132 case 'i': thistype = NT_integer; break;
1133 case 'f': thistype = NT_float; break;
1134 case 'p': thistype = NT_poly; break;
1135 case 's': thistype = NT_signed; break;
1136 case 'u': thistype = NT_unsigned; break;
1138 thistype = NT_float;
1143 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1149 /* .f is an abbreviation for .f32. */
1150 if (thistype == NT_float && !ISDIGIT (*ptr))
1155 thissize = strtoul (ptr, &ptr, 10);
1157 if (thissize != 8 && thissize != 16 && thissize != 32
1160 as_bad (_("bad size %d in type specifier"), thissize);
1168 type->el[type->elems].type = thistype;
1169 type->el[type->elems].size = thissize;
1174 /* Empty/missing type is not a successful parse. */
1175 if (type->elems == 0)
1183 /* Errors may be set multiple times during parsing or bit encoding
1184 (particularly in the Neon bits), but usually the earliest error which is set
1185 will be the most meaningful. Avoid overwriting it with later (cascading)
1186 errors by calling this function. */
1189 first_error (const char *err)
1195 /* Parse a single type, e.g. ".s32", leading period included. */
1197 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1200 struct neon_type optype;
1204 if (parse_neon_type (&optype, &str) == SUCCESS)
1206 if (optype.elems == 1)
1207 *vectype = optype.el[0];
1210 first_error (_("only one type should be specified for operand"));
1216 first_error (_("vector type expected"));
1228 /* Special meanings for indices (which have a range of 0-7), which will fit into
1231 #define NEON_ALL_LANES 15
1232 #define NEON_INTERLEAVE_LANES 14
1234 /* Parse either a register or a scalar, with an optional type. Return the
1235 register number, and optionally fill in the actual type of the register
1236 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1237 type/index information in *TYPEINFO. */
1240 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1241 enum arm_reg_type *rtype,
1242 struct neon_typed_alias *typeinfo)
1245 struct reg_entry *reg = arm_reg_parse_multi (&str);
1246 struct neon_typed_alias atype;
1247 struct neon_type_el parsetype;
1251 atype.eltype.type = NT_invtype;
1252 atype.eltype.size = -1;
1254 /* Try alternate syntax for some types of register. Note these are mutually
1255 exclusive with the Neon syntax extensions. */
1258 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1266 /* Undo polymorphism when a set of register types may be accepted. */
1267 if ((type == REG_TYPE_NDQ
1268 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1269 || (type == REG_TYPE_VFSD
1270 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
1271 || (type == REG_TYPE_NSDQ
1272 && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
1273 || reg->type == REG_TYPE_NQ)))
1276 if (type != reg->type)
1282 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1284 if ((atype.defined & NTA_HASTYPE) != 0)
1286 first_error (_("can't redefine type for operand"));
1289 atype.defined |= NTA_HASTYPE;
1290 atype.eltype = parsetype;
1293 if (skip_past_char (&str, '[') == SUCCESS)
1295 if (type != REG_TYPE_VFD)
1297 first_error (_("only D registers may be indexed"));
1301 if ((atype.defined & NTA_HASINDEX) != 0)
1303 first_error (_("can't change index for operand"));
1307 atype.defined |= NTA_HASINDEX;
1309 if (skip_past_char (&str, ']') == SUCCESS)
1310 atype.index = NEON_ALL_LANES;
1315 my_get_expression (&exp, &str, GE_NO_PREFIX);
1317 if (exp.X_op != O_constant)
1319 first_error (_("constant expression required"));
1323 if (skip_past_char (&str, ']') == FAIL)
1326 atype.index = exp.X_add_number;
1341 /* Like arm_reg_parse, but allow allow the following extra features:
1342 - If RTYPE is non-zero, return the (possibly restricted) type of the
1343 register (e.g. Neon double or quad reg when either has been requested).
1344 - If this is a Neon vector type with additional type information, fill
1345 in the struct pointed to by VECTYPE (if non-NULL).
1346 This function will fault on encountering a scalar.
1350 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1351 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1353 struct neon_typed_alias atype;
1355 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1360 /* Do not allow a scalar (reg+index) to parse as a register. */
1361 if ((atype.defined & NTA_HASINDEX) != 0)
1363 first_error (_("register operand expected, but got scalar"));
1368 *vectype = atype.eltype;
1375 #define NEON_SCALAR_REG(X) ((X) >> 4)
1376 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1378 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1379 have enough information to be able to do a good job bounds-checking. So, we
1380 just do easy checks here, and do further checks later. */
1383 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1387 struct neon_typed_alias atype;
1389 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1391 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1394 if (atype.index == NEON_ALL_LANES)
1396 first_error (_("scalar must have an index"));
1399 else if (atype.index >= 64 / elsize)
1401 first_error (_("scalar index out of range"));
1406 *type = atype.eltype;
1410 return reg * 16 + atype.index;
1413 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1415 parse_reg_list (char ** strp)
1417 char * str = * strp;
1421 /* We come back here if we get ranges concatenated by '+' or '|'. */
1436 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1438 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1448 first_error (_("bad range in register list"));
1452 for (i = cur_reg + 1; i < reg; i++)
1454 if (range & (1 << i))
1456 (_("Warning: duplicated register (r%d) in register list"),
1464 if (range & (1 << reg))
1465 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1467 else if (reg <= cur_reg)
1468 as_tsktsk (_("Warning: register range not in ascending order"));
1473 while (skip_past_comma (&str) != FAIL
1474 || (in_range = 1, *str++ == '-'));
1479 first_error (_("missing `}'"));
1487 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1490 if (expr.X_op == O_constant)
1492 if (expr.X_add_number
1493 != (expr.X_add_number & 0x0000ffff))
1495 inst.error = _("invalid register mask");
1499 if ((range & expr.X_add_number) != 0)
1501 int regno = range & expr.X_add_number;
1504 regno = (1 << regno) - 1;
1506 (_("Warning: duplicated register (r%d) in register list"),
1510 range |= expr.X_add_number;
1514 if (inst.reloc.type != 0)
1516 inst.error = _("expression too complex");
1520 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1521 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1522 inst.reloc.pc_rel = 0;
1526 if (*str == '|' || *str == '+')
1532 while (another_range);
1538 /* Types of registers in a list. */
1547 /* Parse a VFP register list. If the string is invalid return FAIL.
1548 Otherwise return the number of registers, and set PBASE to the first
1549 register. Parses registers of type ETYPE.
1550 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1551 - Q registers can be used to specify pairs of D registers
1552 - { } can be omitted from around a singleton register list
1553 FIXME: This is not implemented, as it would require backtracking in
1556 This could be done (the meaning isn't really ambiguous), but doesn't
1557 fit in well with the current parsing framework.
1558 - 32 D registers may be used (also true for VFPv3).
1559 FIXME: Types are ignored in these register lists, which is probably a
1563 parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype)
1568 enum arm_reg_type regtype = 0;
1572 unsigned long mask = 0;
1577 inst.error = _("expecting {");
1586 regtype = REG_TYPE_VFS;
1591 regtype = REG_TYPE_VFD;
1594 case REGLIST_NEON_D:
1595 regtype = REG_TYPE_NDQ;
1599 if (etype != REGLIST_VFP_S)
1601 /* VFPv3 allows 32 D registers. */
1602 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1606 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1609 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1616 base_reg = max_regs;
1620 int setmask = 1, addregs = 1;
1622 new_base = arm_typed_reg_parse (&str, regtype, ®type, NULL);
1624 if (new_base == FAIL)
1626 first_error (_(reg_expected_msgs[regtype]));
1630 if (new_base >= max_regs)
1632 first_error (_("register out of range in list"));
1636 /* Note: a value of 2 * n is returned for the register Q<n>. */
1637 if (regtype == REG_TYPE_NQ)
1643 if (new_base < base_reg)
1644 base_reg = new_base;
1646 if (mask & (setmask << new_base))
1648 first_error (_("invalid register list"));
1652 if ((mask >> new_base) != 0 && ! warned)
1654 as_tsktsk (_("register list not in ascending order"));
1658 mask |= setmask << new_base;
1661 if (*str == '-') /* We have the start of a range expression */
1667 if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
1670 inst.error = gettext (reg_expected_msgs[regtype]);
1674 if (high_range >= max_regs)
1676 first_error (_("register out of range in list"));
1680 if (regtype == REG_TYPE_NQ)
1681 high_range = high_range + 1;
1683 if (high_range <= new_base)
1685 inst.error = _("register range not in ascending order");
1689 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1691 if (mask & (setmask << new_base))
1693 inst.error = _("invalid register list");
1697 mask |= setmask << new_base;
1702 while (skip_past_comma (&str) != FAIL);
1706 /* Sanity check -- should have raised a parse error above. */
1707 if (count == 0 || count > max_regs)
1712 /* Final test -- the registers must be consecutive. */
1714 for (i = 0; i < count; i++)
1716 if ((mask & (1u << i)) == 0)
1718 inst.error = _("non-contiguous register range");
1728 /* True if two alias types are the same. */
1731 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1739 if (a->defined != b->defined)
1742 if ((a->defined & NTA_HASTYPE) != 0
1743 && (a->eltype.type != b->eltype.type
1744 || a->eltype.size != b->eltype.size))
1747 if ((a->defined & NTA_HASINDEX) != 0
1748 && (a->index != b->index))
1754 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1755 The base register is put in *PBASE.
1756 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1758 The register stride (minus one) is put in bit 4 of the return value.
1759 Bits [6:5] encode the list length (minus one).
1760 The type of the list elements is put in *ELTYPE, if non-NULL. */
1762 #define NEON_LANE(X) ((X) & 0xf)
1763 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1764 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1767 parse_neon_el_struct_list (char **str, unsigned *pbase,
1768 struct neon_type_el *eltype)
1775 int leading_brace = 0;
1776 enum arm_reg_type rtype = REG_TYPE_NDQ;
1778 const char *const incr_error = "register stride must be 1 or 2";
1779 const char *const type_error = "mismatched element/structure types in list";
1780 struct neon_typed_alias firsttype;
1782 if (skip_past_char (&ptr, '{') == SUCCESS)
1787 struct neon_typed_alias atype;
1788 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1792 first_error (_(reg_expected_msgs[rtype]));
1799 if (rtype == REG_TYPE_NQ)
1806 else if (reg_incr == -1)
1808 reg_incr = getreg - base_reg;
1809 if (reg_incr < 1 || reg_incr > 2)
1811 first_error (_(incr_error));
1815 else if (getreg != base_reg + reg_incr * count)
1817 first_error (_(incr_error));
1821 if (!neon_alias_types_same (&atype, &firsttype))
1823 first_error (_(type_error));
1827 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1831 struct neon_typed_alias htype;
1832 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1834 lane = NEON_INTERLEAVE_LANES;
1835 else if (lane != NEON_INTERLEAVE_LANES)
1837 first_error (_(type_error));
1842 else if (reg_incr != 1)
1844 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1848 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1851 first_error (_(reg_expected_msgs[rtype]));
1854 if (!neon_alias_types_same (&htype, &firsttype))
1856 first_error (_(type_error));
1859 count += hireg + dregs - getreg;
1863 /* If we're using Q registers, we can't use [] or [n] syntax. */
1864 if (rtype == REG_TYPE_NQ)
1870 if ((atype.defined & NTA_HASINDEX) != 0)
1874 else if (lane != atype.index)
1876 first_error (_(type_error));
1880 else if (lane == -1)
1881 lane = NEON_INTERLEAVE_LANES;
1882 else if (lane != NEON_INTERLEAVE_LANES)
1884 first_error (_(type_error));
1889 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1891 /* No lane set by [x]. We must be interleaving structures. */
1893 lane = NEON_INTERLEAVE_LANES;
1896 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1897 || (count > 1 && reg_incr == -1))
1899 first_error (_("error parsing element/structure list"));
1903 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1905 first_error (_("expected }"));
1913 *eltype = firsttype.eltype;
1918 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1921 /* Parse an explicit relocation suffix on an expression. This is
1922 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1923 arm_reloc_hsh contains no entries, so this function can only
1924 succeed if there is no () after the word. Returns -1 on error,
1925 BFD_RELOC_UNUSED if there wasn't any suffix. */
1927 parse_reloc (char **str)
1929 struct reloc_entry *r;
1933 return BFD_RELOC_UNUSED;
1938 while (*q && *q != ')' && *q != ',')
1943 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1950 /* Directives: register aliases. */
1952 static struct reg_entry *
1953 insert_reg_alias (char *str, int number, int type)
1955 struct reg_entry *new;
1958 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1961 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1963 /* Only warn about a redefinition if it's not defined as the
1965 else if (new->number != number || new->type != type)
1966 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1971 name = xstrdup (str);
1972 new = xmalloc (sizeof (struct reg_entry));
1975 new->number = number;
1977 new->builtin = FALSE;
1980 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1987 insert_neon_reg_alias (char *str, int number, int type,
1988 struct neon_typed_alias *atype)
1990 struct reg_entry *reg = insert_reg_alias (str, number, type);
1994 first_error (_("attempt to redefine typed alias"));
2000 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
2001 *reg->neon = *atype;
2005 /* Look for the .req directive. This is of the form:
2007 new_register_name .req existing_register_name
2009 If we find one, or if it looks sufficiently like one that we want to
2010 handle any error here, return non-zero. Otherwise return zero. */
2013 create_register_alias (char * newname, char *p)
2015 struct reg_entry *old;
2016 char *oldname, *nbuf;
2019 /* The input scrubber ensures that whitespace after the mnemonic is
2020 collapsed to single spaces. */
2022 if (strncmp (oldname, " .req ", 6) != 0)
2026 if (*oldname == '\0')
2029 old = hash_find (arm_reg_hsh, oldname);
2032 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
2036 /* If TC_CASE_SENSITIVE is defined, then newname already points to
2037 the desired alias name, and p points to its end. If not, then
2038 the desired alias name is in the global original_case_string. */
2039 #ifdef TC_CASE_SENSITIVE
2042 newname = original_case_string;
2043 nlen = strlen (newname);
2046 nbuf = alloca (nlen + 1);
2047 memcpy (nbuf, newname, nlen);
2050 /* Create aliases under the new name as stated; an all-lowercase
2051 version of the new name; and an all-uppercase version of the new
2053 insert_reg_alias (nbuf, old->number, old->type);
2055 for (p = nbuf; *p; p++)
2058 if (strncmp (nbuf, newname, nlen))
2059 insert_reg_alias (nbuf, old->number, old->type);
2061 for (p = nbuf; *p; p++)
2064 if (strncmp (nbuf, newname, nlen))
2065 insert_reg_alias (nbuf, old->number, old->type);
2070 /* Create a Neon typed/indexed register alias using directives, e.g.:
2075 These typed registers can be used instead of the types specified after the
2076 Neon mnemonic, so long as all operands given have types. Types can also be
2077 specified directly, e.g.:
2078 vadd d0.s32, d1.s32, d2.s32
2082 create_neon_reg_alias (char *newname, char *p)
2084 enum arm_reg_type basetype;
2085 struct reg_entry *basereg;
2086 struct reg_entry mybasereg;
2087 struct neon_type ntype;
2088 struct neon_typed_alias typeinfo;
2089 char *namebuf, *nameend;
2092 typeinfo.defined = 0;
2093 typeinfo.eltype.type = NT_invtype;
2094 typeinfo.eltype.size = -1;
2095 typeinfo.index = -1;
2099 if (strncmp (p, " .dn ", 5) == 0)
2100 basetype = REG_TYPE_VFD;
2101 else if (strncmp (p, " .qn ", 5) == 0)
2102 basetype = REG_TYPE_NQ;
2111 basereg = arm_reg_parse_multi (&p);
2113 if (basereg && basereg->type != basetype)
2115 as_bad (_("bad type for register"));
2119 if (basereg == NULL)
2122 /* Try parsing as an integer. */
2123 my_get_expression (&exp, &p, GE_NO_PREFIX);
2124 if (exp.X_op != O_constant)
2126 as_bad (_("expression must be constant"));
2129 basereg = &mybasereg;
2130 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2136 typeinfo = *basereg->neon;
2138 if (parse_neon_type (&ntype, &p) == SUCCESS)
2140 /* We got a type. */
2141 if (typeinfo.defined & NTA_HASTYPE)
2143 as_bad (_("can't redefine the type of a register alias"));
2147 typeinfo.defined |= NTA_HASTYPE;
2148 if (ntype.elems != 1)
2150 as_bad (_("you must specify a single type only"));
2153 typeinfo.eltype = ntype.el[0];
2156 if (skip_past_char (&p, '[') == SUCCESS)
2159 /* We got a scalar index. */
2161 if (typeinfo.defined & NTA_HASINDEX)
2163 as_bad (_("can't redefine the index of a scalar alias"));
2167 my_get_expression (&exp, &p, GE_NO_PREFIX);
2169 if (exp.X_op != O_constant)
2171 as_bad (_("scalar index must be constant"));
2175 typeinfo.defined |= NTA_HASINDEX;
2176 typeinfo.index = exp.X_add_number;
2178 if (skip_past_char (&p, ']') == FAIL)
2180 as_bad (_("expecting ]"));
2185 namelen = nameend - newname;
2186 namebuf = alloca (namelen + 1);
2187 strncpy (namebuf, newname, namelen);
2188 namebuf[namelen] = '\0';
2190 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2191 typeinfo.defined != 0 ? &typeinfo : NULL);
2193 /* Insert name in all uppercase. */
2194 for (p = namebuf; *p; p++)
2197 if (strncmp (namebuf, newname, namelen))
2198 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2199 typeinfo.defined != 0 ? &typeinfo : NULL);
2201 /* Insert name in all lowercase. */
2202 for (p = namebuf; *p; p++)
2205 if (strncmp (namebuf, newname, namelen))
2206 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2207 typeinfo.defined != 0 ? &typeinfo : NULL);
2212 /* Should never be called, as .req goes between the alias and the
2213 register name, not at the beginning of the line. */
2215 s_req (int a ATTRIBUTE_UNUSED)
2217 as_bad (_("invalid syntax for .req directive"));
2221 s_dn (int a ATTRIBUTE_UNUSED)
2223 as_bad (_("invalid syntax for .dn directive"));
2227 s_qn (int a ATTRIBUTE_UNUSED)
2229 as_bad (_("invalid syntax for .qn directive"));
2232 /* The .unreq directive deletes an alias which was previously defined
2233 by .req. For example:
2239 s_unreq (int a ATTRIBUTE_UNUSED)
2244 name = input_line_pointer;
2246 while (*input_line_pointer != 0
2247 && *input_line_pointer != ' '
2248 && *input_line_pointer != '\n')
2249 ++input_line_pointer;
2251 saved_char = *input_line_pointer;
2252 *input_line_pointer = 0;
2255 as_bad (_("invalid syntax for .unreq directive"));
2258 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2261 as_bad (_("unknown register alias '%s'"), name);
2262 else if (reg->builtin)
2263 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2267 hash_delete (arm_reg_hsh, name);
2268 free ((char *) reg->name);
2275 *input_line_pointer = saved_char;
2276 demand_empty_rest_of_line ();
2279 /* Directives: Instruction set selection. */
2282 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2283 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2284 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2285 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2287 static enum mstate mapstate = MAP_UNDEFINED;
2290 mapping_state (enum mstate state)
2293 const char * symname;
2296 if (mapstate == state)
2297 /* The mapping symbol has already been emitted.
2298 There is nothing else to do. */
2307 type = BSF_NO_FLAGS;
2311 type = BSF_NO_FLAGS;
2315 type = BSF_NO_FLAGS;
2323 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2325 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2326 symbol_table_insert (symbolP);
2327 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2332 THUMB_SET_FUNC (symbolP, 0);
2333 ARM_SET_THUMB (symbolP, 0);
2334 ARM_SET_INTERWORK (symbolP, support_interwork);
2338 THUMB_SET_FUNC (symbolP, 1);
2339 ARM_SET_THUMB (symbolP, 1);
2340 ARM_SET_INTERWORK (symbolP, support_interwork);
2349 #define mapping_state(x) /* nothing */
2352 /* Find the real, Thumb encoded start of a Thumb function. */
2355 find_real_start (symbolS * symbolP)
2358 const char * name = S_GET_NAME (symbolP);
2359 symbolS * new_target;
2361 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2362 #define STUB_NAME ".real_start_of"
2367 /* The compiler may generate BL instructions to local labels because
2368 it needs to perform a branch to a far away location. These labels
2369 do not have a corresponding ".real_start_of" label. We check
2370 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2371 the ".real_start_of" convention for nonlocal branches. */
2372 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2375 real_start = ACONCAT ((STUB_NAME, name, NULL));
2376 new_target = symbol_find (real_start);
2378 if (new_target == NULL)
2380 as_warn ("Failed to find real start of function: %s\n", name);
2381 new_target = symbolP;
2388 opcode_select (int width)
2395 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2396 as_bad (_("selected processor does not support THUMB opcodes"));
2399 /* No need to force the alignment, since we will have been
2400 coming from ARM mode, which is word-aligned. */
2401 record_alignment (now_seg, 1);
2403 mapping_state (MAP_THUMB);
2409 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2410 as_bad (_("selected processor does not support ARM opcodes"));
2415 frag_align (2, 0, 0);
2417 record_alignment (now_seg, 1);
2419 mapping_state (MAP_ARM);
2423 as_bad (_("invalid instruction size selected (%d)"), width);
2428 s_arm (int ignore ATTRIBUTE_UNUSED)
2431 demand_empty_rest_of_line ();
2435 s_thumb (int ignore ATTRIBUTE_UNUSED)
2438 demand_empty_rest_of_line ();
2442 s_code (int unused ATTRIBUTE_UNUSED)
2446 temp = get_absolute_expression ();
2451 opcode_select (temp);
2455 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2460 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2462 /* If we are not already in thumb mode go into it, EVEN if
2463 the target processor does not support thumb instructions.
2464 This is used by gcc/config/arm/lib1funcs.asm for example
2465 to compile interworking support functions even if the
2466 target processor should not support interworking. */
2470 record_alignment (now_seg, 1);
2473 demand_empty_rest_of_line ();
2477 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2481 /* The following label is the name/address of the start of a Thumb function.
2482 We need to know this for the interworking support. */
2483 label_is_thumb_function_name = TRUE;
2486 /* Perform a .set directive, but also mark the alias as
2487 being a thumb function. */
2490 s_thumb_set (int equiv)
2492 /* XXX the following is a duplicate of the code for s_set() in read.c
2493 We cannot just call that code as we need to get at the symbol that
2500 /* Especial apologies for the random logic:
2501 This just grew, and could be parsed much more simply!
2503 name = input_line_pointer;
2504 delim = get_symbol_end ();
2505 end_name = input_line_pointer;
2508 if (*input_line_pointer != ',')
2511 as_bad (_("expected comma after name \"%s\""), name);
2513 ignore_rest_of_line ();
2517 input_line_pointer++;
2520 if (name[0] == '.' && name[1] == '\0')
2522 /* XXX - this should not happen to .thumb_set. */
2526 if ((symbolP = symbol_find (name)) == NULL
2527 && (symbolP = md_undefined_symbol (name)) == NULL)
2530 /* When doing symbol listings, play games with dummy fragments living
2531 outside the normal fragment chain to record the file and line info
2533 if (listing & LISTING_SYMBOLS)
2535 extern struct list_info_struct * listing_tail;
2536 fragS * dummy_frag = xmalloc (sizeof (fragS));
2538 memset (dummy_frag, 0, sizeof (fragS));
2539 dummy_frag->fr_type = rs_fill;
2540 dummy_frag->line = listing_tail;
2541 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2542 dummy_frag->fr_symbol = symbolP;
2546 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2549 /* "set" symbols are local unless otherwise specified. */
2550 SF_SET_LOCAL (symbolP);
2551 #endif /* OBJ_COFF */
2552 } /* Make a new symbol. */
2554 symbol_table_insert (symbolP);
2559 && S_IS_DEFINED (symbolP)
2560 && S_GET_SEGMENT (symbolP) != reg_section)
2561 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2563 pseudo_set (symbolP);
2565 demand_empty_rest_of_line ();
2567 /* XXX Now we come to the Thumb specific bit of code. */
2569 THUMB_SET_FUNC (symbolP, 1);
2570 ARM_SET_THUMB (symbolP, 1);
2571 #if defined OBJ_ELF || defined OBJ_COFF
2572 ARM_SET_INTERWORK (symbolP, support_interwork);
2576 /* Directives: Mode selection. */
2578 /* .syntax [unified|divided] - choose the new unified syntax
2579 (same for Arm and Thumb encoding, modulo slight differences in what
2580 can be represented) or the old divergent syntax for each mode. */
2582 s_syntax (int unused ATTRIBUTE_UNUSED)
2586 name = input_line_pointer;
2587 delim = get_symbol_end ();
2589 if (!strcasecmp (name, "unified"))
2590 unified_syntax = TRUE;
2591 else if (!strcasecmp (name, "divided"))
2592 unified_syntax = FALSE;
2595 as_bad (_("unrecognized syntax mode \"%s\""), name);
2598 *input_line_pointer = delim;
2599 demand_empty_rest_of_line ();
2602 /* Directives: sectioning and alignment. */
2604 /* Same as s_align_ptwo but align 0 => align 2. */
2607 s_align (int unused ATTRIBUTE_UNUSED)
2611 long max_alignment = 15;
2613 temp = get_absolute_expression ();
2614 if (temp > max_alignment)
2615 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2618 as_bad (_("alignment negative. 0 assumed."));
2622 if (*input_line_pointer == ',')
2624 input_line_pointer++;
2625 temp_fill = get_absolute_expression ();
2633 /* Only make a frag if we HAVE to. */
2634 if (temp && !need_pass_2)
2635 frag_align (temp, (int) temp_fill, 0);
2636 demand_empty_rest_of_line ();
2638 record_alignment (now_seg, temp);
2642 s_bss (int ignore ATTRIBUTE_UNUSED)
2644 /* We don't support putting frags in the BSS segment, we fake it by
2645 marking in_bss, then looking at s_skip for clues. */
2646 subseg_set (bss_section, 0);
2647 demand_empty_rest_of_line ();
2648 mapping_state (MAP_DATA);
2652 s_even (int ignore ATTRIBUTE_UNUSED)
2654 /* Never make frag if expect extra pass. */
2656 frag_align (1, 0, 0);
2658 record_alignment (now_seg, 1);
2660 demand_empty_rest_of_line ();
2663 /* Directives: Literal pools. */
2665 static literal_pool *
2666 find_literal_pool (void)
2668 literal_pool * pool;
2670 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2672 if (pool->section == now_seg
2673 && pool->sub_section == now_subseg)
2680 static literal_pool *
2681 find_or_make_literal_pool (void)
2683 /* Next literal pool ID number. */
2684 static unsigned int latest_pool_num = 1;
2685 literal_pool * pool;
2687 pool = find_literal_pool ();
2691 /* Create a new pool. */
2692 pool = xmalloc (sizeof (* pool));
2696 pool->next_free_entry = 0;
2697 pool->section = now_seg;
2698 pool->sub_section = now_subseg;
2699 pool->next = list_of_pools;
2700 pool->symbol = NULL;
2702 /* Add it to the list. */
2703 list_of_pools = pool;
2706 /* New pools, and emptied pools, will have a NULL symbol. */
2707 if (pool->symbol == NULL)
2709 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2710 (valueT) 0, &zero_address_frag);
2711 pool->id = latest_pool_num ++;
2718 /* Add the literal in the global 'inst'
2719 structure to the relevent literal pool. */
2722 add_to_lit_pool (void)
2724 literal_pool * pool;
2727 pool = find_or_make_literal_pool ();
2729 /* Check if this literal value is already in the pool. */
2730 for (entry = 0; entry < pool->next_free_entry; entry ++)
2732 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2733 && (inst.reloc.exp.X_op == O_constant)
2734 && (pool->literals[entry].X_add_number
2735 == inst.reloc.exp.X_add_number)
2736 && (pool->literals[entry].X_unsigned
2737 == inst.reloc.exp.X_unsigned))
2740 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2741 && (inst.reloc.exp.X_op == O_symbol)
2742 && (pool->literals[entry].X_add_number
2743 == inst.reloc.exp.X_add_number)
2744 && (pool->literals[entry].X_add_symbol
2745 == inst.reloc.exp.X_add_symbol)
2746 && (pool->literals[entry].X_op_symbol
2747 == inst.reloc.exp.X_op_symbol))
2751 /* Do we need to create a new entry? */
2752 if (entry == pool->next_free_entry)
2754 if (entry >= MAX_LITERAL_POOL_SIZE)
2756 inst.error = _("literal pool overflow");
2760 pool->literals[entry] = inst.reloc.exp;
2761 pool->next_free_entry += 1;
2764 inst.reloc.exp.X_op = O_symbol;
2765 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2766 inst.reloc.exp.X_add_symbol = pool->symbol;
2771 /* Can't use symbol_new here, so have to create a symbol and then at
2772 a later date assign it a value. Thats what these functions do. */
2775 symbol_locate (symbolS * symbolP,
2776 const char * name, /* It is copied, the caller can modify. */
2777 segT segment, /* Segment identifier (SEG_<something>). */
2778 valueT valu, /* Symbol value. */
2779 fragS * frag) /* Associated fragment. */
2781 unsigned int name_length;
2782 char * preserved_copy_of_name;
2784 name_length = strlen (name) + 1; /* +1 for \0. */
2785 obstack_grow (¬es, name, name_length);
2786 preserved_copy_of_name = obstack_finish (¬es);
2788 #ifdef tc_canonicalize_symbol_name
2789 preserved_copy_of_name =
2790 tc_canonicalize_symbol_name (preserved_copy_of_name);
2793 S_SET_NAME (symbolP, preserved_copy_of_name);
2795 S_SET_SEGMENT (symbolP, segment);
2796 S_SET_VALUE (symbolP, valu);
2797 symbol_clear_list_pointers (symbolP);
2799 symbol_set_frag (symbolP, frag);
2801 /* Link to end of symbol chain. */
2803 extern int symbol_table_frozen;
2805 if (symbol_table_frozen)
2809 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2811 obj_symbol_new_hook (symbolP);
2813 #ifdef tc_symbol_new_hook
2814 tc_symbol_new_hook (symbolP);
2818 verify_symbol_chain (symbol_rootP, symbol_lastP);
2819 #endif /* DEBUG_SYMS */
2824 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2827 literal_pool * pool;
2830 pool = find_literal_pool ();
2832 || pool->symbol == NULL
2833 || pool->next_free_entry == 0)
2836 mapping_state (MAP_DATA);
2838 /* Align pool as you have word accesses.
2839 Only make a frag if we have to. */
2841 frag_align (2, 0, 0);
2843 record_alignment (now_seg, 2);
2845 sprintf (sym_name, "$$lit_\002%x", pool->id);
2847 symbol_locate (pool->symbol, sym_name, now_seg,
2848 (valueT) frag_now_fix (), frag_now);
2849 symbol_table_insert (pool->symbol);
2851 ARM_SET_THUMB (pool->symbol, thumb_mode);
2853 #if defined OBJ_COFF || defined OBJ_ELF
2854 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2857 for (entry = 0; entry < pool->next_free_entry; entry ++)
2858 /* First output the expression in the instruction to the pool. */
2859 emit_expr (&(pool->literals[entry]), 4); /* .word */
2861 /* Mark the pool as empty. */
2862 pool->next_free_entry = 0;
2863 pool->symbol = NULL;
2867 /* Forward declarations for functions below, in the MD interface
2869 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2870 static valueT create_unwind_entry (int);
2871 static void start_unwind_section (const segT, int);
2872 static void add_unwind_opcode (valueT, int);
2873 static void flush_pending_unwind (void);
2875 /* Directives: Data. */
2878 s_arm_elf_cons (int nbytes)
2882 #ifdef md_flush_pending_output
2883 md_flush_pending_output ();
2886 if (is_it_end_of_statement ())
2888 demand_empty_rest_of_line ();
2892 #ifdef md_cons_align
2893 md_cons_align (nbytes);
2896 mapping_state (MAP_DATA);
2900 char *base = input_line_pointer;
2904 if (exp.X_op != O_symbol)
2905 emit_expr (&exp, (unsigned int) nbytes);
2908 char *before_reloc = input_line_pointer;
2909 reloc = parse_reloc (&input_line_pointer);
2912 as_bad (_("unrecognized relocation suffix"));
2913 ignore_rest_of_line ();
2916 else if (reloc == BFD_RELOC_UNUSED)
2917 emit_expr (&exp, (unsigned int) nbytes);
2920 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2921 int size = bfd_get_reloc_size (howto);
2923 if (reloc == BFD_RELOC_ARM_PLT32)
2925 as_bad (_("(plt) is only valid on branch targets"));
2926 reloc = BFD_RELOC_UNUSED;
2931 as_bad (_("%s relocations do not fit in %d bytes"),
2932 howto->name, nbytes);
2935 /* We've parsed an expression stopping at O_symbol.
2936 But there may be more expression left now that we
2937 have parsed the relocation marker. Parse it again.
2938 XXX Surely there is a cleaner way to do this. */
2939 char *p = input_line_pointer;
2941 char *save_buf = alloca (input_line_pointer - base);
2942 memcpy (save_buf, base, input_line_pointer - base);
2943 memmove (base + (input_line_pointer - before_reloc),
2944 base, before_reloc - base);
2946 input_line_pointer = base + (input_line_pointer-before_reloc);
2948 memcpy (base, save_buf, p - base);
2950 offset = nbytes - size;
2951 p = frag_more ((int) nbytes);
2952 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
2953 size, &exp, 0, reloc);
2958 while (*input_line_pointer++ == ',');
2960 /* Put terminator back into stream. */
2961 input_line_pointer --;
2962 demand_empty_rest_of_line ();
2966 /* Parse a .rel31 directive. */
2969 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2976 if (*input_line_pointer == '1')
2977 highbit = 0x80000000;
2978 else if (*input_line_pointer != '0')
2979 as_bad (_("expected 0 or 1"));
2981 input_line_pointer++;
2982 if (*input_line_pointer != ',')
2983 as_bad (_("missing comma"));
2984 input_line_pointer++;
2986 #ifdef md_flush_pending_output
2987 md_flush_pending_output ();
2990 #ifdef md_cons_align
2994 mapping_state (MAP_DATA);
2999 md_number_to_chars (p, highbit, 4);
3000 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
3001 BFD_RELOC_ARM_PREL31);
3003 demand_empty_rest_of_line ();
3006 /* Directives: AEABI stack-unwind tables. */
3008 /* Parse an unwind_fnstart directive. Simply records the current location. */
3011 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
3013 demand_empty_rest_of_line ();
3014 /* Mark the start of the function. */
3015 unwind.proc_start = expr_build_dot ();
3017 /* Reset the rest of the unwind info. */
3018 unwind.opcode_count = 0;
3019 unwind.table_entry = NULL;
3020 unwind.personality_routine = NULL;
3021 unwind.personality_index = -1;
3022 unwind.frame_size = 0;
3023 unwind.fp_offset = 0;
3026 unwind.sp_restored = 0;
3030 /* Parse a handlerdata directive. Creates the exception handling table entry
3031 for the function. */
3034 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
3036 demand_empty_rest_of_line ();
3037 if (unwind.table_entry)
3038 as_bad (_("dupicate .handlerdata directive"));
3040 create_unwind_entry (1);
3043 /* Parse an unwind_fnend directive. Generates the index table entry. */
3046 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3052 demand_empty_rest_of_line ();
3054 /* Add eh table entry. */
3055 if (unwind.table_entry == NULL)
3056 val = create_unwind_entry (0);
3060 /* Add index table entry. This is two words. */
3061 start_unwind_section (unwind.saved_seg, 1);
3062 frag_align (2, 0, 0);
3063 record_alignment (now_seg, 2);
3065 ptr = frag_more (8);
3066 where = frag_now_fix () - 8;
3068 /* Self relative offset of the function start. */
3069 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3070 BFD_RELOC_ARM_PREL31);
3072 /* Indicate dependency on EHABI-defined personality routines to the
3073 linker, if it hasn't been done already. */
3074 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3075 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3077 static const char *const name[] = {
3078 "__aeabi_unwind_cpp_pr0",
3079 "__aeabi_unwind_cpp_pr1",
3080 "__aeabi_unwind_cpp_pr2"
3082 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3083 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3084 marked_pr_dependency |= 1 << unwind.personality_index;
3085 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3086 = marked_pr_dependency;
3090 /* Inline exception table entry. */
3091 md_number_to_chars (ptr + 4, val, 4);
3093 /* Self relative offset of the table entry. */
3094 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3095 BFD_RELOC_ARM_PREL31);
3097 /* Restore the original section. */
3098 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3102 /* Parse an unwind_cantunwind directive. */
3105 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3107 demand_empty_rest_of_line ();
3108 if (unwind.personality_routine || unwind.personality_index != -1)
3109 as_bad (_("personality routine specified for cantunwind frame"));
3111 unwind.personality_index = -2;
3115 /* Parse a personalityindex directive. */
3118 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3122 if (unwind.personality_routine || unwind.personality_index != -1)
3123 as_bad (_("duplicate .personalityindex directive"));
3127 if (exp.X_op != O_constant
3128 || exp.X_add_number < 0 || exp.X_add_number > 15)
3130 as_bad (_("bad personality routine number"));
3131 ignore_rest_of_line ();
3135 unwind.personality_index = exp.X_add_number;
3137 demand_empty_rest_of_line ();
3141 /* Parse a personality directive. */
3144 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3148 if (unwind.personality_routine || unwind.personality_index != -1)
3149 as_bad (_("duplicate .personality directive"));
3151 name = input_line_pointer;
3152 c = get_symbol_end ();
3153 p = input_line_pointer;
3154 unwind.personality_routine = symbol_find_or_make (name);
3156 demand_empty_rest_of_line ();
3160 /* Parse a directive saving core registers. */
3163 s_arm_unwind_save_core (void)
3169 range = parse_reg_list (&input_line_pointer);
3172 as_bad (_("expected register list"));
3173 ignore_rest_of_line ();
3177 demand_empty_rest_of_line ();
3179 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3180 into .unwind_save {..., sp...}. We aren't bothered about the value of
3181 ip because it is clobbered by calls. */
3182 if (unwind.sp_restored && unwind.fp_reg == 12
3183 && (range & 0x3000) == 0x1000)
3185 unwind.opcode_count--;
3186 unwind.sp_restored = 0;
3187 range = (range | 0x2000) & ~0x1000;
3188 unwind.pending_offset = 0;
3194 /* See if we can use the short opcodes. These pop a block of up to 8
3195 registers starting with r4, plus maybe r14. */
3196 for (n = 0; n < 8; n++)
3198 /* Break at the first non-saved register. */
3199 if ((range & (1 << (n + 4))) == 0)
3202 /* See if there are any other bits set. */
3203 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3205 /* Use the long form. */
3206 op = 0x8000 | ((range >> 4) & 0xfff);
3207 add_unwind_opcode (op, 2);
3211 /* Use the short form. */
3213 op = 0xa8; /* Pop r14. */
3215 op = 0xa0; /* Do not pop r14. */
3217 add_unwind_opcode (op, 1);
3224 op = 0xb100 | (range & 0xf);
3225 add_unwind_opcode (op, 2);
3228 /* Record the number of bytes pushed. */
3229 for (n = 0; n < 16; n++)
3231 if (range & (1 << n))
3232 unwind.frame_size += 4;
3237 /* Parse a directive saving FPA registers. */
3240 s_arm_unwind_save_fpa (int reg)
3246 /* Get Number of registers to transfer. */
3247 if (skip_past_comma (&input_line_pointer) != FAIL)
3250 exp.X_op = O_illegal;
3252 if (exp.X_op != O_constant)
3254 as_bad (_("expected , <constant>"));
3255 ignore_rest_of_line ();
3259 num_regs = exp.X_add_number;
3261 if (num_regs < 1 || num_regs > 4)
3263 as_bad (_("number of registers must be in the range [1:4]"));
3264 ignore_rest_of_line ();
3268 demand_empty_rest_of_line ();
3273 op = 0xb4 | (num_regs - 1);
3274 add_unwind_opcode (op, 1);
3279 op = 0xc800 | (reg << 4) | (num_regs - 1);
3280 add_unwind_opcode (op, 2);
3282 unwind.frame_size += num_regs * 12;
3286 /* Parse a directive saving VFP registers for ARMv6 and above. */
3289 s_arm_unwind_save_vfp_armv6 (void)
3294 int num_vfpv3_regs = 0;
3295 int num_regs_below_16;
3297 count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D);
3300 as_bad (_("expected register list"));
3301 ignore_rest_of_line ();
3305 demand_empty_rest_of_line ();
3307 /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
3308 than FSTMX/FLDMX-style ones). */
3310 /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31. */
3312 num_vfpv3_regs = count;
3313 else if (start + count > 16)
3314 num_vfpv3_regs = start + count - 16;
3316 if (num_vfpv3_regs > 0)
3318 int start_offset = start > 16 ? start - 16 : 0;
3319 op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
3320 add_unwind_opcode (op, 2);
3323 /* Generate opcode for registers numbered in the range 0 .. 15. */
3324 num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
3325 assert (num_regs_below_16 + num_vfpv3_regs == count);
3326 if (num_regs_below_16 > 0)
3328 op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
3329 add_unwind_opcode (op, 2);
3332 unwind.frame_size += count * 8;
3336 /* Parse a directive saving VFP registers for pre-ARMv6. */
3339 s_arm_unwind_save_vfp (void)
3345 count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D);
3348 as_bad (_("expected register list"));
3349 ignore_rest_of_line ();
3353 demand_empty_rest_of_line ();
3358 op = 0xb8 | (count - 1);
3359 add_unwind_opcode (op, 1);
3364 op = 0xb300 | (reg << 4) | (count - 1);
3365 add_unwind_opcode (op, 2);
3367 unwind.frame_size += count * 8 + 4;
3371 /* Parse a directive saving iWMMXt data registers. */
3374 s_arm_unwind_save_mmxwr (void)
3382 if (*input_line_pointer == '{')
3383 input_line_pointer++;
3387 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3391 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3396 as_tsktsk (_("register list not in ascending order"));
3399 if (*input_line_pointer == '-')
3401 input_line_pointer++;
3402 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3405 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3408 else if (reg >= hi_reg)
3410 as_bad (_("bad register range"));
3413 for (; reg < hi_reg; reg++)
3417 while (skip_past_comma (&input_line_pointer) != FAIL);
3419 if (*input_line_pointer == '}')
3420 input_line_pointer++;
3422 demand_empty_rest_of_line ();
3424 /* Generate any deferred opcodes becuuse we're going to be looking at
3426 flush_pending_unwind ();
3428 for (i = 0; i < 16; i++)
3430 if (mask & (1 << i))
3431 unwind.frame_size += 8;
3434 /* Attempt to combine with a previous opcode. We do this because gcc
3435 likes to output separate unwind directives for a single block of
3437 if (unwind.opcode_count > 0)
3439 i = unwind.opcodes[unwind.opcode_count - 1];
3440 if ((i & 0xf8) == 0xc0)
3443 /* Only merge if the blocks are contiguous. */
3446 if ((mask & 0xfe00) == (1 << 9))
3448 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3449 unwind.opcode_count--;
3452 else if (i == 6 && unwind.opcode_count >= 2)
3454 i = unwind.opcodes[unwind.opcode_count - 2];
3458 op = 0xffff << (reg - 1);
3460 && ((mask & op) == (1u << (reg - 1))))
3462 op = (1 << (reg + i + 1)) - 1;
3463 op &= ~((1 << reg) - 1);
3465 unwind.opcode_count -= 2;
3472 /* We want to generate opcodes in the order the registers have been
3473 saved, ie. descending order. */
3474 for (reg = 15; reg >= -1; reg--)
3476 /* Save registers in blocks. */
3478 || !(mask & (1 << reg)))
3480 /* We found an unsaved reg. Generate opcodes to save the
3481 preceeding block. */
3487 op = 0xc0 | (hi_reg - 10);
3488 add_unwind_opcode (op, 1);
3493 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3494 add_unwind_opcode (op, 2);
3503 ignore_rest_of_line ();
3507 s_arm_unwind_save_mmxwcg (void)
3514 if (*input_line_pointer == '{')
3515 input_line_pointer++;
3519 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3523 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3529 as_tsktsk (_("register list not in ascending order"));
3532 if (*input_line_pointer == '-')
3534 input_line_pointer++;
3535 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3538 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3541 else if (reg >= hi_reg)
3543 as_bad (_("bad register range"));
3546 for (; reg < hi_reg; reg++)
3550 while (skip_past_comma (&input_line_pointer) != FAIL);
3552 if (*input_line_pointer == '}')
3553 input_line_pointer++;
3555 demand_empty_rest_of_line ();
3557 /* Generate any deferred opcodes becuuse we're going to be looking at
3559 flush_pending_unwind ();
3561 for (reg = 0; reg < 16; reg++)
3563 if (mask & (1 << reg))
3564 unwind.frame_size += 4;
3567 add_unwind_opcode (op, 2);
3570 ignore_rest_of_line ();
3574 /* Parse an unwind_save directive.
3575 If the argument is non-zero, this is a .vsave directive. */
3578 s_arm_unwind_save (int arch_v6)
3581 struct reg_entry *reg;
3582 bfd_boolean had_brace = FALSE;
3584 /* Figure out what sort of save we have. */
3585 peek = input_line_pointer;
3593 reg = arm_reg_parse_multi (&peek);
3597 as_bad (_("register expected"));
3598 ignore_rest_of_line ();
3607 as_bad (_("FPA .unwind_save does not take a register list"));
3608 ignore_rest_of_line ();
3611 s_arm_unwind_save_fpa (reg->number);
3614 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3617 s_arm_unwind_save_vfp_armv6 ();
3619 s_arm_unwind_save_vfp ();
3621 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3622 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3625 as_bad (_(".unwind_save does not support this kind of register"));
3626 ignore_rest_of_line ();
3631 /* Parse an unwind_movsp directive. */
3634 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3640 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3643 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3644 ignore_rest_of_line ();
3648 /* Optional constant. */
3649 if (skip_past_comma (&input_line_pointer) != FAIL)
3651 if (immediate_for_directive (&offset) == FAIL)
3657 demand_empty_rest_of_line ();
3659 if (reg == REG_SP || reg == REG_PC)
3661 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3665 if (unwind.fp_reg != REG_SP)
3666 as_bad (_("unexpected .unwind_movsp directive"));
3668 /* Generate opcode to restore the value. */
3670 add_unwind_opcode (op, 1);
3672 /* Record the information for later. */
3673 unwind.fp_reg = reg;
3674 unwind.fp_offset = unwind.frame_size - offset;
3675 unwind.sp_restored = 1;
3678 /* Parse an unwind_pad directive. */
3681 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3685 if (immediate_for_directive (&offset) == FAIL)
3690 as_bad (_("stack increment must be multiple of 4"));
3691 ignore_rest_of_line ();
3695 /* Don't generate any opcodes, just record the details for later. */
3696 unwind.frame_size += offset;
3697 unwind.pending_offset += offset;
3699 demand_empty_rest_of_line ();
3702 /* Parse an unwind_setfp directive. */
3705 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3711 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3712 if (skip_past_comma (&input_line_pointer) == FAIL)
3715 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3717 if (fp_reg == FAIL || sp_reg == FAIL)
3719 as_bad (_("expected <reg>, <reg>"));
3720 ignore_rest_of_line ();
3724 /* Optional constant. */
3725 if (skip_past_comma (&input_line_pointer) != FAIL)
3727 if (immediate_for_directive (&offset) == FAIL)
3733 demand_empty_rest_of_line ();
3735 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3737 as_bad (_("register must be either sp or set by a previous"
3738 "unwind_movsp directive"));
3742 /* Don't generate any opcodes, just record the information for later. */
3743 unwind.fp_reg = fp_reg;
3746 unwind.fp_offset = unwind.frame_size - offset;
3748 unwind.fp_offset -= offset;
3751 /* Parse an unwind_raw directive. */
3754 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3757 /* This is an arbitary limit. */
3758 unsigned char op[16];
3762 if (exp.X_op == O_constant
3763 && skip_past_comma (&input_line_pointer) != FAIL)
3765 unwind.frame_size += exp.X_add_number;
3769 exp.X_op = O_illegal;
3771 if (exp.X_op != O_constant)
3773 as_bad (_("expected <offset>, <opcode>"));
3774 ignore_rest_of_line ();
3780 /* Parse the opcode. */
3785 as_bad (_("unwind opcode too long"));
3786 ignore_rest_of_line ();
3788 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3790 as_bad (_("invalid unwind opcode"));
3791 ignore_rest_of_line ();
3794 op[count++] = exp.X_add_number;
3796 /* Parse the next byte. */
3797 if (skip_past_comma (&input_line_pointer) == FAIL)
3803 /* Add the opcode bytes in reverse order. */
3805 add_unwind_opcode (op[count], 1);
3807 demand_empty_rest_of_line ();
3811 /* Parse a .eabi_attribute directive. */
3814 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3817 bfd_boolean is_string;
3824 if (exp.X_op != O_constant)
3827 tag = exp.X_add_number;
3828 if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
3833 if (skip_past_comma (&input_line_pointer) == FAIL)
3835 if (tag == 32 || !is_string)
3838 if (exp.X_op != O_constant)
3840 as_bad (_("expected numeric constant"));
3841 ignore_rest_of_line ();
3844 i = exp.X_add_number;
3846 if (tag == Tag_compatibility
3847 && skip_past_comma (&input_line_pointer) == FAIL)
3849 as_bad (_("expected comma"));
3850 ignore_rest_of_line ();
3855 skip_whitespace(input_line_pointer);
3856 if (*input_line_pointer != '"')
3858 input_line_pointer++;
3859 s = input_line_pointer;
3860 while (*input_line_pointer && *input_line_pointer != '"')
3861 input_line_pointer++;
3862 if (*input_line_pointer != '"')
3864 saved_char = *input_line_pointer;
3865 *input_line_pointer = 0;
3873 if (tag == Tag_compatibility)
3874 elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
3876 elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
3878 elf32_arm_add_eabi_attr_int (stdoutput, tag, i);
3882 *input_line_pointer = saved_char;
3883 input_line_pointer++;
3885 demand_empty_rest_of_line ();
3888 as_bad (_("bad string constant"));
3889 ignore_rest_of_line ();
3892 as_bad (_("expected <tag> , <value>"));
3893 ignore_rest_of_line ();
3896 static void s_arm_arch (int);
3897 static void s_arm_object_arch (int);
3898 static void s_arm_cpu (int);
3899 static void s_arm_fpu (int);
3900 #endif /* OBJ_ELF */
3902 /* This table describes all the machine specific pseudo-ops the assembler
3903 has to support. The fields are:
3904 pseudo-op name without dot
3905 function to call to execute this pseudo-op
3906 Integer arg to pass to the function. */
3908 const pseudo_typeS md_pseudo_table[] =
3910 /* Never called because '.req' does not start a line. */
3911 { "req", s_req, 0 },
3912 /* Following two are likewise never called. */
3915 { "unreq", s_unreq, 0 },
3916 { "bss", s_bss, 0 },
3917 { "align", s_align, 0 },
3918 { "arm", s_arm, 0 },
3919 { "thumb", s_thumb, 0 },
3920 { "code", s_code, 0 },
3921 { "force_thumb", s_force_thumb, 0 },
3922 { "thumb_func", s_thumb_func, 0 },
3923 { "thumb_set", s_thumb_set, 0 },
3924 { "even", s_even, 0 },
3925 { "ltorg", s_ltorg, 0 },
3926 { "pool", s_ltorg, 0 },
3927 { "syntax", s_syntax, 0 },
3929 { "word", s_arm_elf_cons, 4 },
3930 { "long", s_arm_elf_cons, 4 },
3931 { "rel31", s_arm_rel31, 0 },
3932 { "fnstart", s_arm_unwind_fnstart, 0 },
3933 { "fnend", s_arm_unwind_fnend, 0 },
3934 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3935 { "personality", s_arm_unwind_personality, 0 },
3936 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3937 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3938 { "save", s_arm_unwind_save, 0 },
3939 { "vsave", s_arm_unwind_save, 1 },
3940 { "movsp", s_arm_unwind_movsp, 0 },
3941 { "pad", s_arm_unwind_pad, 0 },
3942 { "setfp", s_arm_unwind_setfp, 0 },
3943 { "unwind_raw", s_arm_unwind_raw, 0 },
3944 { "cpu", s_arm_cpu, 0 },
3945 { "arch", s_arm_arch, 0 },
3946 { "object_arch", s_arm_object_arch, 0 },
3947 { "fpu", s_arm_fpu, 0 },
3948 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3952 { "extend", float_cons, 'x' },
3953 { "ldouble", float_cons, 'x' },
3954 { "packed", float_cons, 'p' },
3958 /* Parser functions used exclusively in instruction operands. */
3960 /* Generic immediate-value read function for use in insn parsing.
3961 STR points to the beginning of the immediate (the leading #);
3962 VAL receives the value; if the value is outside [MIN, MAX]
3963 issue an error. PREFIX_OPT is true if the immediate prefix is
3967 parse_immediate (char **str, int *val, int min, int max,
3968 bfd_boolean prefix_opt)
3971 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
3972 if (exp.X_op != O_constant)
3974 inst.error = _("constant expression required");
3978 if (exp.X_add_number < min || exp.X_add_number > max)
3980 inst.error = _("immediate value out of range");
3984 *val = exp.X_add_number;
3988 /* Less-generic immediate-value read function with the possibility of loading a
3989 big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
3990 instructions. Puts the result directly in inst.operands[i]. */
3993 parse_big_immediate (char **str, int i)
3998 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
4000 if (exp.X_op == O_constant)
4002 inst.operands[i].imm = exp.X_add_number & 0xffffffff;
4003 /* If we're on a 64-bit host, then a 64-bit number can be returned using
4004 O_constant. We have to be careful not to break compilation for
4005 32-bit X_add_number, though. */
4006 if ((exp.X_add_number & ~0xffffffffl) != 0)
4008 /* X >> 32 is illegal if sizeof (exp.X_add_number) == 4. */
4009 inst.operands[i].reg = ((exp.X_add_number >> 16) >> 16) & 0xffffffff;
4010 inst.operands[i].regisimm = 1;
4013 else if (exp.X_op == O_big
4014 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
4015 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
4017 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
4018 /* Bignums have their least significant bits in
4019 generic_bignum[0]. Make sure we put 32 bits in imm and
4020 32 bits in reg, in a (hopefully) portable way. */
4021 assert (parts != 0);
4022 inst.operands[i].imm = 0;
4023 for (j = 0; j < parts; j++, idx++)
4024 inst.operands[i].imm |= generic_bignum[idx]
4025 << (LITTLENUM_NUMBER_OF_BITS * j);
4026 inst.operands[i].reg = 0;
4027 for (j = 0; j < parts; j++, idx++)
4028 inst.operands[i].reg |= generic_bignum[idx]
4029 << (LITTLENUM_NUMBER_OF_BITS * j);
4030 inst.operands[i].regisimm = 1;
4040 /* Returns the pseudo-register number of an FPA immediate constant,
4041 or FAIL if there isn't a valid constant here. */
4044 parse_fpa_immediate (char ** str)
4046 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4052 /* First try and match exact strings, this is to guarantee
4053 that some formats will work even for cross assembly. */
4055 for (i = 0; fp_const[i]; i++)
4057 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
4061 *str += strlen (fp_const[i]);
4062 if (is_end_of_line[(unsigned char) **str])
4068 /* Just because we didn't get a match doesn't mean that the constant
4069 isn't valid, just that it is in a format that we don't
4070 automatically recognize. Try parsing it with the standard
4071 expression routines. */
4073 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
4075 /* Look for a raw floating point number. */
4076 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
4077 && is_end_of_line[(unsigned char) *save_in])
4079 for (i = 0; i < NUM_FLOAT_VALS; i++)
4081 for (j = 0; j < MAX_LITTLENUMS; j++)
4083 if (words[j] != fp_values[i][j])
4087 if (j == MAX_LITTLENUMS)
4095 /* Try and parse a more complex expression, this will probably fail
4096 unless the code uses a floating point prefix (eg "0f"). */
4097 save_in = input_line_pointer;
4098 input_line_pointer = *str;
4099 if (expression (&exp) == absolute_section
4100 && exp.X_op == O_big
4101 && exp.X_add_number < 0)
4103 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
4105 if (gen_to_words (words, 5, (long) 15) == 0)
4107 for (i = 0; i < NUM_FLOAT_VALS; i++)
4109 for (j = 0; j < MAX_LITTLENUMS; j++)
4111 if (words[j] != fp_values[i][j])
4115 if (j == MAX_LITTLENUMS)
4117 *str = input_line_pointer;
4118 input_line_pointer = save_in;
4125 *str = input_line_pointer;
4126 input_line_pointer = save_in;
4127 inst.error = _("invalid FPA immediate expression");
4131 /* Returns 1 if a number has "quarter-precision" float format
4132 0baBbbbbbc defgh000 00000000 00000000. */
4135 is_quarter_float (unsigned imm)
4137 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4138 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4141 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4142 0baBbbbbbc defgh000 00000000 00000000.
4143 The zero and minus-zero cases need special handling, since they can't be
4144 encoded in the "quarter-precision" float format, but can nonetheless be
4145 loaded as integer constants. */
4148 parse_qfloat_immediate (char **ccp, int *immed)
4152 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4153 int found_fpchar = 0;
4155 skip_past_char (&str, '#');
4157 /* We must not accidentally parse an integer as a floating-point number. Make
4158 sure that the value we parse is not an integer by checking for special
4159 characters '.' or 'e'.
4160 FIXME: This is a horrible hack, but doing better is tricky because type
4161 information isn't in a very usable state at parse time. A better solution
4162 should be implemented as part of the fix for allowing the full range of
4163 pseudo-instructions to be used in VMOV, etc. */
4165 skip_whitespace (fpnum);
4167 if (strncmp (fpnum, "0x", 2) == 0)
4171 for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
4172 if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
4182 if ((str = atof_ieee (str, 's', words)) != NULL)
4184 unsigned fpword = 0;
4187 /* Our FP word must be 32 bits (single-precision FP). */
4188 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4190 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4194 if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
4207 /* Shift operands. */
4210 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4213 struct asm_shift_name
4216 enum shift_kind kind;
4219 /* Third argument to parse_shift. */
4220 enum parse_shift_mode
4222 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4223 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4224 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4225 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4226 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4229 /* Parse a <shift> specifier on an ARM data processing instruction.
4230 This has three forms:
4232 (LSL|LSR|ASL|ASR|ROR) Rs
4233 (LSL|LSR|ASL|ASR|ROR) #imm
4236 Note that ASL is assimilated to LSL in the instruction encoding, and
4237 RRX to ROR #0 (which cannot be written as such). */
4240 parse_shift (char **str, int i, enum parse_shift_mode mode)
4242 const struct asm_shift_name *shift_name;
4243 enum shift_kind shift;
4248 for (p = *str; ISALPHA (*p); p++)
4253 inst.error = _("shift expression expected");
4257 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4259 if (shift_name == NULL)
4261 inst.error = _("shift expression expected");
4265 shift = shift_name->kind;
4269 case NO_SHIFT_RESTRICT:
4270 case SHIFT_IMMEDIATE: break;
4272 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4273 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4275 inst.error = _("'LSL' or 'ASR' required");
4280 case SHIFT_LSL_IMMEDIATE:
4281 if (shift != SHIFT_LSL)
4283 inst.error = _("'LSL' required");
4288 case SHIFT_ASR_IMMEDIATE:
4289 if (shift != SHIFT_ASR)
4291 inst.error = _("'ASR' required");
4299 if (shift != SHIFT_RRX)
4301 /* Whitespace can appear here if the next thing is a bare digit. */
4302 skip_whitespace (p);
4304 if (mode == NO_SHIFT_RESTRICT
4305 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4307 inst.operands[i].imm = reg;
4308 inst.operands[i].immisreg = 1;
4310 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4313 inst.operands[i].shift_kind = shift;
4314 inst.operands[i].shifted = 1;
4319 /* Parse a <shifter_operand> for an ARM data processing instruction:
4322 #<immediate>, <rotate>
4326 where <shift> is defined by parse_shift above, and <rotate> is a
4327 multiple of 2 between 0 and 30. Validation of immediate operands
4328 is deferred to md_apply_fix. */
4331 parse_shifter_operand (char **str, int i)
4336 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4338 inst.operands[i].reg = value;
4339 inst.operands[i].isreg = 1;
4341 /* parse_shift will override this if appropriate */
4342 inst.reloc.exp.X_op = O_constant;
4343 inst.reloc.exp.X_add_number = 0;
4345 if (skip_past_comma (str) == FAIL)
4348 /* Shift operation on register. */
4349 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4352 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4355 if (skip_past_comma (str) == SUCCESS)
4357 /* #x, y -- ie explicit rotation by Y. */
4358 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4361 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4363 inst.error = _("constant expression expected");
4367 value = expr.X_add_number;
4368 if (value < 0 || value > 30 || value % 2 != 0)
4370 inst.error = _("invalid rotation");
4373 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4375 inst.error = _("invalid constant");
4379 /* Convert to decoded value. md_apply_fix will put it back. */
4380 inst.reloc.exp.X_add_number
4381 = (((inst.reloc.exp.X_add_number << (32 - value))
4382 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4385 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4386 inst.reloc.pc_rel = 0;
4390 /* Group relocation information. Each entry in the table contains the
4391 textual name of the relocation as may appear in assembler source
4392 and must end with a colon.
4393 Along with this textual name are the relocation codes to be used if
4394 the corresponding instruction is an ALU instruction (ADD or SUB only),
4395 an LDR, an LDRS, or an LDC. */
4397 struct group_reloc_table_entry
4408 /* Varieties of non-ALU group relocation. */
4415 static struct group_reloc_table_entry group_reloc_table[] =
4416 { /* Program counter relative: */
4418 BFD_RELOC_ARM_ALU_PC_G0_NC, /* ALU */
4423 BFD_RELOC_ARM_ALU_PC_G0, /* ALU */
4424 BFD_RELOC_ARM_LDR_PC_G0, /* LDR */
4425 BFD_RELOC_ARM_LDRS_PC_G0, /* LDRS */
4426 BFD_RELOC_ARM_LDC_PC_G0 }, /* LDC */
4428 BFD_RELOC_ARM_ALU_PC_G1_NC, /* ALU */
4433 BFD_RELOC_ARM_ALU_PC_G1, /* ALU */
4434 BFD_RELOC_ARM_LDR_PC_G1, /* LDR */
4435 BFD_RELOC_ARM_LDRS_PC_G1, /* LDRS */
4436 BFD_RELOC_ARM_LDC_PC_G1 }, /* LDC */
4438 BFD_RELOC_ARM_ALU_PC_G2, /* ALU */
4439 BFD_RELOC_ARM_LDR_PC_G2, /* LDR */
4440 BFD_RELOC_ARM_LDRS_PC_G2, /* LDRS */
4441 BFD_RELOC_ARM_LDC_PC_G2 }, /* LDC */
4442 /* Section base relative */
4444 BFD_RELOC_ARM_ALU_SB_G0_NC, /* ALU */
4449 BFD_RELOC_ARM_ALU_SB_G0, /* ALU */
4450 BFD_RELOC_ARM_LDR_SB_G0, /* LDR */
4451 BFD_RELOC_ARM_LDRS_SB_G0, /* LDRS */
4452 BFD_RELOC_ARM_LDC_SB_G0 }, /* LDC */
4454 BFD_RELOC_ARM_ALU_SB_G1_NC, /* ALU */
4459 BFD_RELOC_ARM_ALU_SB_G1, /* ALU */
4460 BFD_RELOC_ARM_LDR_SB_G1, /* LDR */
4461 BFD_RELOC_ARM_LDRS_SB_G1, /* LDRS */
4462 BFD_RELOC_ARM_LDC_SB_G1 }, /* LDC */
4464 BFD_RELOC_ARM_ALU_SB_G2, /* ALU */
4465 BFD_RELOC_ARM_LDR_SB_G2, /* LDR */
4466 BFD_RELOC_ARM_LDRS_SB_G2, /* LDRS */
4467 BFD_RELOC_ARM_LDC_SB_G2 } }; /* LDC */
4469 /* Given the address of a pointer pointing to the textual name of a group
4470 relocation as may appear in assembler source, attempt to find its details
4471 in group_reloc_table. The pointer will be updated to the character after
4472 the trailing colon. On failure, FAIL will be returned; SUCCESS
4473 otherwise. On success, *entry will be updated to point at the relevant
4474 group_reloc_table entry. */
4477 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
4480 for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
4482 int length = strlen (group_reloc_table[i].name);
4484 if (strncasecmp (group_reloc_table[i].name, *str, length) == 0 &&
4485 (*str)[length] == ':')
4487 *out = &group_reloc_table[i];
4488 *str += (length + 1);
4496 /* Parse a <shifter_operand> for an ARM data processing instruction
4497 (as for parse_shifter_operand) where group relocations are allowed:
4500 #<immediate>, <rotate>
4501 #:<group_reloc>:<expression>
4505 where <group_reloc> is one of the strings defined in group_reloc_table.
4506 The hashes are optional.
4508 Everything else is as for parse_shifter_operand. */
4510 static parse_operand_result
4511 parse_shifter_operand_group_reloc (char **str, int i)
4513 /* Determine if we have the sequence of characters #: or just :
4514 coming next. If we do, then we check for a group relocation.
4515 If we don't, punt the whole lot to parse_shifter_operand. */
4517 if (((*str)[0] == '#' && (*str)[1] == ':')
4518 || (*str)[0] == ':')
4520 struct group_reloc_table_entry *entry;
4522 if ((*str)[0] == '#')
4527 /* Try to parse a group relocation. Anything else is an error. */
4528 if (find_group_reloc_table_entry (str, &entry) == FAIL)
4530 inst.error = _("unknown group relocation");
4531 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4534 /* We now have the group relocation table entry corresponding to
4535 the name in the assembler source. Next, we parse the expression. */
4536 if (my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX))
4537 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4539 /* Record the relocation type (always the ALU variant here). */
4540 inst.reloc.type = entry->alu_code;
4541 assert (inst.reloc.type != 0);
4543 return PARSE_OPERAND_SUCCESS;
4546 return parse_shifter_operand (str, i) == SUCCESS
4547 ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;
4549 /* Never reached. */
4552 /* Parse all forms of an ARM address expression. Information is written
4553 to inst.operands[i] and/or inst.reloc.
4555 Preindexed addressing (.preind=1):
4557 [Rn, #offset] .reg=Rn .reloc.exp=offset
4558 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4559 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4560 .shift_kind=shift .reloc.exp=shift_imm
4562 These three may have a trailing ! which causes .writeback to be set also.
4564 Postindexed addressing (.postind=1, .writeback=1):
4566 [Rn], #offset .reg=Rn .reloc.exp=offset
4567 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4568 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4569 .shift_kind=shift .reloc.exp=shift_imm
4571 Unindexed addressing (.preind=0, .postind=0):
4573 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4577 [Rn]{!} shorthand for [Rn,#0]{!}
4578 =immediate .isreg=0 .reloc.exp=immediate
4579 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4581 It is the caller's responsibility to check for addressing modes not
4582 supported by the instruction, and to set inst.reloc.type. */
4584 static parse_operand_result
4585 parse_address_main (char **str, int i, int group_relocations,
4586 group_reloc_type group_type)
4591 if (skip_past_char (&p, '[') == FAIL)
4593 if (skip_past_char (&p, '=') == FAIL)
4595 /* bare address - translate to PC-relative offset */
4596 inst.reloc.pc_rel = 1;
4597 inst.operands[i].reg = REG_PC;
4598 inst.operands[i].isreg = 1;
4599 inst.operands[i].preind = 1;
4601 /* else a load-constant pseudo op, no special treatment needed here */
4603 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4604 return PARSE_OPERAND_FAIL;
4607 return PARSE_OPERAND_SUCCESS;
4610 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4612 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4613 return PARSE_OPERAND_FAIL;
4615 inst.operands[i].reg = reg;
4616 inst.operands[i].isreg = 1;
4618 if (skip_past_comma (&p) == SUCCESS)
4620 inst.operands[i].preind = 1;
4623 else if (*p == '-') p++, inst.operands[i].negative = 1;
4625 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4627 inst.operands[i].imm = reg;
4628 inst.operands[i].immisreg = 1;
4630 if (skip_past_comma (&p) == SUCCESS)
4631 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4632 return PARSE_OPERAND_FAIL;
4634 else if (skip_past_char (&p, ':') == SUCCESS)
4636 /* FIXME: '@' should be used here, but it's filtered out by generic
4637 code before we get to see it here. This may be subject to
4640 my_get_expression (&exp, &p, GE_NO_PREFIX);
4641 if (exp.X_op != O_constant)
4643 inst.error = _("alignment must be constant");
4644 return PARSE_OPERAND_FAIL;
4646 inst.operands[i].imm = exp.X_add_number << 8;
4647 inst.operands[i].immisalign = 1;
4648 /* Alignments are not pre-indexes. */
4649 inst.operands[i].preind = 0;
4653 if (inst.operands[i].negative)
4655 inst.operands[i].negative = 0;
4659 if (group_relocations &&
4660 ((*p == '#' && *(p + 1) == ':') || *p == ':'))
4663 struct group_reloc_table_entry *entry;
4665 /* Skip over the #: or : sequence. */
4671 /* Try to parse a group relocation. Anything else is an
4673 if (find_group_reloc_table_entry (&p, &entry) == FAIL)
4675 inst.error = _("unknown group relocation");
4676 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4679 /* We now have the group relocation table entry corresponding to
4680 the name in the assembler source. Next, we parse the
4682 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4683 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4685 /* Record the relocation type. */
4689 inst.reloc.type = entry->ldr_code;
4693 inst.reloc.type = entry->ldrs_code;
4697 inst.reloc.type = entry->ldc_code;
4704 if (inst.reloc.type == 0)
4706 inst.error = _("this group relocation is not allowed on this instruction");
4707 return PARSE_OPERAND_FAIL_NO_BACKTRACK;
4711 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4712 return PARSE_OPERAND_FAIL;
4716 if (skip_past_char (&p, ']') == FAIL)
4718 inst.error = _("']' expected");
4719 return PARSE_OPERAND_FAIL;
4722 if (skip_past_char (&p, '!') == SUCCESS)
4723 inst.operands[i].writeback = 1;
4725 else if (skip_past_comma (&p) == SUCCESS)
4727 if (skip_past_char (&p, '{') == SUCCESS)
4729 /* [Rn], {expr} - unindexed, with option */
4730 if (parse_immediate (&p, &inst.operands[i].imm,
4731 0, 255, TRUE) == FAIL)
4732 return PARSE_OPERAND_FAIL;
4734 if (skip_past_char (&p, '}') == FAIL)
4736 inst.error = _("'}' expected at end of 'option' field");
4737 return PARSE_OPERAND_FAIL;
4739 if (inst.operands[i].preind)
4741 inst.error = _("cannot combine index with option");
4742 return PARSE_OPERAND_FAIL;
4745 return PARSE_OPERAND_SUCCESS;
4749 inst.operands[i].postind = 1;
4750 inst.operands[i].writeback = 1;
4752 if (inst.operands[i].preind)
4754 inst.error = _("cannot combine pre- and post-indexing");
4755 return PARSE_OPERAND_FAIL;
4759 else if (*p == '-') p++, inst.operands[i].negative = 1;
4761 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4763 /* We might be using the immediate for alignment already. If we
4764 are, OR the register number into the low-order bits. */
4765 if (inst.operands[i].immisalign)
4766 inst.operands[i].imm |= reg;
4768 inst.operands[i].imm = reg;
4769 inst.operands[i].immisreg = 1;
4771 if (skip_past_comma (&p) == SUCCESS)
4772 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4773 return PARSE_OPERAND_FAIL;
4777 if (inst.operands[i].negative)
4779 inst.operands[i].negative = 0;
4782 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4783 return PARSE_OPERAND_FAIL;
4788 /* If at this point neither .preind nor .postind is set, we have a
4789 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4790 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4792 inst.operands[i].preind = 1;
4793 inst.reloc.exp.X_op = O_constant;
4794 inst.reloc.exp.X_add_number = 0;
4797 return PARSE_OPERAND_SUCCESS;
4801 parse_address (char **str, int i)
4803 return parse_address_main (str, i, 0, 0) == PARSE_OPERAND_SUCCESS
4807 static parse_operand_result
4808 parse_address_group_reloc (char **str, int i, group_reloc_type type)
4810 return parse_address_main (str, i, 1, type);
4813 /* Parse an operand for a MOVW or MOVT instruction. */
4815 parse_half (char **str)
4820 skip_past_char (&p, '#');
4821 if (strncasecmp (p, ":lower16:", 9) == 0)
4822 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4823 else if (strncasecmp (p, ":upper16:", 9) == 0)
4824 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4826 if (inst.reloc.type != BFD_RELOC_UNUSED)
4832 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4835 if (inst.reloc.type == BFD_RELOC_UNUSED)
4837 if (inst.reloc.exp.X_op != O_constant)
4839 inst.error = _("constant expression expected");
4842 if (inst.reloc.exp.X_add_number < 0
4843 || inst.reloc.exp.X_add_number > 0xffff)
4845 inst.error = _("immediate value out of range");
4853 /* Miscellaneous. */
4855 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4856 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4858 parse_psr (char **str)
4861 unsigned long psr_field;
4862 const struct asm_psr *psr;
4865 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4866 feature for ease of use and backwards compatibility. */
4868 if (strncasecmp (p, "SPSR", 4) == 0)
4869 psr_field = SPSR_BIT;
4870 else if (strncasecmp (p, "CPSR", 4) == 0)
4877 while (ISALNUM (*p) || *p == '_');
4879 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4890 /* A suffix follows. */
4896 while (ISALNUM (*p) || *p == '_');
4898 psr = hash_find_n (arm_psr_hsh, start, p - start);
4902 psr_field |= psr->field;
4907 goto error; /* Garbage after "[CS]PSR". */
4909 psr_field |= (PSR_c | PSR_f);
4915 inst.error = _("flag for {c}psr instruction expected");
4919 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4920 value suitable for splatting into the AIF field of the instruction. */
4923 parse_cps_flags (char **str)
4932 case '\0': case ',':
4935 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4936 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4937 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4940 inst.error = _("unrecognized CPS flag");
4945 if (saw_a_flag == 0)
4947 inst.error = _("missing CPS flags");
4955 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4956 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4959 parse_endian_specifier (char **str)
4964 if (strncasecmp (s, "BE", 2))
4966 else if (strncasecmp (s, "LE", 2))
4970 inst.error = _("valid endian specifiers are be or le");
4974 if (ISALNUM (s[2]) || s[2] == '_')
4976 inst.error = _("valid endian specifiers are be or le");
4981 return little_endian;
4984 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
4985 value suitable for poking into the rotate field of an sxt or sxta
4986 instruction, or FAIL on error. */
4989 parse_ror (char **str)
4994 if (strncasecmp (s, "ROR", 3) == 0)
4998 inst.error = _("missing rotation field after comma");
5002 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
5007 case 0: *str = s; return 0x0;
5008 case 8: *str = s; return 0x1;
5009 case 16: *str = s; return 0x2;
5010 case 24: *str = s; return 0x3;
5013 inst.error = _("rotation can only be 0, 8, 16, or 24");
5018 /* Parse a conditional code (from conds[] below). The value returned is in the
5019 range 0 .. 14, or FAIL. */
5021 parse_cond (char **str)
5024 const struct asm_cond *c;
5027 while (ISALPHA (*q))
5030 c = hash_find_n (arm_cond_hsh, p, q - p);
5033 inst.error = _("condition required");
5041 /* Parse an option for a barrier instruction. Returns the encoding for the
5044 parse_barrier (char **str)
5047 const struct asm_barrier_opt *o;
5050 while (ISALPHA (*q))
5053 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
5061 /* Parse the operands of a table branch instruction. Similar to a memory
5064 parse_tb (char **str)
5069 if (skip_past_char (&p, '[') == FAIL)
5071 inst.error = _("'[' expected");
5075 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5077 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5080 inst.operands[0].reg = reg;
5082 if (skip_past_comma (&p) == FAIL)
5084 inst.error = _("',' expected");
5088 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5090 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5093 inst.operands[0].imm = reg;
5095 if (skip_past_comma (&p) == SUCCESS)
5097 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
5099 if (inst.reloc.exp.X_add_number != 1)
5101 inst.error = _("invalid shift");
5104 inst.operands[0].shifted = 1;
5107 if (skip_past_char (&p, ']') == FAIL)
5109 inst.error = _("']' expected");
5116 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
5117 information on the types the operands can take and how they are encoded.
5118 Up to four operands may be read; this function handles setting the
5119 ".present" field for each read operand itself.
5120 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
5121 else returns FAIL. */
5124 parse_neon_mov (char **str, int *which_operand)
5126 int i = *which_operand, val;
5127 enum arm_reg_type rtype;
5129 struct neon_type_el optype;
5131 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5133 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
5134 inst.operands[i].reg = val;
5135 inst.operands[i].isscalar = 1;
5136 inst.operands[i].vectype = optype;
5137 inst.operands[i++].present = 1;
5139 if (skip_past_comma (&ptr) == FAIL)
5142 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5145 inst.operands[i].reg = val;
5146 inst.operands[i].isreg = 1;
5147 inst.operands[i].present = 1;
5149 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
5152 /* Cases 0, 1, 2, 3, 5 (D only). */
5153 if (skip_past_comma (&ptr) == FAIL)
5156 inst.operands[i].reg = val;
5157 inst.operands[i].isreg = 1;
5158 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5159 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5160 inst.operands[i].isvec = 1;
5161 inst.operands[i].vectype = optype;
5162 inst.operands[i++].present = 1;
5164 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5166 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
5167 Case 13: VMOV <Sd>, <Rm> */
5168 inst.operands[i].reg = val;
5169 inst.operands[i].isreg = 1;
5170 inst.operands[i].present = 1;
5172 if (rtype == REG_TYPE_NQ)
5174 first_error (_("can't use Neon quad register here"));
5177 else if (rtype != REG_TYPE_VFS)
5180 if (skip_past_comma (&ptr) == FAIL)
5182 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5184 inst.operands[i].reg = val;
5185 inst.operands[i].isreg = 1;
5186 inst.operands[i].present = 1;
5189 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
5190 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
5191 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
5192 Case 10: VMOV.F32 <Sd>, #<imm>
5193 Case 11: VMOV.F64 <Dd>, #<imm> */
5195 else if (parse_big_immediate (&ptr, i) == SUCCESS)
5196 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
5197 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
5199 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
5202 /* Case 0: VMOV<c><q> <Qd>, <Qm>
5203 Case 1: VMOV<c><q> <Dd>, <Dm>
5204 Case 8: VMOV.F32 <Sd>, <Sm>
5205 Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm> */
5207 inst.operands[i].reg = val;
5208 inst.operands[i].isreg = 1;
5209 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
5210 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5211 inst.operands[i].isvec = 1;
5212 inst.operands[i].vectype = optype;
5213 inst.operands[i].present = 1;
5215 if (skip_past_comma (&ptr) == SUCCESS)
5220 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5223 inst.operands[i].reg = val;
5224 inst.operands[i].isreg = 1;
5225 inst.operands[i++].present = 1;
5227 if (skip_past_comma (&ptr) == FAIL)
5230 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
5233 inst.operands[i].reg = val;
5234 inst.operands[i].isreg = 1;
5235 inst.operands[i++].present = 1;
5240 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
5244 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5247 inst.operands[i].reg = val;
5248 inst.operands[i].isreg = 1;
5249 inst.operands[i++].present = 1;
5251 if (skip_past_comma (&ptr) == FAIL)
5254 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
5256 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
5257 inst.operands[i].reg = val;
5258 inst.operands[i].isscalar = 1;
5259 inst.operands[i].present = 1;
5260 inst.operands[i].vectype = optype;
5262 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
5264 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
5265 inst.operands[i].reg = val;
5266 inst.operands[i].isreg = 1;
5267 inst.operands[i++].present = 1;
5269 if (skip_past_comma (&ptr) == FAIL)
5272 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
5275 first_error (_(reg_expected_msgs[REG_TYPE_VFSD]));
5279 inst.operands[i].reg = val;
5280 inst.operands[i].isreg = 1;
5281 inst.operands[i].isvec = 1;
5282 inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
5283 inst.operands[i].vectype = optype;
5284 inst.operands[i].present = 1;
5286 if (rtype == REG_TYPE_VFS)
5290 if (skip_past_comma (&ptr) == FAIL)
5292 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
5295 first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
5298 inst.operands[i].reg = val;
5299 inst.operands[i].isreg = 1;
5300 inst.operands[i].isvec = 1;
5301 inst.operands[i].issingle = 1;
5302 inst.operands[i].vectype = optype;
5303 inst.operands[i].present = 1;
5306 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
5310 inst.operands[i].reg = val;
5311 inst.operands[i].isreg = 1;
5312 inst.operands[i].isvec = 1;
5313 inst.operands[i].issingle = 1;
5314 inst.operands[i].vectype = optype;
5315 inst.operands[i++].present = 1;
5320 first_error (_("parse error"));
5324 /* Successfully parsed the operands. Update args. */
5330 first_error (_("expected comma"));
5334 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
5338 /* Matcher codes for parse_operands. */
5339 enum operand_parse_code
5341 OP_stop, /* end of line */
5343 OP_RR, /* ARM register */
5344 OP_RRnpc, /* ARM register, not r15 */
5345 OP_RRnpcb, /* ARM register, not r15, in square brackets */
5346 OP_RRw, /* ARM register, not r15, optional trailing ! */
5347 OP_RCP, /* Coprocessor number */
5348 OP_RCN, /* Coprocessor register */
5349 OP_RF, /* FPA register */
5350 OP_RVS, /* VFP single precision register */
5351 OP_RVD, /* VFP double precision register (0..15) */
5352 OP_RND, /* Neon double precision register (0..31) */
5353 OP_RNQ, /* Neon quad precision register */
5354 OP_RVSD, /* VFP single or double precision register */
5355 OP_RNDQ, /* Neon double or quad precision register */
5356 OP_RNSDQ, /* Neon single, double or quad precision register */
5357 OP_RNSC, /* Neon scalar D[X] */
5358 OP_RVC, /* VFP control register */
5359 OP_RMF, /* Maverick F register */
5360 OP_RMD, /* Maverick D register */
5361 OP_RMFX, /* Maverick FX register */
5362 OP_RMDX, /* Maverick DX register */
5363 OP_RMAX, /* Maverick AX register */
5364 OP_RMDS, /* Maverick DSPSC register */
5365 OP_RIWR, /* iWMMXt wR register */
5366 OP_RIWC, /* iWMMXt wC register */
5367 OP_RIWG, /* iWMMXt wCG register */
5368 OP_RXA, /* XScale accumulator register */
5370 OP_REGLST, /* ARM register list */
5371 OP_VRSLST, /* VFP single-precision register list */
5372 OP_VRDLST, /* VFP double-precision register list */
5373 OP_VRSDLST, /* VFP single or double-precision register list (& quad) */
5374 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
5375 OP_NSTRLST, /* Neon element/structure list */
5377 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
5378 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
5379 OP_RVSD_I0, /* VFP S or D reg, or immediate zero. */
5380 OP_RR_RNSC, /* ARM reg or Neon scalar. */
5381 OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar. */
5382 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
5383 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
5384 OP_VMOV, /* Neon VMOV operands. */
5385 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
5386 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
5387 OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2. */
5389 OP_I0, /* immediate zero */
5390 OP_I7, /* immediate value 0 .. 7 */
5391 OP_I15, /* 0 .. 15 */
5392 OP_I16, /* 1 .. 16 */
5393 OP_I16z, /* 0 .. 16 */
5394 OP_I31, /* 0 .. 31 */
5395 OP_I31w, /* 0 .. 31, optional trailing ! */
5396 OP_I32, /* 1 .. 32 */
5397 OP_I32z, /* 0 .. 32 */
5398 OP_I63, /* 0 .. 63 */
5399 OP_I63s, /* -64 .. 63 */
5400 OP_I64, /* 1 .. 64 */
5401 OP_I64z, /* 0 .. 64 */
5402 OP_I255, /* 0 .. 255 */
5404 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
5405 OP_I7b, /* 0 .. 7 */
5406 OP_I15b, /* 0 .. 15 */
5407 OP_I31b, /* 0 .. 31 */
5409 OP_SH, /* shifter operand */
5410 OP_SHG, /* shifter operand with possible group relocation */
5411 OP_ADDR, /* Memory address expression (any mode) */
5412 OP_ADDRGLDR, /* Mem addr expr (any mode) with possible LDR group reloc */
5413 OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
5414 OP_ADDRGLDC, /* Mem addr expr (any mode) with possible LDC group reloc */
5415 OP_EXP, /* arbitrary expression */
5416 OP_EXPi, /* same, with optional immediate prefix */
5417 OP_EXPr, /* same, with optional relocation suffix */
5418 OP_HALF, /* 0 .. 65535 or low/high reloc. */
5420 OP_CPSF, /* CPS flags */
5421 OP_ENDI, /* Endianness specifier */
5422 OP_PSR, /* CPSR/SPSR mask for msr */
5423 OP_COND, /* conditional code */
5424 OP_TB, /* Table branch. */
5426 OP_RVC_PSR, /* CPSR/SPSR mask for msr, or VFP control register. */
5427 OP_APSR_RR, /* ARM register or "APSR_nzcv". */
5429 OP_RRnpc_I0, /* ARM register or literal 0 */
5430 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
5431 OP_RR_EXi, /* ARM register or expression with imm prefix */
5432 OP_RF_IF, /* FPA register or immediate */
5433 OP_RIWR_RIWC, /* iWMMXt R or C reg */
5434 OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */
5436 /* Optional operands. */
5437 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
5438 OP_oI31b, /* 0 .. 31 */
5439 OP_oI32b, /* 1 .. 32 */
5440 OP_oIffffb, /* 0 .. 65535 */
5441 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
5443 OP_oRR, /* ARM register */
5444 OP_oRRnpc, /* ARM register, not the PC */
5445 OP_oRND, /* Optional Neon double precision register */
5446 OP_oRNQ, /* Optional Neon quad precision register */
5447 OP_oRNDQ, /* Optional Neon double or quad precision register */
5448 OP_oRNSDQ, /* Optional single, double or quad precision vector register */
5449 OP_oSHll, /* LSL immediate */
5450 OP_oSHar, /* ASR immediate */
5451 OP_oSHllar, /* LSL or ASR immediate */
5452 OP_oROR, /* ROR 0/8/16/24 */
5453 OP_oBARRIER, /* Option argument for a barrier instruction. */
5455 OP_FIRST_OPTIONAL = OP_oI7b
5458 /* Generic instruction operand parser. This does no encoding and no
5459 semantic validation; it merely squirrels values away in the inst
5460 structure. Returns SUCCESS or FAIL depending on whether the
5461 specified grammar matched. */
5463 parse_operands (char *str, const unsigned char *pattern)
5465 unsigned const char *upat = pattern;
5466 char *backtrack_pos = 0;
5467 const char *backtrack_error = 0;
5468 int i, val, backtrack_index = 0;
5469 enum arm_reg_type rtype;
5470 parse_operand_result result;
5472 #define po_char_or_fail(chr) do { \
5473 if (skip_past_char (&str, chr) == FAIL) \
5477 #define po_reg_or_fail(regtype) do { \
5478 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5479 &inst.operands[i].vectype); \
5482 first_error (_(reg_expected_msgs[regtype])); \
5485 inst.operands[i].reg = val; \
5486 inst.operands[i].isreg = 1; \
5487 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5488 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5489 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5490 || rtype == REG_TYPE_VFD \
5491 || rtype == REG_TYPE_NQ); \
5494 #define po_reg_or_goto(regtype, label) do { \
5495 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5496 &inst.operands[i].vectype); \
5500 inst.operands[i].reg = val; \
5501 inst.operands[i].isreg = 1; \
5502 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5503 inst.operands[i].issingle = (rtype == REG_TYPE_VFS); \
5504 inst.operands[i].isvec = (rtype == REG_TYPE_VFS \
5505 || rtype == REG_TYPE_VFD \
5506 || rtype == REG_TYPE_NQ); \
5509 #define po_imm_or_fail(min, max, popt) do { \
5510 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5512 inst.operands[i].imm = val; \
5515 #define po_scalar_or_goto(elsz, label) do { \
5516 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5519 inst.operands[i].reg = val; \
5520 inst.operands[i].isscalar = 1; \
5523 #define po_misc_or_fail(expr) do { \
5528 #define po_misc_or_fail_no_backtrack(expr) do { \
5530 if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)\
5531 backtrack_pos = 0; \
5532 if (result != PARSE_OPERAND_SUCCESS) \
5536 skip_whitespace (str);
5538 for (i = 0; upat[i] != OP_stop; i++)
5540 if (upat[i] >= OP_FIRST_OPTIONAL)
5542 /* Remember where we are in case we need to backtrack. */
5543 assert (!backtrack_pos);
5544 backtrack_pos = str;
5545 backtrack_error = inst.error;
5546 backtrack_index = i;
5550 po_char_or_fail (',');
5558 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5559 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5560 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5561 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5562 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5563 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5565 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5566 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5567 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5568 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5569 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5570 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5571 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5572 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5573 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5574 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5575 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5576 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5578 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5580 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5581 case OP_RVSD: po_reg_or_fail (REG_TYPE_VFSD); break;
5583 case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ); break;
5585 /* Neon scalar. Using an element size of 8 means that some invalid
5586 scalars are accepted here, so deal with those in later code. */
5587 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5589 /* WARNING: We can expand to two operands here. This has the potential
5590 to totally confuse the backtracking mechanism! It will be OK at
5591 least as long as we don't try to use optional args as well,
5595 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5596 inst.operands[i].present = 1;
5598 skip_past_comma (&str);
5599 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5602 /* Optional register operand was omitted. Unfortunately, it's in
5603 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5604 here (this is a bit grotty). */
5605 inst.operands[i] = inst.operands[i-1];
5606 inst.operands[i-1].present = 0;
5609 /* There's a possibility of getting a 64-bit immediate here, so
5610 we need special handling. */
5611 if (parse_big_immediate (&str, i) == FAIL)
5613 inst.error = _("immediate value is out of range");
5621 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5624 po_imm_or_fail (0, 0, TRUE);
5629 po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
5634 po_scalar_or_goto (8, try_rr);
5637 po_reg_or_fail (REG_TYPE_RN);
5643 po_scalar_or_goto (8, try_nsdq);
5646 po_reg_or_fail (REG_TYPE_NSDQ);
5652 po_scalar_or_goto (8, try_ndq);
5655 po_reg_or_fail (REG_TYPE_NDQ);
5661 po_scalar_or_goto (8, try_vfd);
5664 po_reg_or_fail (REG_TYPE_VFD);
5669 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5670 not careful then bad things might happen. */
5671 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5676 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5679 /* There's a possibility of getting a 64-bit immediate here, so
5680 we need special handling. */
5681 if (parse_big_immediate (&str, i) == FAIL)
5683 inst.error = _("immediate value is out of range");
5691 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5694 po_imm_or_fail (0, 63, TRUE);
5699 po_char_or_fail ('[');
5700 po_reg_or_fail (REG_TYPE_RN);
5701 po_char_or_fail (']');
5705 po_reg_or_fail (REG_TYPE_RN);
5706 if (skip_past_char (&str, '!') == SUCCESS)
5707 inst.operands[i].writeback = 1;
5711 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5712 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5713 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5714 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5715 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5716 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5717 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5718 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5719 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5720 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5721 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5722 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5724 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5726 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5727 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5729 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5730 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5731 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5733 /* Immediate variants */
5735 po_char_or_fail ('{');
5736 po_imm_or_fail (0, 255, TRUE);
5737 po_char_or_fail ('}');
5741 /* The expression parser chokes on a trailing !, so we have
5742 to find it first and zap it. */
5745 while (*s && *s != ',')
5750 inst.operands[i].writeback = 1;
5752 po_imm_or_fail (0, 31, TRUE);
5760 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5765 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5770 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5772 if (inst.reloc.exp.X_op == O_symbol)
5774 val = parse_reloc (&str);
5777 inst.error = _("unrecognized relocation suffix");
5780 else if (val != BFD_RELOC_UNUSED)
5782 inst.operands[i].imm = val;
5783 inst.operands[i].hasreloc = 1;
5788 /* Operand for MOVW or MOVT. */
5790 po_misc_or_fail (parse_half (&str));
5793 /* Register or expression */
5794 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5795 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5797 /* Register or immediate */
5798 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5799 I0: po_imm_or_fail (0, 0, FALSE); break;
5801 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5803 if (!is_immediate_prefix (*str))
5806 val = parse_fpa_immediate (&str);
5809 /* FPA immediates are encoded as registers 8-15.
5810 parse_fpa_immediate has already applied the offset. */
5811 inst.operands[i].reg = val;
5812 inst.operands[i].isreg = 1;
5815 case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
5816 I32z: po_imm_or_fail (0, 32, FALSE); break;
5818 /* Two kinds of register */
5821 struct reg_entry *rege = arm_reg_parse_multi (&str);
5823 || (rege->type != REG_TYPE_MMXWR
5824 && rege->type != REG_TYPE_MMXWC
5825 && rege->type != REG_TYPE_MMXWCG))
5827 inst.error = _("iWMMXt data or control register expected");
5830 inst.operands[i].reg = rege->number;
5831 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5837 struct reg_entry *rege = arm_reg_parse_multi (&str);
5839 || (rege->type != REG_TYPE_MMXWC
5840 && rege->type != REG_TYPE_MMXWCG))
5842 inst.error = _("iWMMXt control register expected");
5845 inst.operands[i].reg = rege->number;
5846 inst.operands[i].isreg = 1;
5851 case OP_CPSF: val = parse_cps_flags (&str); break;
5852 case OP_ENDI: val = parse_endian_specifier (&str); break;
5853 case OP_oROR: val = parse_ror (&str); break;
5854 case OP_PSR: val = parse_psr (&str); break;
5855 case OP_COND: val = parse_cond (&str); break;
5856 case OP_oBARRIER:val = parse_barrier (&str); break;
5859 po_reg_or_goto (REG_TYPE_VFC, try_psr);
5860 inst.operands[i].isvec = 1; /* Mark VFP control reg as vector. */
5863 val = parse_psr (&str);
5867 po_reg_or_goto (REG_TYPE_RN, try_apsr);
5870 /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
5872 if (strncasecmp (str, "APSR_", 5) == 0)
5879 case 'c': found = (found & 1) ? 16 : found | 1; break;
5880 case 'n': found = (found & 2) ? 16 : found | 2; break;
5881 case 'z': found = (found & 4) ? 16 : found | 4; break;
5882 case 'v': found = (found & 8) ? 16 : found | 8; break;
5883 default: found = 16;
5887 inst.operands[i].isvec = 1;
5894 po_misc_or_fail (parse_tb (&str));
5897 /* Register lists */
5899 val = parse_reg_list (&str);
5902 inst.operands[1].writeback = 1;
5908 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5912 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5916 /* Allow Q registers too. */
5917 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5922 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5924 inst.operands[i].issingle = 1;
5929 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5934 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5935 &inst.operands[i].vectype);
5938 /* Addressing modes */
5940 po_misc_or_fail (parse_address (&str, i));
5944 po_misc_or_fail_no_backtrack (
5945 parse_address_group_reloc (&str, i, GROUP_LDR));
5949 po_misc_or_fail_no_backtrack (
5950 parse_address_group_reloc (&str, i, GROUP_LDRS));
5954 po_misc_or_fail_no_backtrack (
5955 parse_address_group_reloc (&str, i, GROUP_LDC));
5959 po_misc_or_fail (parse_shifter_operand (&str, i));
5963 po_misc_or_fail_no_backtrack (
5964 parse_shifter_operand_group_reloc (&str, i));
5968 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
5972 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
5976 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
5980 as_fatal ("unhandled operand code %d", upat[i]);
5983 /* Various value-based sanity checks and shared operations. We
5984 do not signal immediate failures for the register constraints;
5985 this allows a syntax error to take precedence. */
5993 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
5994 inst.error = BAD_PC;
6012 inst.operands[i].imm = val;
6019 /* If we get here, this operand was successfully parsed. */
6020 inst.operands[i].present = 1;
6024 inst.error = BAD_ARGS;
6029 /* The parse routine should already have set inst.error, but set a
6030 defaut here just in case. */
6032 inst.error = _("syntax error");
6036 /* Do not backtrack over a trailing optional argument that
6037 absorbed some text. We will only fail again, with the
6038 'garbage following instruction' error message, which is
6039 probably less helpful than the current one. */
6040 if (backtrack_index == i && backtrack_pos != str
6041 && upat[i+1] == OP_stop)
6044 inst.error = _("syntax error");
6048 /* Try again, skipping the optional argument at backtrack_pos. */
6049 str = backtrack_pos;
6050 inst.error = backtrack_error;
6051 inst.operands[backtrack_index].present = 0;
6052 i = backtrack_index;
6056 /* Check that we have parsed all the arguments. */
6057 if (*str != '\0' && !inst.error)
6058 inst.error = _("garbage following instruction");
6060 return inst.error ? FAIL : SUCCESS;
6063 #undef po_char_or_fail
6064 #undef po_reg_or_fail
6065 #undef po_reg_or_goto
6066 #undef po_imm_or_fail
6067 #undef po_scalar_or_fail
6069 /* Shorthand macro for instruction encoding functions issuing errors. */
6070 #define constraint(expr, err) do { \
6078 /* Functions for operand encoding. ARM, then Thumb. */
6080 #define rotate_left(v, n) (v << n | v >> (32 - n))
6082 /* If VAL can be encoded in the immediate field of an ARM instruction,
6083 return the encoded form. Otherwise, return FAIL. */
6086 encode_arm_immediate (unsigned int val)
6090 for (i = 0; i < 32; i += 2)
6091 if ((a = rotate_left (val, i)) <= 0xff)
6092 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
6097 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
6098 return the encoded form. Otherwise, return FAIL. */
6100 encode_thumb32_immediate (unsigned int val)
6107 for (i = 1; i <= 24; i++)
6110 if ((val & ~(0xff << i)) == 0)
6111 return ((val >> i) & 0x7f) | ((32 - i) << 7);
6115 if (val == ((a << 16) | a))
6117 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
6121 if (val == ((a << 16) | a))
6122 return 0x200 | (a >> 8);
6126 /* Encode a VFP SP or DP register number into inst.instruction. */
6129 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
6131 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
6134 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
6137 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
6140 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
6145 first_error (_("D register out of range for selected VFP version"));
6153 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
6157 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
6161 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
6165 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
6169 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
6173 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
6181 /* Encode a <shift> in an ARM-format instruction. The immediate,
6182 if any, is handled by md_apply_fix. */
6184 encode_arm_shift (int i)
6186 if (inst.operands[i].shift_kind == SHIFT_RRX)
6187 inst.instruction |= SHIFT_ROR << 5;
6190 inst.instruction |= inst.operands[i].shift_kind << 5;
6191 if (inst.operands[i].immisreg)
6193 inst.instruction |= SHIFT_BY_REG;
6194 inst.instruction |= inst.operands[i].imm << 8;
6197 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6202 encode_arm_shifter_operand (int i)
6204 if (inst.operands[i].isreg)
6206 inst.instruction |= inst.operands[i].reg;
6207 encode_arm_shift (i);
6210 inst.instruction |= INST_IMMEDIATE;
6213 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
6215 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
6217 assert (inst.operands[i].isreg);
6218 inst.instruction |= inst.operands[i].reg << 16;
6220 if (inst.operands[i].preind)
6224 inst.error = _("instruction does not accept preindexed addressing");
6227 inst.instruction |= PRE_INDEX;
6228 if (inst.operands[i].writeback)
6229 inst.instruction |= WRITE_BACK;
6232 else if (inst.operands[i].postind)
6234 assert (inst.operands[i].writeback);
6236 inst.instruction |= WRITE_BACK;
6238 else /* unindexed - only for coprocessor */
6240 inst.error = _("instruction does not accept unindexed addressing");
6244 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
6245 && (((inst.instruction & 0x000f0000) >> 16)
6246 == ((inst.instruction & 0x0000f000) >> 12)))
6247 as_warn ((inst.instruction & LOAD_BIT)
6248 ? _("destination register same as write-back base")
6249 : _("source register same as write-back base"));
6252 /* inst.operands[i] was set up by parse_address. Encode it into an
6253 ARM-format mode 2 load or store instruction. If is_t is true,
6254 reject forms that cannot be used with a T instruction (i.e. not
6257 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
6259 encode_arm_addr_mode_common (i, is_t);
6261 if (inst.operands[i].immisreg)
6263 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
6264 inst.instruction |= inst.operands[i].imm;
6265 if (!inst.operands[i].negative)
6266 inst.instruction |= INDEX_UP;
6267 if (inst.operands[i].shifted)
6269 if (inst.operands[i].shift_kind == SHIFT_RRX)
6270 inst.instruction |= SHIFT_ROR << 5;
6273 inst.instruction |= inst.operands[i].shift_kind << 5;
6274 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6278 else /* immediate offset in inst.reloc */
6280 if (inst.reloc.type == BFD_RELOC_UNUSED)
6281 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
6285 /* inst.operands[i] was set up by parse_address. Encode it into an
6286 ARM-format mode 3 load or store instruction. Reject forms that
6287 cannot be used with such instructions. If is_t is true, reject
6288 forms that cannot be used with a T instruction (i.e. not
6291 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
6293 if (inst.operands[i].immisreg && inst.operands[i].shifted)
6295 inst.error = _("instruction does not accept scaled register index");
6299 encode_arm_addr_mode_common (i, is_t);
6301 if (inst.operands[i].immisreg)
6303 inst.instruction |= inst.operands[i].imm;
6304 if (!inst.operands[i].negative)
6305 inst.instruction |= INDEX_UP;
6307 else /* immediate offset in inst.reloc */
6309 inst.instruction |= HWOFFSET_IMM;
6310 if (inst.reloc.type == BFD_RELOC_UNUSED)
6311 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
6315 /* inst.operands[i] was set up by parse_address. Encode it into an
6316 ARM-format instruction. Reject all forms which cannot be encoded
6317 into a coprocessor load/store instruction. If wb_ok is false,
6318 reject use of writeback; if unind_ok is false, reject use of
6319 unindexed addressing. If reloc_override is not 0, use it instead
6320 of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
6321 (in which case it is preserved). */
6324 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
6326 inst.instruction |= inst.operands[i].reg << 16;
6328 assert (!(inst.operands[i].preind && inst.operands[i].postind));
6330 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
6332 assert (!inst.operands[i].writeback);
6335 inst.error = _("instruction does not support unindexed addressing");
6338 inst.instruction |= inst.operands[i].imm;
6339 inst.instruction |= INDEX_UP;
6343 if (inst.operands[i].preind)
6344 inst.instruction |= PRE_INDEX;
6346 if (inst.operands[i].writeback)
6348 if (inst.operands[i].reg == REG_PC)
6350 inst.error = _("pc may not be used with write-back");
6355 inst.error = _("instruction does not support writeback");
6358 inst.instruction |= WRITE_BACK;
6362 inst.reloc.type = reloc_override;
6363 else if ((inst.reloc.type < BFD_RELOC_ARM_ALU_PC_G0_NC
6364 || inst.reloc.type > BFD_RELOC_ARM_LDC_SB_G2)
6365 && inst.reloc.type != BFD_RELOC_ARM_LDR_PC_G0)
6368 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
6370 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
6376 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
6377 Determine whether it can be performed with a move instruction; if
6378 it can, convert inst.instruction to that move instruction and
6379 return 1; if it can't, convert inst.instruction to a literal-pool
6380 load and return 0. If this is not a valid thing to do in the
6381 current context, set inst.error and return 1.
6383 inst.operands[i] describes the destination register. */
6386 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
6391 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
6395 if ((inst.instruction & tbit) == 0)
6397 inst.error = _("invalid pseudo operation");
6400 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
6402 inst.error = _("constant expression expected");
6405 if (inst.reloc.exp.X_op == O_constant)
6409 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
6411 /* This can be done with a mov(1) instruction. */
6412 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
6413 inst.instruction |= inst.reloc.exp.X_add_number;
6419 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
6422 /* This can be done with a mov instruction. */
6423 inst.instruction &= LITERAL_MASK;
6424 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
6425 inst.instruction |= value & 0xfff;
6429 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
6432 /* This can be done with a mvn instruction. */
6433 inst.instruction &= LITERAL_MASK;
6434 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
6435 inst.instruction |= value & 0xfff;
6441 if (add_to_lit_pool () == FAIL)
6443 inst.error = _("literal pool insertion failed");
6446 inst.operands[1].reg = REG_PC;
6447 inst.operands[1].isreg = 1;
6448 inst.operands[1].preind = 1;
6449 inst.reloc.pc_rel = 1;
6450 inst.reloc.type = (thumb_p
6451 ? BFD_RELOC_ARM_THUMB_OFFSET
6453 ? BFD_RELOC_ARM_HWLITERAL
6454 : BFD_RELOC_ARM_LITERAL));
6458 /* Functions for instruction encoding, sorted by subarchitecture.
6459 First some generics; their names are taken from the conventional
6460 bit positions for register arguments in ARM format instructions. */
6470 inst.instruction |= inst.operands[0].reg << 12;
6476 inst.instruction |= inst.operands[0].reg << 12;
6477 inst.instruction |= inst.operands[1].reg;
6483 inst.instruction |= inst.operands[0].reg << 12;
6484 inst.instruction |= inst.operands[1].reg << 16;
6490 inst.instruction |= inst.operands[0].reg << 16;
6491 inst.instruction |= inst.operands[1].reg << 12;
6497 unsigned Rn = inst.operands[2].reg;
6498 /* Enforce resutrictions on SWP instruction. */
6499 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
6500 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
6501 _("Rn must not overlap other operands"));
6502 inst.instruction |= inst.operands[0].reg << 12;
6503 inst.instruction |= inst.operands[1].reg;
6504 inst.instruction |= Rn << 16;
6510 inst.instruction |= inst.operands[0].reg << 12;
6511 inst.instruction |= inst.operands[1].reg << 16;
6512 inst.instruction |= inst.operands[2].reg;
6518 inst.instruction |= inst.operands[0].reg;
6519 inst.instruction |= inst.operands[1].reg << 12;
6520 inst.instruction |= inst.operands[2].reg << 16;
6526 inst.instruction |= inst.operands[0].imm;
6532 inst.instruction |= inst.operands[0].reg << 12;
6533 encode_arm_cp_address (1, TRUE, TRUE, 0);
6536 /* ARM instructions, in alphabetical order by function name (except
6537 that wrapper functions appear immediately after the function they
6540 /* This is a pseudo-op of the form "adr rd, label" to be converted
6541 into a relative address of the form "add rd, pc, #label-.-8". */
6546 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6548 /* Frag hacking will turn this into a sub instruction if the offset turns
6549 out to be negative. */
6550 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6551 inst.reloc.pc_rel = 1;
6552 inst.reloc.exp.X_add_number -= 8;
6555 /* This is a pseudo-op of the form "adrl rd, label" to be converted
6556 into a relative address of the form:
6557 add rd, pc, #low(label-.-8)"
6558 add rd, rd, #high(label-.-8)" */
6563 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
6565 /* Frag hacking will turn this into a sub instruction if the offset turns
6566 out to be negative. */
6567 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
6568 inst.reloc.pc_rel = 1;
6569 inst.size = INSN_SIZE * 2;
6570 inst.reloc.exp.X_add_number -= 8;
6576 if (!inst.operands[1].present)
6577 inst.operands[1].reg = inst.operands[0].reg;
6578 inst.instruction |= inst.operands[0].reg << 12;
6579 inst.instruction |= inst.operands[1].reg << 16;
6580 encode_arm_shifter_operand (2);
6586 if (inst.operands[0].present)
6588 constraint ((inst.instruction & 0xf0) != 0x40
6589 && inst.operands[0].imm != 0xf,
6590 "bad barrier type");
6591 inst.instruction |= inst.operands[0].imm;
6594 inst.instruction |= 0xf;
6600 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
6601 constraint (msb > 32, _("bit-field extends past end of register"));
6602 /* The instruction encoding stores the LSB and MSB,
6603 not the LSB and width. */
6604 inst.instruction |= inst.operands[0].reg << 12;
6605 inst.instruction |= inst.operands[1].imm << 7;
6606 inst.instruction |= (msb - 1) << 16;
6614 /* #0 in second position is alternative syntax for bfc, which is
6615 the same instruction but with REG_PC in the Rm field. */
6616 if (!inst.operands[1].isreg)
6617 inst.operands[1].reg = REG_PC;
6619 msb = inst.operands[2].imm + inst.operands[3].imm;
6620 constraint (msb > 32, _("bit-field extends past end of register"));
6621 /* The instruction encoding stores the LSB and MSB,
6622 not the LSB and width. */
6623 inst.instruction |= inst.operands[0].reg << 12;
6624 inst.instruction |= inst.operands[1].reg;
6625 inst.instruction |= inst.operands[2].imm << 7;
6626 inst.instruction |= (msb - 1) << 16;
6632 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6633 _("bit-field extends past end of register"));
6634 inst.instruction |= inst.operands[0].reg << 12;
6635 inst.instruction |= inst.operands[1].reg;
6636 inst.instruction |= inst.operands[2].imm << 7;
6637 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6640 /* ARM V5 breakpoint instruction (argument parse)
6641 BKPT <16 bit unsigned immediate>
6642 Instruction is not conditional.
6643 The bit pattern given in insns[] has the COND_ALWAYS condition,
6644 and it is an error if the caller tried to override that. */
6649 /* Top 12 of 16 bits to bits 19:8. */
6650 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6652 /* Bottom 4 of 16 bits to bits 3:0. */
6653 inst.instruction |= inst.operands[0].imm & 0xf;
6657 encode_branch (int default_reloc)
6659 if (inst.operands[0].hasreloc)
6661 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6662 _("the only suffix valid here is '(plt)'"));
6663 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6667 inst.reloc.type = default_reloc;
6669 inst.reloc.pc_rel = 1;
6676 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6677 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6680 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6687 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6689 if (inst.cond == COND_ALWAYS)
6690 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6692 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6696 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6699 /* ARM V5 branch-link-exchange instruction (argument parse)
6700 BLX <target_addr> ie BLX(1)
6701 BLX{<condition>} <Rm> ie BLX(2)
6702 Unfortunately, there are two different opcodes for this mnemonic.
6703 So, the insns[].value is not used, and the code here zaps values
6704 into inst.instruction.
6705 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6710 if (inst.operands[0].isreg)
6712 /* Arg is a register; the opcode provided by insns[] is correct.
6713 It is not illegal to do "blx pc", just useless. */
6714 if (inst.operands[0].reg == REG_PC)
6715 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6717 inst.instruction |= inst.operands[0].reg;
6721 /* Arg is an address; this instruction cannot be executed
6722 conditionally, and the opcode must be adjusted. */
6723 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6724 inst.instruction = 0xfa000000;
6726 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6727 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6730 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6737 if (inst.operands[0].reg == REG_PC)
6738 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6740 inst.instruction |= inst.operands[0].reg;
6744 /* ARM v5TEJ. Jump to Jazelle code. */
6749 if (inst.operands[0].reg == REG_PC)
6750 as_tsktsk (_("use of r15 in bxj is not really useful"));
6752 inst.instruction |= inst.operands[0].reg;
6755 /* Co-processor data operation:
6756 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6757 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6761 inst.instruction |= inst.operands[0].reg << 8;
6762 inst.instruction |= inst.operands[1].imm << 20;
6763 inst.instruction |= inst.operands[2].reg << 12;
6764 inst.instruction |= inst.operands[3].reg << 16;
6765 inst.instruction |= inst.operands[4].reg;
6766 inst.instruction |= inst.operands[5].imm << 5;
6772 inst.instruction |= inst.operands[0].reg << 16;
6773 encode_arm_shifter_operand (1);
6776 /* Transfer between coprocessor and ARM registers.
6777 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6782 No special properties. */
6787 inst.instruction |= inst.operands[0].reg << 8;
6788 inst.instruction |= inst.operands[1].imm << 21;
6789 inst.instruction |= inst.operands[2].reg << 12;
6790 inst.instruction |= inst.operands[3].reg << 16;
6791 inst.instruction |= inst.operands[4].reg;
6792 inst.instruction |= inst.operands[5].imm << 5;
6795 /* Transfer between coprocessor register and pair of ARM registers.
6796 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6801 Two XScale instructions are special cases of these:
6803 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6804 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6806 Result unpredicatable if Rd or Rn is R15. */
6811 inst.instruction |= inst.operands[0].reg << 8;
6812 inst.instruction |= inst.operands[1].imm << 4;
6813 inst.instruction |= inst.operands[2].reg << 12;
6814 inst.instruction |= inst.operands[3].reg << 16;
6815 inst.instruction |= inst.operands[4].reg;
6821 inst.instruction |= inst.operands[0].imm << 6;
6822 inst.instruction |= inst.operands[1].imm;
6828 inst.instruction |= inst.operands[0].imm;
6834 /* There is no IT instruction in ARM mode. We
6835 process it but do not generate code for it. */
6842 int base_reg = inst.operands[0].reg;
6843 int range = inst.operands[1].imm;
6845 inst.instruction |= base_reg << 16;
6846 inst.instruction |= range;
6848 if (inst.operands[1].writeback)
6849 inst.instruction |= LDM_TYPE_2_OR_3;
6851 if (inst.operands[0].writeback)
6853 inst.instruction |= WRITE_BACK;
6854 /* Check for unpredictable uses of writeback. */
6855 if (inst.instruction & LOAD_BIT)
6857 /* Not allowed in LDM type 2. */
6858 if ((inst.instruction & LDM_TYPE_2_OR_3)
6859 && ((range & (1 << REG_PC)) == 0))
6860 as_warn (_("writeback of base register is UNPREDICTABLE"));
6861 /* Only allowed if base reg not in list for other types. */
6862 else if (range & (1 << base_reg))
6863 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6867 /* Not allowed for type 2. */
6868 if (inst.instruction & LDM_TYPE_2_OR_3)
6869 as_warn (_("writeback of base register is UNPREDICTABLE"));
6870 /* Only allowed if base reg not in list, or first in list. */
6871 else if ((range & (1 << base_reg))
6872 && (range & ((1 << base_reg) - 1)))
6873 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6878 /* ARMv5TE load-consecutive (argument parse)
6887 constraint (inst.operands[0].reg % 2 != 0,
6888 _("first destination register must be even"));
6889 constraint (inst.operands[1].present
6890 && inst.operands[1].reg != inst.operands[0].reg + 1,
6891 _("can only load two consecutive registers"));
6892 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6893 constraint (!inst.operands[2].isreg, _("'[' expected"));
6895 if (!inst.operands[1].present)
6896 inst.operands[1].reg = inst.operands[0].reg + 1;
6898 if (inst.instruction & LOAD_BIT)
6900 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6901 register and the first register written; we have to diagnose
6902 overlap between the base and the second register written here. */
6904 if (inst.operands[2].reg == inst.operands[1].reg
6905 && (inst.operands[2].writeback || inst.operands[2].postind))
6906 as_warn (_("base register written back, and overlaps "
6907 "second destination register"));
6909 /* For an index-register load, the index register must not overlap the
6910 destination (even if not write-back). */
6911 else if (inst.operands[2].immisreg
6912 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
6913 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
6914 as_warn (_("index register overlaps destination register"));
6917 inst.instruction |= inst.operands[0].reg << 12;
6918 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
6924 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
6925 || inst.operands[1].postind || inst.operands[1].writeback
6926 || inst.operands[1].immisreg || inst.operands[1].shifted
6927 || inst.operands[1].negative
6928 /* This can arise if the programmer has written
6930 or if they have mistakenly used a register name as the last
6933 It is very difficult to distinguish between these two cases
6934 because "rX" might actually be a label. ie the register
6935 name has been occluded by a symbol of the same name. So we
6936 just generate a general 'bad addressing mode' type error
6937 message and leave it up to the programmer to discover the
6938 true cause and fix their mistake. */
6939 || (inst.operands[1].reg == REG_PC),
6942 constraint (inst.reloc.exp.X_op != O_constant
6943 || inst.reloc.exp.X_add_number != 0,
6944 _("offset must be zero in ARM encoding"));
6946 inst.instruction |= inst.operands[0].reg << 12;
6947 inst.instruction |= inst.operands[1].reg << 16;
6948 inst.reloc.type = BFD_RELOC_UNUSED;
6954 constraint (inst.operands[0].reg % 2 != 0,
6955 _("even register required"));
6956 constraint (inst.operands[1].present
6957 && inst.operands[1].reg != inst.operands[0].reg + 1,
6958 _("can only load two consecutive registers"));
6959 /* If op 1 were present and equal to PC, this function wouldn't
6960 have been called in the first place. */
6961 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6963 inst.instruction |= inst.operands[0].reg << 12;
6964 inst.instruction |= inst.operands[2].reg << 16;
6970 inst.instruction |= inst.operands[0].reg << 12;
6971 if (!inst.operands[1].isreg)
6972 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
6974 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
6980 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6982 if (inst.operands[1].preind)
6984 constraint (inst.reloc.exp.X_op != O_constant ||
6985 inst.reloc.exp.X_add_number != 0,
6986 _("this instruction requires a post-indexed address"));
6988 inst.operands[1].preind = 0;
6989 inst.operands[1].postind = 1;
6990 inst.operands[1].writeback = 1;
6992 inst.instruction |= inst.operands[0].reg << 12;
6993 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
6996 /* Halfword and signed-byte load/store operations. */
7001 inst.instruction |= inst.operands[0].reg << 12;
7002 if (!inst.operands[1].isreg)
7003 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
7005 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
7011 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
7013 if (inst.operands[1].preind)
7015 constraint (inst.reloc.exp.X_op != O_constant ||
7016 inst.reloc.exp.X_add_number != 0,
7017 _("this instruction requires a post-indexed address"));
7019 inst.operands[1].preind = 0;
7020 inst.operands[1].postind = 1;
7021 inst.operands[1].writeback = 1;
7023 inst.instruction |= inst.operands[0].reg << 12;
7024 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
7027 /* Co-processor register load/store.
7028 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
7032 inst.instruction |= inst.operands[0].reg << 8;
7033 inst.instruction |= inst.operands[1].reg << 12;
7034 encode_arm_cp_address (2, TRUE, TRUE, 0);
7040 /* This restriction does not apply to mls (nor to mla in v6, but
7041 that's hard to detect at present). */
7042 if (inst.operands[0].reg == inst.operands[1].reg
7043 && !(inst.instruction & 0x00400000))
7044 as_tsktsk (_("rd and rm should be different in mla"));
7046 inst.instruction |= inst.operands[0].reg << 16;
7047 inst.instruction |= inst.operands[1].reg;
7048 inst.instruction |= inst.operands[2].reg << 8;
7049 inst.instruction |= inst.operands[3].reg << 12;
7056 inst.instruction |= inst.operands[0].reg << 12;
7057 encode_arm_shifter_operand (1);
7060 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
7067 top = (inst.instruction & 0x00400000) != 0;
7068 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
7069 _(":lower16: not allowed this instruction"));
7070 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
7071 _(":upper16: not allowed instruction"));
7072 inst.instruction |= inst.operands[0].reg << 12;
7073 if (inst.reloc.type == BFD_RELOC_UNUSED)
7075 imm = inst.reloc.exp.X_add_number;
7076 /* The value is in two pieces: 0:11, 16:19. */
7077 inst.instruction |= (imm & 0x00000fff);
7078 inst.instruction |= (imm & 0x0000f000) << 4;
7082 static void do_vfp_nsyn_opcode (const char *);
7085 do_vfp_nsyn_mrs (void)
7087 if (inst.operands[0].isvec)
7089 if (inst.operands[1].reg != 1)
7090 first_error (_("operand 1 must be FPSCR"));
7091 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
7092 memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
7093 do_vfp_nsyn_opcode ("fmstat");
7095 else if (inst.operands[1].isvec)
7096 do_vfp_nsyn_opcode ("fmrx");
7104 do_vfp_nsyn_msr (void)
7106 if (inst.operands[0].isvec)
7107 do_vfp_nsyn_opcode ("fmxr");
7117 if (do_vfp_nsyn_mrs () == SUCCESS)
7120 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
7121 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
7123 _("'CPSR' or 'SPSR' expected"));
7124 inst.instruction |= inst.operands[0].reg << 12;
7125 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
7128 /* Two possible forms:
7129 "{C|S}PSR_<field>, Rm",
7130 "{C|S}PSR_f, #expression". */
7135 if (do_vfp_nsyn_msr () == SUCCESS)
7138 inst.instruction |= inst.operands[0].imm;
7139 if (inst.operands[1].isreg)
7140 inst.instruction |= inst.operands[1].reg;
7143 inst.instruction |= INST_IMMEDIATE;
7144 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
7145 inst.reloc.pc_rel = 0;
7152 if (!inst.operands[2].present)
7153 inst.operands[2].reg = inst.operands[0].reg;
7154 inst.instruction |= inst.operands[0].reg << 16;
7155 inst.instruction |= inst.operands[1].reg;
7156 inst.instruction |= inst.operands[2].reg << 8;
7158 if (inst.operands[0].reg == inst.operands[1].reg)
7159 as_tsktsk (_("rd and rm should be different in mul"));
7162 /* Long Multiply Parser
7163 UMULL RdLo, RdHi, Rm, Rs
7164 SMULL RdLo, RdHi, Rm, Rs
7165 UMLAL RdLo, RdHi, Rm, Rs
7166 SMLAL RdLo, RdHi, Rm, Rs. */
7171 inst.instruction |= inst.operands[0].reg << 12;
7172 inst.instruction |= inst.operands[1].reg << 16;
7173 inst.instruction |= inst.operands[2].reg;
7174 inst.instruction |= inst.operands[3].reg << 8;
7176 /* rdhi, rdlo and rm must all be different. */
7177 if (inst.operands[0].reg == inst.operands[1].reg
7178 || inst.operands[0].reg == inst.operands[2].reg
7179 || inst.operands[1].reg == inst.operands[2].reg)
7180 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
7186 if (inst.operands[0].present)
7188 /* Architectural NOP hints are CPSR sets with no bits selected. */
7189 inst.instruction &= 0xf0000000;
7190 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
7194 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
7195 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
7196 Condition defaults to COND_ALWAYS.
7197 Error if Rd, Rn or Rm are R15. */
7202 inst.instruction |= inst.operands[0].reg << 12;
7203 inst.instruction |= inst.operands[1].reg << 16;
7204 inst.instruction |= inst.operands[2].reg;
7205 if (inst.operands[3].present)
7206 encode_arm_shift (3);
7209 /* ARM V6 PKHTB (Argument Parse). */
7214 if (!inst.operands[3].present)
7216 /* If the shift specifier is omitted, turn the instruction
7217 into pkhbt rd, rm, rn. */
7218 inst.instruction &= 0xfff00010;
7219 inst.instruction |= inst.operands[0].reg << 12;
7220 inst.instruction |= inst.operands[1].reg;
7221 inst.instruction |= inst.operands[2].reg << 16;
7225 inst.instruction |= inst.operands[0].reg << 12;
7226 inst.instruction |= inst.operands[1].reg << 16;
7227 inst.instruction |= inst.operands[2].reg;
7228 encode_arm_shift (3);
7232 /* ARMv5TE: Preload-Cache
7236 Syntactically, like LDR with B=1, W=0, L=1. */
7241 constraint (!inst.operands[0].isreg,
7242 _("'[' expected after PLD mnemonic"));
7243 constraint (inst.operands[0].postind,
7244 _("post-indexed expression used in preload instruction"));
7245 constraint (inst.operands[0].writeback,
7246 _("writeback used in preload instruction"));
7247 constraint (!inst.operands[0].preind,
7248 _("unindexed addressing used in preload instruction"));
7249 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7252 /* ARMv7: PLI <addr_mode> */
7256 constraint (!inst.operands[0].isreg,
7257 _("'[' expected after PLI mnemonic"));
7258 constraint (inst.operands[0].postind,
7259 _("post-indexed expression used in preload instruction"));
7260 constraint (inst.operands[0].writeback,
7261 _("writeback used in preload instruction"));
7262 constraint (!inst.operands[0].preind,
7263 _("unindexed addressing used in preload instruction"));
7264 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
7265 inst.instruction &= ~PRE_INDEX;
7271 inst.operands[1] = inst.operands[0];
7272 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
7273 inst.operands[0].isreg = 1;
7274 inst.operands[0].writeback = 1;
7275 inst.operands[0].reg = REG_SP;
7279 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
7280 word at the specified address and the following word
7282 Unconditionally executed.
7283 Error if Rn is R15. */
7288 inst.instruction |= inst.operands[0].reg << 16;
7289 if (inst.operands[0].writeback)
7290 inst.instruction |= WRITE_BACK;
7293 /* ARM V6 ssat (argument parse). */
7298 inst.instruction |= inst.operands[0].reg << 12;
7299 inst.instruction |= (inst.operands[1].imm - 1) << 16;
7300 inst.instruction |= inst.operands[2].reg;
7302 if (inst.operands[3].present)
7303 encode_arm_shift (3);
7306 /* ARM V6 usat (argument parse). */
7311 inst.instruction |= inst.operands[0].reg << 12;
7312 inst.instruction |= inst.operands[1].imm << 16;
7313 inst.instruction |= inst.operands[2].reg;
7315 if (inst.operands[3].present)
7316 encode_arm_shift (3);
7319 /* ARM V6 ssat16 (argument parse). */
7324 inst.instruction |= inst.operands[0].reg << 12;
7325 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
7326 inst.instruction |= inst.operands[2].reg;
7332 inst.instruction |= inst.operands[0].reg << 12;
7333 inst.instruction |= inst.operands[1].imm << 16;
7334 inst.instruction |= inst.operands[2].reg;
7337 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
7338 preserving the other bits.
7340 setend <endian_specifier>, where <endian_specifier> is either
7346 if (inst.operands[0].imm)
7347 inst.instruction |= 0x200;
7353 unsigned int Rm = (inst.operands[1].present
7354 ? inst.operands[1].reg
7355 : inst.operands[0].reg);
7357 inst.instruction |= inst.operands[0].reg << 12;
7358 inst.instruction |= Rm;
7359 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
7361 inst.instruction |= inst.operands[2].reg << 8;
7362 inst.instruction |= SHIFT_BY_REG;
7365 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
7371 inst.reloc.type = BFD_RELOC_ARM_SMC;
7372 inst.reloc.pc_rel = 0;
7378 inst.reloc.type = BFD_RELOC_ARM_SWI;
7379 inst.reloc.pc_rel = 0;
7382 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
7383 SMLAxy{cond} Rd,Rm,Rs,Rn
7384 SMLAWy{cond} Rd,Rm,Rs,Rn
7385 Error if any register is R15. */
7390 inst.instruction |= inst.operands[0].reg << 16;
7391 inst.instruction |= inst.operands[1].reg;
7392 inst.instruction |= inst.operands[2].reg << 8;
7393 inst.instruction |= inst.operands[3].reg << 12;
7396 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
7397 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
7398 Error if any register is R15.
7399 Warning if Rdlo == Rdhi. */
7404 inst.instruction |= inst.operands[0].reg << 12;
7405 inst.instruction |= inst.operands[1].reg << 16;
7406 inst.instruction |= inst.operands[2].reg;
7407 inst.instruction |= inst.operands[3].reg << 8;
7409 if (inst.operands[0].reg == inst.operands[1].reg)
7410 as_tsktsk (_("rdhi and rdlo must be different"));
7413 /* ARM V5E (El Segundo) signed-multiply (argument parse)
7414 SMULxy{cond} Rd,Rm,Rs
7415 Error if any register is R15. */
7420 inst.instruction |= inst.operands[0].reg << 16;
7421 inst.instruction |= inst.operands[1].reg;
7422 inst.instruction |= inst.operands[2].reg << 8;
7425 /* ARM V6 srs (argument parse). */
7430 inst.instruction |= inst.operands[0].imm;
7431 if (inst.operands[0].writeback)
7432 inst.instruction |= WRITE_BACK;
7435 /* ARM V6 strex (argument parse). */
7440 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
7441 || inst.operands[2].postind || inst.operands[2].writeback
7442 || inst.operands[2].immisreg || inst.operands[2].shifted
7443 || inst.operands[2].negative
7444 /* See comment in do_ldrex(). */
7445 || (inst.operands[2].reg == REG_PC),
7448 constraint (inst.operands[0].reg == inst.operands[1].reg
7449 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
7451 constraint (inst.reloc.exp.X_op != O_constant
7452 || inst.reloc.exp.X_add_number != 0,
7453 _("offset must be zero in ARM encoding"));
7455 inst.instruction |= inst.operands[0].reg << 12;
7456 inst.instruction |= inst.operands[1].reg;
7457 inst.instruction |= inst.operands[2].reg << 16;
7458 inst.reloc.type = BFD_RELOC_UNUSED;
7464 constraint (inst.operands[1].reg % 2 != 0,
7465 _("even register required"));
7466 constraint (inst.operands[2].present
7467 && inst.operands[2].reg != inst.operands[1].reg + 1,
7468 _("can only store two consecutive registers"));
7469 /* If op 2 were present and equal to PC, this function wouldn't
7470 have been called in the first place. */
7471 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
7473 constraint (inst.operands[0].reg == inst.operands[1].reg
7474 || inst.operands[0].reg == inst.operands[1].reg + 1
7475 || inst.operands[0].reg == inst.operands[3].reg,
7478 inst.instruction |= inst.operands[0].reg << 12;
7479 inst.instruction |= inst.operands[1].reg;
7480 inst.instruction |= inst.operands[3].reg << 16;
7483 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
7484 extends it to 32-bits, and adds the result to a value in another
7485 register. You can specify a rotation by 0, 8, 16, or 24 bits
7486 before extracting the 16-bit value.
7487 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
7488 Condition defaults to COND_ALWAYS.
7489 Error if any register uses R15. */
7494 inst.instruction |= inst.operands[0].reg << 12;
7495 inst.instruction |= inst.operands[1].reg << 16;
7496 inst.instruction |= inst.operands[2].reg;
7497 inst.instruction |= inst.operands[3].imm << 10;
7502 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
7503 Condition defaults to COND_ALWAYS.
7504 Error if any register uses R15. */
7509 inst.instruction |= inst.operands[0].reg << 12;
7510 inst.instruction |= inst.operands[1].reg;
7511 inst.instruction |= inst.operands[2].imm << 10;
7514 /* VFP instructions. In a logical order: SP variant first, monad
7515 before dyad, arithmetic then move then load/store. */
7518 do_vfp_sp_monadic (void)
7520 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7521 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7525 do_vfp_sp_dyadic (void)
7527 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7528 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7529 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7533 do_vfp_sp_compare_z (void)
7535 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7539 do_vfp_dp_sp_cvt (void)
7541 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7542 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
7546 do_vfp_sp_dp_cvt (void)
7548 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7549 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7553 do_vfp_reg_from_sp (void)
7555 inst.instruction |= inst.operands[0].reg << 12;
7556 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
7560 do_vfp_reg2_from_sp2 (void)
7562 constraint (inst.operands[2].imm != 2,
7563 _("only two consecutive VFP SP registers allowed here"));
7564 inst.instruction |= inst.operands[0].reg << 12;
7565 inst.instruction |= inst.operands[1].reg << 16;
7566 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
7570 do_vfp_sp_from_reg (void)
7572 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
7573 inst.instruction |= inst.operands[1].reg << 12;
7577 do_vfp_sp2_from_reg2 (void)
7579 constraint (inst.operands[0].imm != 2,
7580 _("only two consecutive VFP SP registers allowed here"));
7581 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
7582 inst.instruction |= inst.operands[1].reg << 12;
7583 inst.instruction |= inst.operands[2].reg << 16;
7587 do_vfp_sp_ldst (void)
7589 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7590 encode_arm_cp_address (1, FALSE, TRUE, 0);
7594 do_vfp_dp_ldst (void)
7596 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7597 encode_arm_cp_address (1, FALSE, TRUE, 0);
7602 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
7604 if (inst.operands[0].writeback)
7605 inst.instruction |= WRITE_BACK;
7607 constraint (ldstm_type != VFP_LDSTMIA,
7608 _("this addressing mode requires base-register writeback"));
7609 inst.instruction |= inst.operands[0].reg << 16;
7610 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
7611 inst.instruction |= inst.operands[1].imm;
7615 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
7619 if (inst.operands[0].writeback)
7620 inst.instruction |= WRITE_BACK;
7622 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
7623 _("this addressing mode requires base-register writeback"));
7625 inst.instruction |= inst.operands[0].reg << 16;
7626 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7628 count = inst.operands[1].imm << 1;
7629 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
7632 inst.instruction |= count;
7636 do_vfp_sp_ldstmia (void)
7638 vfp_sp_ldstm (VFP_LDSTMIA);
7642 do_vfp_sp_ldstmdb (void)
7644 vfp_sp_ldstm (VFP_LDSTMDB);
7648 do_vfp_dp_ldstmia (void)
7650 vfp_dp_ldstm (VFP_LDSTMIA);
7654 do_vfp_dp_ldstmdb (void)
7656 vfp_dp_ldstm (VFP_LDSTMDB);
7660 do_vfp_xp_ldstmia (void)
7662 vfp_dp_ldstm (VFP_LDSTMIAX);
7666 do_vfp_xp_ldstmdb (void)
7668 vfp_dp_ldstm (VFP_LDSTMDBX);
7672 do_vfp_dp_rd_rm (void)
7674 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7675 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7679 do_vfp_dp_rn_rd (void)
7681 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7682 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7686 do_vfp_dp_rd_rn (void)
7688 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7689 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7693 do_vfp_dp_rd_rn_rm (void)
7695 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7696 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7697 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7703 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7707 do_vfp_dp_rm_rd_rn (void)
7709 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7710 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7711 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7714 /* VFPv3 instructions. */
7716 do_vfp_sp_const (void)
7718 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7719 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7720 inst.instruction |= (inst.operands[1].imm >> 4);
7724 do_vfp_dp_const (void)
7726 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7727 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7728 inst.instruction |= (inst.operands[1].imm >> 4);
7732 vfp_conv (int srcsize)
7734 unsigned immbits = srcsize - inst.operands[1].imm;
7735 inst.instruction |= (immbits & 1) << 5;
7736 inst.instruction |= (immbits >> 1);
7740 do_vfp_sp_conv_16 (void)
7742 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7747 do_vfp_dp_conv_16 (void)
7749 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7754 do_vfp_sp_conv_32 (void)
7756 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7761 do_vfp_dp_conv_32 (void)
7763 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7768 /* FPA instructions. Also in a logical order. */
7773 inst.instruction |= inst.operands[0].reg << 16;
7774 inst.instruction |= inst.operands[1].reg;
7778 do_fpa_ldmstm (void)
7780 inst.instruction |= inst.operands[0].reg << 12;
7781 switch (inst.operands[1].imm)
7783 case 1: inst.instruction |= CP_T_X; break;
7784 case 2: inst.instruction |= CP_T_Y; break;
7785 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7790 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7792 /* The instruction specified "ea" or "fd", so we can only accept
7793 [Rn]{!}. The instruction does not really support stacking or
7794 unstacking, so we have to emulate these by setting appropriate
7795 bits and offsets. */
7796 constraint (inst.reloc.exp.X_op != O_constant
7797 || inst.reloc.exp.X_add_number != 0,
7798 _("this instruction does not support indexing"));
7800 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7801 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7803 if (!(inst.instruction & INDEX_UP))
7804 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7806 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7808 inst.operands[2].preind = 0;
7809 inst.operands[2].postind = 1;
7813 encode_arm_cp_address (2, TRUE, TRUE, 0);
7817 /* iWMMXt instructions: strictly in alphabetical order. */
7820 do_iwmmxt_tandorc (void)
7822 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7826 do_iwmmxt_textrc (void)
7828 inst.instruction |= inst.operands[0].reg << 12;
7829 inst.instruction |= inst.operands[1].imm;
7833 do_iwmmxt_textrm (void)
7835 inst.instruction |= inst.operands[0].reg << 12;
7836 inst.instruction |= inst.operands[1].reg << 16;
7837 inst.instruction |= inst.operands[2].imm;
7841 do_iwmmxt_tinsr (void)
7843 inst.instruction |= inst.operands[0].reg << 16;
7844 inst.instruction |= inst.operands[1].reg << 12;
7845 inst.instruction |= inst.operands[2].imm;
7849 do_iwmmxt_tmia (void)
7851 inst.instruction |= inst.operands[0].reg << 5;
7852 inst.instruction |= inst.operands[1].reg;
7853 inst.instruction |= inst.operands[2].reg << 12;
7857 do_iwmmxt_waligni (void)
7859 inst.instruction |= inst.operands[0].reg << 12;
7860 inst.instruction |= inst.operands[1].reg << 16;
7861 inst.instruction |= inst.operands[2].reg;
7862 inst.instruction |= inst.operands[3].imm << 20;
7866 do_iwmmxt_wmerge (void)
7868 inst.instruction |= inst.operands[0].reg << 12;
7869 inst.instruction |= inst.operands[1].reg << 16;
7870 inst.instruction |= inst.operands[2].reg;
7871 inst.instruction |= inst.operands[3].imm << 21;
7875 do_iwmmxt_wmov (void)
7877 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7878 inst.instruction |= inst.operands[0].reg << 12;
7879 inst.instruction |= inst.operands[1].reg << 16;
7880 inst.instruction |= inst.operands[1].reg;
7884 do_iwmmxt_wldstbh (void)
7887 inst.instruction |= inst.operands[0].reg << 12;
7889 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7891 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7892 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7896 do_iwmmxt_wldstw (void)
7898 /* RIWR_RIWC clears .isreg for a control register. */
7899 if (!inst.operands[0].isreg)
7901 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7902 inst.instruction |= 0xf0000000;
7905 inst.instruction |= inst.operands[0].reg << 12;
7906 encode_arm_cp_address (1, TRUE, TRUE, 0);
7910 do_iwmmxt_wldstd (void)
7912 inst.instruction |= inst.operands[0].reg << 12;
7913 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
7914 && inst.operands[1].immisreg)
7916 inst.instruction &= ~0x1a000ff;
7917 inst.instruction |= (0xf << 28);
7918 if (inst.operands[1].preind)
7919 inst.instruction |= PRE_INDEX;
7920 if (!inst.operands[1].negative)
7921 inst.instruction |= INDEX_UP;
7922 if (inst.operands[1].writeback)
7923 inst.instruction |= WRITE_BACK;
7924 inst.instruction |= inst.operands[1].reg << 16;
7925 inst.instruction |= inst.reloc.exp.X_add_number << 4;
7926 inst.instruction |= inst.operands[1].imm;
7929 encode_arm_cp_address (1, TRUE, FALSE, 0);
7933 do_iwmmxt_wshufh (void)
7935 inst.instruction |= inst.operands[0].reg << 12;
7936 inst.instruction |= inst.operands[1].reg << 16;
7937 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
7938 inst.instruction |= (inst.operands[2].imm & 0x0f);
7942 do_iwmmxt_wzero (void)
7944 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7945 inst.instruction |= inst.operands[0].reg;
7946 inst.instruction |= inst.operands[0].reg << 12;
7947 inst.instruction |= inst.operands[0].reg << 16;
7951 do_iwmmxt_wrwrwr_or_imm5 (void)
7953 if (inst.operands[2].isreg)
7956 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
7957 _("immediate operand requires iWMMXt2"));
7959 if (inst.operands[2].imm == 0)
7961 switch ((inst.instruction >> 20) & 0xf)
7967 /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16. */
7968 inst.operands[2].imm = 16;
7969 inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
7975 /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32. */
7976 inst.operands[2].imm = 32;
7977 inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
7984 /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn. */
7986 wrn = (inst.instruction >> 16) & 0xf;
7987 inst.instruction &= 0xff0fff0f;
7988 inst.instruction |= wrn;
7989 /* Bail out here; the instruction is now assembled. */
7994 /* Map 32 -> 0, etc. */
7995 inst.operands[2].imm &= 0x1f;
7996 inst.instruction |= (0xf << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
8000 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
8001 operations first, then control, shift, and load/store. */
8003 /* Insns like "foo X,Y,Z". */
8006 do_mav_triple (void)
8008 inst.instruction |= inst.operands[0].reg << 16;
8009 inst.instruction |= inst.operands[1].reg;
8010 inst.instruction |= inst.operands[2].reg << 12;
8013 /* Insns like "foo W,X,Y,Z".
8014 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
8019 inst.instruction |= inst.operands[0].reg << 5;
8020 inst.instruction |= inst.operands[1].reg << 12;
8021 inst.instruction |= inst.operands[2].reg << 16;
8022 inst.instruction |= inst.operands[3].reg;
8025 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
8029 inst.instruction |= inst.operands[1].reg << 12;
8032 /* Maverick shift immediate instructions.
8033 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
8034 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
8039 int imm = inst.operands[2].imm;
8041 inst.instruction |= inst.operands[0].reg << 12;
8042 inst.instruction |= inst.operands[1].reg << 16;
8044 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
8045 Bits 5-7 of the insn should have bits 4-6 of the immediate.
8046 Bit 4 should be 0. */
8047 imm = (imm & 0xf) | ((imm & 0x70) << 1);
8049 inst.instruction |= imm;
8052 /* XScale instructions. Also sorted arithmetic before move. */
8054 /* Xscale multiply-accumulate (argument parse)
8057 MIAxycc acc0,Rm,Rs. */
8062 inst.instruction |= inst.operands[1].reg;
8063 inst.instruction |= inst.operands[2].reg << 12;
8066 /* Xscale move-accumulator-register (argument parse)
8068 MARcc acc0,RdLo,RdHi. */
8073 inst.instruction |= inst.operands[1].reg << 12;
8074 inst.instruction |= inst.operands[2].reg << 16;
8077 /* Xscale move-register-accumulator (argument parse)
8079 MRAcc RdLo,RdHi,acc0. */
8084 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
8085 inst.instruction |= inst.operands[0].reg << 12;
8086 inst.instruction |= inst.operands[1].reg << 16;
8089 /* Encoding functions relevant only to Thumb. */
8091 /* inst.operands[i] is a shifted-register operand; encode
8092 it into inst.instruction in the format used by Thumb32. */
8095 encode_thumb32_shifted_operand (int i)
8097 unsigned int value = inst.reloc.exp.X_add_number;
8098 unsigned int shift = inst.operands[i].shift_kind;
8100 constraint (inst.operands[i].immisreg,
8101 _("shift by register not allowed in thumb mode"));
8102 inst.instruction |= inst.operands[i].reg;
8103 if (shift == SHIFT_RRX)
8104 inst.instruction |= SHIFT_ROR << 4;
8107 constraint (inst.reloc.exp.X_op != O_constant,
8108 _("expression too complex"));
8110 constraint (value > 32
8111 || (value == 32 && (shift == SHIFT_LSL
8112 || shift == SHIFT_ROR)),
8113 _("shift expression is too large"));
8117 else if (value == 32)
8120 inst.instruction |= shift << 4;
8121 inst.instruction |= (value & 0x1c) << 10;
8122 inst.instruction |= (value & 0x03) << 6;
8127 /* inst.operands[i] was set up by parse_address. Encode it into a
8128 Thumb32 format load or store instruction. Reject forms that cannot
8129 be used with such instructions. If is_t is true, reject forms that
8130 cannot be used with a T instruction; if is_d is true, reject forms
8131 that cannot be used with a D instruction. */
8134 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
8136 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
8138 constraint (!inst.operands[i].isreg,
8139 _("Instruction does not support =N addresses"));
8141 inst.instruction |= inst.operands[i].reg << 16;
8142 if (inst.operands[i].immisreg)
8144 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
8145 constraint (is_t || is_d, _("cannot use register index with this instruction"));
8146 constraint (inst.operands[i].negative,
8147 _("Thumb does not support negative register indexing"));
8148 constraint (inst.operands[i].postind,
8149 _("Thumb does not support register post-indexing"));
8150 constraint (inst.operands[i].writeback,
8151 _("Thumb does not support register indexing with writeback"));
8152 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
8153 _("Thumb supports only LSL in shifted register indexing"));
8155 inst.instruction |= inst.operands[i].imm;
8156 if (inst.operands[i].shifted)
8158 constraint (inst.reloc.exp.X_op != O_constant,
8159 _("expression too complex"));
8160 constraint (inst.reloc.exp.X_add_number < 0
8161 || inst.reloc.exp.X_add_number > 3,
8162 _("shift out of range"));
8163 inst.instruction |= inst.reloc.exp.X_add_number << 4;
8165 inst.reloc.type = BFD_RELOC_UNUSED;
8167 else if (inst.operands[i].preind)
8169 constraint (is_pc && inst.operands[i].writeback,
8170 _("cannot use writeback with PC-relative addressing"));
8171 constraint (is_t && inst.operands[i].writeback,
8172 _("cannot use writeback with this instruction"));
8176 inst.instruction |= 0x01000000;
8177 if (inst.operands[i].writeback)
8178 inst.instruction |= 0x00200000;
8182 inst.instruction |= 0x00000c00;
8183 if (inst.operands[i].writeback)
8184 inst.instruction |= 0x00000100;
8186 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8188 else if (inst.operands[i].postind)
8190 assert (inst.operands[i].writeback);
8191 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
8192 constraint (is_t, _("cannot use post-indexing with this instruction"));
8195 inst.instruction |= 0x00200000;
8197 inst.instruction |= 0x00000900;
8198 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
8200 else /* unindexed - only for coprocessor */
8201 inst.error = _("instruction does not accept unindexed addressing");
8204 /* Table of Thumb instructions which exist in both 16- and 32-bit
8205 encodings (the latter only in post-V6T2 cores). The index is the
8206 value used in the insns table below. When there is more than one
8207 possible 16-bit encoding for the instruction, this table always
8209 Also contains several pseudo-instructions used during relaxation. */
8210 #define T16_32_TAB \
8211 X(adc, 4140, eb400000), \
8212 X(adcs, 4140, eb500000), \
8213 X(add, 1c00, eb000000), \
8214 X(adds, 1c00, eb100000), \
8215 X(addi, 0000, f1000000), \
8216 X(addis, 0000, f1100000), \
8217 X(add_pc,000f, f20f0000), \
8218 X(add_sp,000d, f10d0000), \
8219 X(adr, 000f, f20f0000), \
8220 X(and, 4000, ea000000), \
8221 X(ands, 4000, ea100000), \
8222 X(asr, 1000, fa40f000), \
8223 X(asrs, 1000, fa50f000), \
8224 X(b, e000, f000b000), \
8225 X(bcond, d000, f0008000), \
8226 X(bic, 4380, ea200000), \
8227 X(bics, 4380, ea300000), \
8228 X(cmn, 42c0, eb100f00), \
8229 X(cmp, 2800, ebb00f00), \
8230 X(cpsie, b660, f3af8400), \
8231 X(cpsid, b670, f3af8600), \
8232 X(cpy, 4600, ea4f0000), \
8233 X(dec_sp,80dd, f1bd0d00), \
8234 X(eor, 4040, ea800000), \
8235 X(eors, 4040, ea900000), \
8236 X(inc_sp,00dd, f10d0d00), \
8237 X(ldmia, c800, e8900000), \
8238 X(ldr, 6800, f8500000), \
8239 X(ldrb, 7800, f8100000), \
8240 X(ldrh, 8800, f8300000), \
8241 X(ldrsb, 5600, f9100000), \
8242 X(ldrsh, 5e00, f9300000), \
8243 X(ldr_pc,4800, f85f0000), \
8244 X(ldr_pc2,4800, f85f0000), \
8245 X(ldr_sp,9800, f85d0000), \
8246 X(lsl, 0000, fa00f000), \
8247 X(lsls, 0000, fa10f000), \
8248 X(lsr, 0800, fa20f000), \
8249 X(lsrs, 0800, fa30f000), \
8250 X(mov, 2000, ea4f0000), \
8251 X(movs, 2000, ea5f0000), \
8252 X(mul, 4340, fb00f000), \
8253 X(muls, 4340, ffffffff), /* no 32b muls */ \
8254 X(mvn, 43c0, ea6f0000), \
8255 X(mvns, 43c0, ea7f0000), \
8256 X(neg, 4240, f1c00000), /* rsb #0 */ \
8257 X(negs, 4240, f1d00000), /* rsbs #0 */ \
8258 X(orr, 4300, ea400000), \
8259 X(orrs, 4300, ea500000), \
8260 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
8261 X(push, b400, e92d0000), /* stmdb sp!,... */ \
8262 X(rev, ba00, fa90f080), \
8263 X(rev16, ba40, fa90f090), \
8264 X(revsh, bac0, fa90f0b0), \
8265 X(ror, 41c0, fa60f000), \
8266 X(rors, 41c0, fa70f000), \
8267 X(sbc, 4180, eb600000), \
8268 X(sbcs, 4180, eb700000), \
8269 X(stmia, c000, e8800000), \
8270 X(str, 6000, f8400000), \
8271 X(strb, 7000, f8000000), \
8272 X(strh, 8000, f8200000), \
8273 X(str_sp,9000, f84d0000), \
8274 X(sub, 1e00, eba00000), \
8275 X(subs, 1e00, ebb00000), \
8276 X(subi, 8000, f1a00000), \
8277 X(subis, 8000, f1b00000), \
8278 X(sxtb, b240, fa4ff080), \
8279 X(sxth, b200, fa0ff080), \
8280 X(tst, 4200, ea100f00), \
8281 X(uxtb, b2c0, fa5ff080), \
8282 X(uxth, b280, fa1ff080), \
8283 X(nop, bf00, f3af8000), \
8284 X(yield, bf10, f3af8001), \
8285 X(wfe, bf20, f3af8002), \
8286 X(wfi, bf30, f3af8003), \
8287 X(sev, bf40, f3af9004), /* typo, 8004? */
8289 /* To catch errors in encoding functions, the codes are all offset by
8290 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
8291 as 16-bit instructions. */
8292 #define X(a,b,c) T_MNEM_##a
8293 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
8296 #define X(a,b,c) 0x##b
8297 static const unsigned short thumb_op16[] = { T16_32_TAB };
8298 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
8301 #define X(a,b,c) 0x##c
8302 static const unsigned int thumb_op32[] = { T16_32_TAB };
8303 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
8304 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
8308 /* Thumb instruction encoders, in alphabetical order. */
8312 do_t_add_sub_w (void)
8316 Rd = inst.operands[0].reg;
8317 Rn = inst.operands[1].reg;
8319 constraint (Rd == 15, _("PC not allowed as destination"));
8320 inst.instruction |= (Rn << 16) | (Rd << 8);
8321 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8324 /* Parse an add or subtract instruction. We get here with inst.instruction
8325 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
8332 Rd = inst.operands[0].reg;
8333 Rs = (inst.operands[1].present
8334 ? inst.operands[1].reg /* Rd, Rs, foo */
8335 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8343 flags = (inst.instruction == T_MNEM_adds
8344 || inst.instruction == T_MNEM_subs);
8346 narrow = (current_it_mask == 0);
8348 narrow = (current_it_mask != 0);
8349 if (!inst.operands[2].isreg)
8353 add = (inst.instruction == T_MNEM_add
8354 || inst.instruction == T_MNEM_adds);
8356 if (inst.size_req != 4)
8358 /* Attempt to use a narrow opcode, with relaxation if
8360 if (Rd == REG_SP && Rs == REG_SP && !flags)
8361 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
8362 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
8363 opcode = T_MNEM_add_sp;
8364 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
8365 opcode = T_MNEM_add_pc;
8366 else if (Rd <= 7 && Rs <= 7 && narrow)
8369 opcode = add ? T_MNEM_addis : T_MNEM_subis;
8371 opcode = add ? T_MNEM_addi : T_MNEM_subi;
8375 inst.instruction = THUMB_OP16(opcode);
8376 inst.instruction |= (Rd << 4) | Rs;
8377 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8378 if (inst.size_req != 2)
8379 inst.relax = opcode;
8382 constraint (inst.size_req == 2, BAD_HIREG);
8384 if (inst.size_req == 4
8385 || (inst.size_req != 2 && !opcode))
8389 /* Always use addw/subw. */
8390 inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
8391 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
8395 inst.instruction = THUMB_OP32 (inst.instruction);
8396 inst.instruction = (inst.instruction & 0xe1ffffff)
8399 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8401 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_IMM;
8403 inst.instruction |= inst.operands[0].reg << 8;
8404 inst.instruction |= inst.operands[1].reg << 16;
8409 Rn = inst.operands[2].reg;
8410 /* See if we can do this with a 16-bit instruction. */
8411 if (!inst.operands[2].shifted && inst.size_req != 4)
8413 if (Rd > 7 || Rs > 7 || Rn > 7)
8418 inst.instruction = ((inst.instruction == T_MNEM_adds
8419 || inst.instruction == T_MNEM_add)
8422 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8426 if (inst.instruction == T_MNEM_add)
8430 inst.instruction = T_OPCODE_ADD_HI;
8431 inst.instruction |= (Rd & 8) << 4;
8432 inst.instruction |= (Rd & 7);
8433 inst.instruction |= Rn << 3;
8436 /* ... because addition is commutative! */
8439 inst.instruction = T_OPCODE_ADD_HI;
8440 inst.instruction |= (Rd & 8) << 4;
8441 inst.instruction |= (Rd & 7);
8442 inst.instruction |= Rs << 3;
8447 /* If we get here, it can't be done in 16 bits. */
8448 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
8449 _("shift must be constant"));
8450 inst.instruction = THUMB_OP32 (inst.instruction);
8451 inst.instruction |= Rd << 8;
8452 inst.instruction |= Rs << 16;
8453 encode_thumb32_shifted_operand (2);
8458 constraint (inst.instruction == T_MNEM_adds
8459 || inst.instruction == T_MNEM_subs,
8462 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
8464 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
8465 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
8468 inst.instruction = (inst.instruction == T_MNEM_add
8470 inst.instruction |= (Rd << 4) | Rs;
8471 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8475 Rn = inst.operands[2].reg;
8476 constraint (inst.operands[2].shifted, _("unshifted register required"));
8478 /* We now have Rd, Rs, and Rn set to registers. */
8479 if (Rd > 7 || Rs > 7 || Rn > 7)
8481 /* Can't do this for SUB. */
8482 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
8483 inst.instruction = T_OPCODE_ADD_HI;
8484 inst.instruction |= (Rd & 8) << 4;
8485 inst.instruction |= (Rd & 7);
8487 inst.instruction |= Rn << 3;
8489 inst.instruction |= Rs << 3;
8491 constraint (1, _("dest must overlap one source register"));
8495 inst.instruction = (inst.instruction == T_MNEM_add
8496 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
8497 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
8505 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
8507 /* Defer to section relaxation. */
8508 inst.relax = inst.instruction;
8509 inst.instruction = THUMB_OP16 (inst.instruction);
8510 inst.instruction |= inst.operands[0].reg << 4;
8512 else if (unified_syntax && inst.size_req != 2)
8514 /* Generate a 32-bit opcode. */
8515 inst.instruction = THUMB_OP32 (inst.instruction);
8516 inst.instruction |= inst.operands[0].reg << 8;
8517 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
8518 inst.reloc.pc_rel = 1;
8522 /* Generate a 16-bit opcode. */
8523 inst.instruction = THUMB_OP16 (inst.instruction);
8524 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
8525 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
8526 inst.reloc.pc_rel = 1;
8528 inst.instruction |= inst.operands[0].reg << 4;
8532 /* Arithmetic instructions for which there is just one 16-bit
8533 instruction encoding, and it allows only two low registers.
8534 For maximal compatibility with ARM syntax, we allow three register
8535 operands even when Thumb-32 instructions are not available, as long
8536 as the first two are identical. For instance, both "sbc r0,r1" and
8537 "sbc r0,r0,r1" are allowed. */
8543 Rd = inst.operands[0].reg;
8544 Rs = (inst.operands[1].present
8545 ? inst.operands[1].reg /* Rd, Rs, foo */
8546 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8547 Rn = inst.operands[2].reg;
8551 if (!inst.operands[2].isreg)
8553 /* For an immediate, we always generate a 32-bit opcode;
8554 section relaxation will shrink it later if possible. */
8555 inst.instruction = THUMB_OP32 (inst.instruction);
8556 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8557 inst.instruction |= Rd << 8;
8558 inst.instruction |= Rs << 16;
8559 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8565 /* See if we can do this with a 16-bit instruction. */
8566 if (THUMB_SETS_FLAGS (inst.instruction))
8567 narrow = current_it_mask == 0;
8569 narrow = current_it_mask != 0;
8571 if (Rd > 7 || Rn > 7 || Rs > 7)
8573 if (inst.operands[2].shifted)
8575 if (inst.size_req == 4)
8581 inst.instruction = THUMB_OP16 (inst.instruction);
8582 inst.instruction |= Rd;
8583 inst.instruction |= Rn << 3;
8587 /* If we get here, it can't be done in 16 bits. */
8588 constraint (inst.operands[2].shifted
8589 && inst.operands[2].immisreg,
8590 _("shift must be constant"));
8591 inst.instruction = THUMB_OP32 (inst.instruction);
8592 inst.instruction |= Rd << 8;
8593 inst.instruction |= Rs << 16;
8594 encode_thumb32_shifted_operand (2);
8599 /* On its face this is a lie - the instruction does set the
8600 flags. However, the only supported mnemonic in this mode
8602 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8604 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8605 _("unshifted register required"));
8606 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8607 constraint (Rd != Rs,
8608 _("dest and source1 must be the same register"));
8610 inst.instruction = THUMB_OP16 (inst.instruction);
8611 inst.instruction |= Rd;
8612 inst.instruction |= Rn << 3;
8616 /* Similarly, but for instructions where the arithmetic operation is
8617 commutative, so we can allow either of them to be different from
8618 the destination operand in a 16-bit instruction. For instance, all
8619 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
8626 Rd = inst.operands[0].reg;
8627 Rs = (inst.operands[1].present
8628 ? inst.operands[1].reg /* Rd, Rs, foo */
8629 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
8630 Rn = inst.operands[2].reg;
8634 if (!inst.operands[2].isreg)
8636 /* For an immediate, we always generate a 32-bit opcode;
8637 section relaxation will shrink it later if possible. */
8638 inst.instruction = THUMB_OP32 (inst.instruction);
8639 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8640 inst.instruction |= Rd << 8;
8641 inst.instruction |= Rs << 16;
8642 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8648 /* See if we can do this with a 16-bit instruction. */
8649 if (THUMB_SETS_FLAGS (inst.instruction))
8650 narrow = current_it_mask == 0;
8652 narrow = current_it_mask != 0;
8654 if (Rd > 7 || Rn > 7 || Rs > 7)
8656 if (inst.operands[2].shifted)
8658 if (inst.size_req == 4)
8665 inst.instruction = THUMB_OP16 (inst.instruction);
8666 inst.instruction |= Rd;
8667 inst.instruction |= Rn << 3;
8672 inst.instruction = THUMB_OP16 (inst.instruction);
8673 inst.instruction |= Rd;
8674 inst.instruction |= Rs << 3;
8679 /* If we get here, it can't be done in 16 bits. */
8680 constraint (inst.operands[2].shifted
8681 && inst.operands[2].immisreg,
8682 _("shift must be constant"));
8683 inst.instruction = THUMB_OP32 (inst.instruction);
8684 inst.instruction |= Rd << 8;
8685 inst.instruction |= Rs << 16;
8686 encode_thumb32_shifted_operand (2);
8691 /* On its face this is a lie - the instruction does set the
8692 flags. However, the only supported mnemonic in this mode
8694 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8696 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
8697 _("unshifted register required"));
8698 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
8700 inst.instruction = THUMB_OP16 (inst.instruction);
8701 inst.instruction |= Rd;
8704 inst.instruction |= Rn << 3;
8706 inst.instruction |= Rs << 3;
8708 constraint (1, _("dest must overlap one source register"));
8715 if (inst.operands[0].present)
8717 constraint ((inst.instruction & 0xf0) != 0x40
8718 && inst.operands[0].imm != 0xf,
8719 "bad barrier type");
8720 inst.instruction |= inst.operands[0].imm;
8723 inst.instruction |= 0xf;
8729 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8730 constraint (msb > 32, _("bit-field extends past end of register"));
8731 /* The instruction encoding stores the LSB and MSB,
8732 not the LSB and width. */
8733 inst.instruction |= inst.operands[0].reg << 8;
8734 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8735 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8736 inst.instruction |= msb - 1;
8744 /* #0 in second position is alternative syntax for bfc, which is
8745 the same instruction but with REG_PC in the Rm field. */
8746 if (!inst.operands[1].isreg)
8747 inst.operands[1].reg = REG_PC;
8749 msb = inst.operands[2].imm + inst.operands[3].imm;
8750 constraint (msb > 32, _("bit-field extends past end of register"));
8751 /* The instruction encoding stores the LSB and MSB,
8752 not the LSB and width. */
8753 inst.instruction |= inst.operands[0].reg << 8;
8754 inst.instruction |= inst.operands[1].reg << 16;
8755 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8756 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8757 inst.instruction |= msb - 1;
8763 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8764 _("bit-field extends past end of register"));
8765 inst.instruction |= inst.operands[0].reg << 8;
8766 inst.instruction |= inst.operands[1].reg << 16;
8767 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8768 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8769 inst.instruction |= inst.operands[3].imm - 1;
8772 /* ARM V5 Thumb BLX (argument parse)
8773 BLX <target_addr> which is BLX(1)
8774 BLX <Rm> which is BLX(2)
8775 Unfortunately, there are two different opcodes for this mnemonic.
8776 So, the insns[].value is not used, and the code here zaps values
8777 into inst.instruction.
8779 ??? How to take advantage of the additional two bits of displacement
8780 available in Thumb32 mode? Need new relocation? */
8785 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8786 if (inst.operands[0].isreg)
8787 /* We have a register, so this is BLX(2). */
8788 inst.instruction |= inst.operands[0].reg << 3;
8791 /* No register. This must be BLX(1). */
8792 inst.instruction = 0xf000e800;
8794 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8795 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8798 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8799 inst.reloc.pc_rel = 1;
8809 if (current_it_mask)
8811 /* Conditional branches inside IT blocks are encoded as unconditional
8814 /* A branch must be the last instruction in an IT block. */
8815 constraint (current_it_mask != 0x10, BAD_BRANCH);
8820 if (cond != COND_ALWAYS)
8821 opcode = T_MNEM_bcond;
8823 opcode = inst.instruction;
8825 if (unified_syntax && inst.size_req == 4)
8827 inst.instruction = THUMB_OP32(opcode);
8828 if (cond == COND_ALWAYS)
8829 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8832 assert (cond != 0xF);
8833 inst.instruction |= cond << 22;
8834 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8839 inst.instruction = THUMB_OP16(opcode);
8840 if (cond == COND_ALWAYS)
8841 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8844 inst.instruction |= cond << 8;
8845 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8847 /* Allow section relaxation. */
8848 if (unified_syntax && inst.size_req != 2)
8849 inst.relax = opcode;
8852 inst.reloc.pc_rel = 1;
8858 constraint (inst.cond != COND_ALWAYS,
8859 _("instruction is always unconditional"));
8860 if (inst.operands[0].present)
8862 constraint (inst.operands[0].imm > 255,
8863 _("immediate value out of range"));
8864 inst.instruction |= inst.operands[0].imm;
8869 do_t_branch23 (void)
8871 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8872 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8873 inst.reloc.pc_rel = 1;
8875 /* If the destination of the branch is a defined symbol which does not have
8876 the THUMB_FUNC attribute, then we must be calling a function which has
8877 the (interfacearm) attribute. We look for the Thumb entry point to that
8878 function and change the branch to refer to that function instead. */
8879 if ( inst.reloc.exp.X_op == O_symbol
8880 && inst.reloc.exp.X_add_symbol != NULL
8881 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8882 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8883 inst.reloc.exp.X_add_symbol =
8884 find_real_start (inst.reloc.exp.X_add_symbol);
8890 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8891 inst.instruction |= inst.operands[0].reg << 3;
8892 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8893 should cause the alignment to be checked once it is known. This is
8894 because BX PC only works if the instruction is word aligned. */
8900 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8901 if (inst.operands[0].reg == REG_PC)
8902 as_tsktsk (_("use of r15 in bxj is not really useful"));
8904 inst.instruction |= inst.operands[0].reg << 16;
8910 inst.instruction |= inst.operands[0].reg << 8;
8911 inst.instruction |= inst.operands[1].reg << 16;
8912 inst.instruction |= inst.operands[1].reg;
8918 constraint (current_it_mask, BAD_NOT_IT);
8919 inst.instruction |= inst.operands[0].imm;
8925 constraint (current_it_mask, BAD_NOT_IT);
8927 && (inst.operands[1].present || inst.size_req == 4)
8928 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
8930 unsigned int imod = (inst.instruction & 0x0030) >> 4;
8931 inst.instruction = 0xf3af8000;
8932 inst.instruction |= imod << 9;
8933 inst.instruction |= inst.operands[0].imm << 5;
8934 if (inst.operands[1].present)
8935 inst.instruction |= 0x100 | inst.operands[1].imm;
8939 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
8940 && (inst.operands[0].imm & 4),
8941 _("selected processor does not support 'A' form "
8942 "of this instruction"));
8943 constraint (inst.operands[1].present || inst.size_req == 4,
8944 _("Thumb does not support the 2-argument "
8945 "form of this instruction"));
8946 inst.instruction |= inst.operands[0].imm;
8950 /* THUMB CPY instruction (argument parse). */
8955 if (inst.size_req == 4)
8957 inst.instruction = THUMB_OP32 (T_MNEM_mov);
8958 inst.instruction |= inst.operands[0].reg << 8;
8959 inst.instruction |= inst.operands[1].reg;
8963 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8964 inst.instruction |= (inst.operands[0].reg & 0x7);
8965 inst.instruction |= inst.operands[1].reg << 3;
8972 constraint (current_it_mask, BAD_NOT_IT);
8973 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8974 inst.instruction |= inst.operands[0].reg;
8975 inst.reloc.pc_rel = 1;
8976 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
8982 inst.instruction |= inst.operands[0].imm;
8988 if (!inst.operands[1].present)
8989 inst.operands[1].reg = inst.operands[0].reg;
8990 inst.instruction |= inst.operands[0].reg << 8;
8991 inst.instruction |= inst.operands[1].reg << 16;
8992 inst.instruction |= inst.operands[2].reg;
8998 if (unified_syntax && inst.size_req == 4)
8999 inst.instruction = THUMB_OP32 (inst.instruction);
9001 inst.instruction = THUMB_OP16 (inst.instruction);
9007 unsigned int cond = inst.operands[0].imm;
9009 constraint (current_it_mask, BAD_NOT_IT);
9010 current_it_mask = (inst.instruction & 0xf) | 0x10;
9013 /* If the condition is a negative condition, invert the mask. */
9014 if ((cond & 0x1) == 0x0)
9016 unsigned int mask = inst.instruction & 0x000f;
9018 if ((mask & 0x7) == 0)
9019 /* no conversion needed */;
9020 else if ((mask & 0x3) == 0)
9022 else if ((mask & 0x1) == 0)
9027 inst.instruction &= 0xfff0;
9028 inst.instruction |= mask;
9031 inst.instruction |= cond << 4;
9037 /* This really doesn't seem worth it. */
9038 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9039 _("expression too complex"));
9040 constraint (inst.operands[1].writeback,
9041 _("Thumb load/store multiple does not support {reglist}^"));
9045 /* See if we can use a 16-bit instruction. */
9046 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
9047 && inst.size_req != 4
9048 && inst.operands[0].reg <= 7
9049 && !(inst.operands[1].imm & ~0xff)
9050 && (inst.instruction == T_MNEM_stmia
9051 ? inst.operands[0].writeback
9052 : (inst.operands[0].writeback
9053 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
9055 if (inst.instruction == T_MNEM_stmia
9056 && (inst.operands[1].imm & (1 << inst.operands[0].reg))
9057 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
9058 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9059 inst.operands[0].reg);
9061 inst.instruction = THUMB_OP16 (inst.instruction);
9062 inst.instruction |= inst.operands[0].reg << 8;
9063 inst.instruction |= inst.operands[1].imm;
9067 if (inst.operands[1].imm & (1 << 13))
9068 as_warn (_("SP should not be in register list"));
9069 if (inst.instruction == T_MNEM_stmia)
9071 if (inst.operands[1].imm & (1 << 15))
9072 as_warn (_("PC should not be in register list"));
9073 if (inst.operands[1].imm & (1 << inst.operands[0].reg))
9074 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9075 inst.operands[0].reg);
9079 if (inst.operands[1].imm & (1 << 14)
9080 && inst.operands[1].imm & (1 << 15))
9081 as_warn (_("LR and PC should not both be in register list"));
9082 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
9083 && inst.operands[0].writeback)
9084 as_warn (_("base register should not be in register list "
9085 "when written back"));
9087 if (inst.instruction < 0xffff)
9088 inst.instruction = THUMB_OP32 (inst.instruction);
9089 inst.instruction |= inst.operands[0].reg << 16;
9090 inst.instruction |= inst.operands[1].imm;
9091 if (inst.operands[0].writeback)
9092 inst.instruction |= WRITE_BACK;
9097 constraint (inst.operands[0].reg > 7
9098 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
9099 if (inst.instruction == T_MNEM_stmia)
9101 if (!inst.operands[0].writeback)
9102 as_warn (_("this instruction will write back the base register"));
9103 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
9104 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
9105 as_warn (_("value stored for r%d is UNPREDICTABLE"),
9106 inst.operands[0].reg);
9110 if (!inst.operands[0].writeback
9111 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
9112 as_warn (_("this instruction will write back the base register"));
9113 else if (inst.operands[0].writeback
9114 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
9115 as_warn (_("this instruction will not write back the base register"));
9118 inst.instruction = THUMB_OP16 (inst.instruction);
9119 inst.instruction |= inst.operands[0].reg << 8;
9120 inst.instruction |= inst.operands[1].imm;
9127 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
9128 || inst.operands[1].postind || inst.operands[1].writeback
9129 || inst.operands[1].immisreg || inst.operands[1].shifted
9130 || inst.operands[1].negative,
9133 inst.instruction |= inst.operands[0].reg << 12;
9134 inst.instruction |= inst.operands[1].reg << 16;
9135 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9141 if (!inst.operands[1].present)
9143 constraint (inst.operands[0].reg == REG_LR,
9144 _("r14 not allowed as first register "
9145 "when second register is omitted"));
9146 inst.operands[1].reg = inst.operands[0].reg + 1;
9148 constraint (inst.operands[0].reg == inst.operands[1].reg,
9151 inst.instruction |= inst.operands[0].reg << 12;
9152 inst.instruction |= inst.operands[1].reg << 8;
9153 inst.instruction |= inst.operands[2].reg << 16;
9159 unsigned long opcode;
9162 opcode = inst.instruction;
9165 if (!inst.operands[1].isreg)
9167 if (opcode <= 0xffff)
9168 inst.instruction = THUMB_OP32 (opcode);
9169 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9172 if (inst.operands[1].isreg
9173 && !inst.operands[1].writeback
9174 && !inst.operands[1].shifted && !inst.operands[1].postind
9175 && !inst.operands[1].negative && inst.operands[0].reg <= 7
9177 && inst.size_req != 4)
9179 /* Insn may have a 16-bit form. */
9180 Rn = inst.operands[1].reg;
9181 if (inst.operands[1].immisreg)
9183 inst.instruction = THUMB_OP16 (opcode);
9185 if (Rn <= 7 && inst.operands[1].imm <= 7)
9188 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
9189 && opcode != T_MNEM_ldrsb)
9190 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
9191 || (Rn == REG_SP && opcode == T_MNEM_str))
9198 if (inst.reloc.pc_rel)
9199 opcode = T_MNEM_ldr_pc2;
9201 opcode = T_MNEM_ldr_pc;
9205 if (opcode == T_MNEM_ldr)
9206 opcode = T_MNEM_ldr_sp;
9208 opcode = T_MNEM_str_sp;
9210 inst.instruction = inst.operands[0].reg << 8;
9214 inst.instruction = inst.operands[0].reg;
9215 inst.instruction |= inst.operands[1].reg << 3;
9217 inst.instruction |= THUMB_OP16 (opcode);
9218 if (inst.size_req == 2)
9219 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9221 inst.relax = opcode;
9225 /* Definitely a 32-bit variant. */
9226 inst.instruction = THUMB_OP32 (opcode);
9227 inst.instruction |= inst.operands[0].reg << 12;
9228 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
9232 constraint (inst.operands[0].reg > 7, BAD_HIREG);
9234 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
9236 /* Only [Rn,Rm] is acceptable. */
9237 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
9238 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
9239 || inst.operands[1].postind || inst.operands[1].shifted
9240 || inst.operands[1].negative,
9241 _("Thumb does not support this addressing mode"));
9242 inst.instruction = THUMB_OP16 (inst.instruction);
9246 inst.instruction = THUMB_OP16 (inst.instruction);
9247 if (!inst.operands[1].isreg)
9248 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
9251 constraint (!inst.operands[1].preind
9252 || inst.operands[1].shifted
9253 || inst.operands[1].writeback,
9254 _("Thumb does not support this addressing mode"));
9255 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
9257 constraint (inst.instruction & 0x0600,
9258 _("byte or halfword not valid for base register"));
9259 constraint (inst.operands[1].reg == REG_PC
9260 && !(inst.instruction & THUMB_LOAD_BIT),
9261 _("r15 based store not allowed"));
9262 constraint (inst.operands[1].immisreg,
9263 _("invalid base register for register offset"));
9265 if (inst.operands[1].reg == REG_PC)
9266 inst.instruction = T_OPCODE_LDR_PC;
9267 else if (inst.instruction & THUMB_LOAD_BIT)
9268 inst.instruction = T_OPCODE_LDR_SP;
9270 inst.instruction = T_OPCODE_STR_SP;
9272 inst.instruction |= inst.operands[0].reg << 8;
9273 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9277 constraint (inst.operands[1].reg > 7, BAD_HIREG);
9278 if (!inst.operands[1].immisreg)
9280 /* Immediate offset. */
9281 inst.instruction |= inst.operands[0].reg;
9282 inst.instruction |= inst.operands[1].reg << 3;
9283 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
9287 /* Register offset. */
9288 constraint (inst.operands[1].imm > 7, BAD_HIREG);
9289 constraint (inst.operands[1].negative,
9290 _("Thumb does not support this addressing mode"));
9293 switch (inst.instruction)
9295 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
9296 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
9297 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
9298 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
9299 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
9300 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
9301 case 0x5600 /* ldrsb */:
9302 case 0x5e00 /* ldrsh */: break;
9306 inst.instruction |= inst.operands[0].reg;
9307 inst.instruction |= inst.operands[1].reg << 3;
9308 inst.instruction |= inst.operands[1].imm << 6;
9314 if (!inst.operands[1].present)
9316 inst.operands[1].reg = inst.operands[0].reg + 1;
9317 constraint (inst.operands[0].reg == REG_LR,
9318 _("r14 not allowed here"));
9320 inst.instruction |= inst.operands[0].reg << 12;
9321 inst.instruction |= inst.operands[1].reg << 8;
9322 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
9329 inst.instruction |= inst.operands[0].reg << 12;
9330 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
9336 inst.instruction |= inst.operands[0].reg << 8;
9337 inst.instruction |= inst.operands[1].reg << 16;
9338 inst.instruction |= inst.operands[2].reg;
9339 inst.instruction |= inst.operands[3].reg << 12;
9345 inst.instruction |= inst.operands[0].reg << 12;
9346 inst.instruction |= inst.operands[1].reg << 8;
9347 inst.instruction |= inst.operands[2].reg << 16;
9348 inst.instruction |= inst.operands[3].reg;
9356 int r0off = (inst.instruction == T_MNEM_mov
9357 || inst.instruction == T_MNEM_movs) ? 8 : 16;
9358 unsigned long opcode;
9360 bfd_boolean low_regs;
9362 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
9363 opcode = inst.instruction;
9364 if (current_it_mask)
9365 narrow = opcode != T_MNEM_movs;
9367 narrow = opcode != T_MNEM_movs || low_regs;
9368 if (inst.size_req == 4
9369 || inst.operands[1].shifted)
9372 if (!inst.operands[1].isreg)
9374 /* Immediate operand. */
9375 if (current_it_mask == 0 && opcode == T_MNEM_mov)
9377 if (low_regs && narrow)
9379 inst.instruction = THUMB_OP16 (opcode);
9380 inst.instruction |= inst.operands[0].reg << 8;
9381 if (inst.size_req == 2)
9382 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9384 inst.relax = opcode;
9388 inst.instruction = THUMB_OP32 (inst.instruction);
9389 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9390 inst.instruction |= inst.operands[0].reg << r0off;
9391 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9396 inst.instruction = THUMB_OP32 (inst.instruction);
9397 inst.instruction |= inst.operands[0].reg << r0off;
9398 encode_thumb32_shifted_operand (1);
9401 switch (inst.instruction)
9404 inst.instruction = T_OPCODE_MOV_HR;
9405 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9406 inst.instruction |= (inst.operands[0].reg & 0x7);
9407 inst.instruction |= inst.operands[1].reg << 3;
9411 /* We know we have low registers at this point.
9412 Generate ADD Rd, Rs, #0. */
9413 inst.instruction = T_OPCODE_ADD_I3;
9414 inst.instruction |= inst.operands[0].reg;
9415 inst.instruction |= inst.operands[1].reg << 3;
9421 inst.instruction = T_OPCODE_CMP_LR;
9422 inst.instruction |= inst.operands[0].reg;
9423 inst.instruction |= inst.operands[1].reg << 3;
9427 inst.instruction = T_OPCODE_CMP_HR;
9428 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
9429 inst.instruction |= (inst.operands[0].reg & 0x7);
9430 inst.instruction |= inst.operands[1].reg << 3;
9437 inst.instruction = THUMB_OP16 (inst.instruction);
9438 if (inst.operands[1].isreg)
9440 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
9442 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
9443 since a MOV instruction produces unpredictable results. */
9444 if (inst.instruction == T_OPCODE_MOV_I8)
9445 inst.instruction = T_OPCODE_ADD_I3;
9447 inst.instruction = T_OPCODE_CMP_LR;
9449 inst.instruction |= inst.operands[0].reg;
9450 inst.instruction |= inst.operands[1].reg << 3;
9454 if (inst.instruction == T_OPCODE_MOV_I8)
9455 inst.instruction = T_OPCODE_MOV_HR;
9457 inst.instruction = T_OPCODE_CMP_HR;
9463 constraint (inst.operands[0].reg > 7,
9464 _("only lo regs allowed with immediate"));
9465 inst.instruction |= inst.operands[0].reg << 8;
9466 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
9476 top = (inst.instruction & 0x00800000) != 0;
9477 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
9479 constraint (top, _(":lower16: not allowed this instruction"));
9480 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
9482 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
9484 constraint (!top, _(":upper16: not allowed this instruction"));
9485 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
9488 inst.instruction |= inst.operands[0].reg << 8;
9489 if (inst.reloc.type == BFD_RELOC_UNUSED)
9491 imm = inst.reloc.exp.X_add_number;
9492 inst.instruction |= (imm & 0xf000) << 4;
9493 inst.instruction |= (imm & 0x0800) << 15;
9494 inst.instruction |= (imm & 0x0700) << 4;
9495 inst.instruction |= (imm & 0x00ff);
9504 int r0off = (inst.instruction == T_MNEM_mvn
9505 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
9508 if (inst.size_req == 4
9509 || inst.instruction > 0xffff
9510 || inst.operands[1].shifted
9511 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9513 else if (inst.instruction == T_MNEM_cmn)
9515 else if (THUMB_SETS_FLAGS (inst.instruction))
9516 narrow = (current_it_mask == 0);
9518 narrow = (current_it_mask != 0);
9520 if (!inst.operands[1].isreg)
9522 /* For an immediate, we always generate a 32-bit opcode;
9523 section relaxation will shrink it later if possible. */
9524 if (inst.instruction < 0xffff)
9525 inst.instruction = THUMB_OP32 (inst.instruction);
9526 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9527 inst.instruction |= inst.operands[0].reg << r0off;
9528 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9532 /* See if we can do this with a 16-bit instruction. */
9535 inst.instruction = THUMB_OP16 (inst.instruction);
9536 inst.instruction |= inst.operands[0].reg;
9537 inst.instruction |= inst.operands[1].reg << 3;
9541 constraint (inst.operands[1].shifted
9542 && inst.operands[1].immisreg,
9543 _("shift must be constant"));
9544 if (inst.instruction < 0xffff)
9545 inst.instruction = THUMB_OP32 (inst.instruction);
9546 inst.instruction |= inst.operands[0].reg << r0off;
9547 encode_thumb32_shifted_operand (1);
9553 constraint (inst.instruction > 0xffff
9554 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
9555 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
9556 _("unshifted register required"));
9557 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9560 inst.instruction = THUMB_OP16 (inst.instruction);
9561 inst.instruction |= inst.operands[0].reg;
9562 inst.instruction |= inst.operands[1].reg << 3;
9571 if (do_vfp_nsyn_mrs () == SUCCESS)
9574 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
9577 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9578 _("selected processor does not support "
9579 "requested special purpose register"));
9583 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9584 _("selected processor does not support "
9585 "requested special purpose register %x"));
9586 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
9587 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
9588 _("'CPSR' or 'SPSR' expected"));
9591 inst.instruction |= inst.operands[0].reg << 8;
9592 inst.instruction |= (flags & SPSR_BIT) >> 2;
9593 inst.instruction |= inst.operands[1].imm & 0xff;
9601 if (do_vfp_nsyn_msr () == SUCCESS)
9604 constraint (!inst.operands[1].isreg,
9605 _("Thumb encoding does not support an immediate here"));
9606 flags = inst.operands[0].imm;
9609 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
9610 _("selected processor does not support "
9611 "requested special purpose register"));
9615 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
9616 _("selected processor does not support "
9617 "requested special purpose register"));
9620 inst.instruction |= (flags & SPSR_BIT) >> 2;
9621 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
9622 inst.instruction |= (flags & 0xff);
9623 inst.instruction |= inst.operands[1].reg << 16;
9629 if (!inst.operands[2].present)
9630 inst.operands[2].reg = inst.operands[0].reg;
9632 /* There is no 32-bit MULS and no 16-bit MUL. */
9633 if (unified_syntax && inst.instruction == T_MNEM_mul)
9635 inst.instruction = THUMB_OP32 (inst.instruction);
9636 inst.instruction |= inst.operands[0].reg << 8;
9637 inst.instruction |= inst.operands[1].reg << 16;
9638 inst.instruction |= inst.operands[2].reg << 0;
9642 constraint (!unified_syntax
9643 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
9644 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9647 inst.instruction = THUMB_OP16 (inst.instruction);
9648 inst.instruction |= inst.operands[0].reg;
9650 if (inst.operands[0].reg == inst.operands[1].reg)
9651 inst.instruction |= inst.operands[2].reg << 3;
9652 else if (inst.operands[0].reg == inst.operands[2].reg)
9653 inst.instruction |= inst.operands[1].reg << 3;
9655 constraint (1, _("dest must overlap one source register"));
9662 inst.instruction |= inst.operands[0].reg << 12;
9663 inst.instruction |= inst.operands[1].reg << 8;
9664 inst.instruction |= inst.operands[2].reg << 16;
9665 inst.instruction |= inst.operands[3].reg;
9667 if (inst.operands[0].reg == inst.operands[1].reg)
9668 as_tsktsk (_("rdhi and rdlo must be different"));
9676 if (inst.size_req == 4 || inst.operands[0].imm > 15)
9678 inst.instruction = THUMB_OP32 (inst.instruction);
9679 inst.instruction |= inst.operands[0].imm;
9683 inst.instruction = THUMB_OP16 (inst.instruction);
9684 inst.instruction |= inst.operands[0].imm << 4;
9689 constraint (inst.operands[0].present,
9690 _("Thumb does not support NOP with hints"));
9691 inst.instruction = 0x46c0;
9702 if (THUMB_SETS_FLAGS (inst.instruction))
9703 narrow = (current_it_mask == 0);
9705 narrow = (current_it_mask != 0);
9706 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9708 if (inst.size_req == 4)
9713 inst.instruction = THUMB_OP32 (inst.instruction);
9714 inst.instruction |= inst.operands[0].reg << 8;
9715 inst.instruction |= inst.operands[1].reg << 16;
9719 inst.instruction = THUMB_OP16 (inst.instruction);
9720 inst.instruction |= inst.operands[0].reg;
9721 inst.instruction |= inst.operands[1].reg << 3;
9726 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9728 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9730 inst.instruction = THUMB_OP16 (inst.instruction);
9731 inst.instruction |= inst.operands[0].reg;
9732 inst.instruction |= inst.operands[1].reg << 3;
9739 inst.instruction |= inst.operands[0].reg << 8;
9740 inst.instruction |= inst.operands[1].reg << 16;
9741 inst.instruction |= inst.operands[2].reg;
9742 if (inst.operands[3].present)
9744 unsigned int val = inst.reloc.exp.X_add_number;
9745 constraint (inst.reloc.exp.X_op != O_constant,
9746 _("expression too complex"));
9747 inst.instruction |= (val & 0x1c) << 10;
9748 inst.instruction |= (val & 0x03) << 6;
9755 if (!inst.operands[3].present)
9756 inst.instruction &= ~0x00000020;
9763 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
9767 do_t_push_pop (void)
9771 constraint (inst.operands[0].writeback,
9772 _("push/pop do not support {reglist}^"));
9773 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9774 _("expression too complex"));
9776 mask = inst.operands[0].imm;
9777 if ((mask & ~0xff) == 0)
9778 inst.instruction = THUMB_OP16 (inst.instruction);
9779 else if ((inst.instruction == T_MNEM_push
9780 && (mask & ~0xff) == 1 << REG_LR)
9781 || (inst.instruction == T_MNEM_pop
9782 && (mask & ~0xff) == 1 << REG_PC))
9784 inst.instruction = THUMB_OP16 (inst.instruction);
9785 inst.instruction |= THUMB_PP_PC_LR;
9788 else if (unified_syntax)
9790 if (mask & (1 << 13))
9791 inst.error = _("SP not allowed in register list");
9792 if (inst.instruction == T_MNEM_push)
9794 if (mask & (1 << 15))
9795 inst.error = _("PC not allowed in register list");
9799 if (mask & (1 << 14)
9800 && mask & (1 << 15))
9801 inst.error = _("LR and PC should not both be in register list");
9803 if ((mask & (mask - 1)) == 0)
9805 /* Single register push/pop implemented as str/ldr. */
9806 if (inst.instruction == T_MNEM_push)
9807 inst.instruction = 0xf84d0d04; /* str reg, [sp, #-4]! */
9809 inst.instruction = 0xf85d0b04; /* ldr reg, [sp], #4 */
9810 mask = ffs(mask) - 1;
9814 inst.instruction = THUMB_OP32 (inst.instruction);
9818 inst.error = _("invalid register list to push/pop instruction");
9822 inst.instruction |= mask;
9828 inst.instruction |= inst.operands[0].reg << 8;
9829 inst.instruction |= inst.operands[1].reg << 16;
9835 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9836 && inst.size_req != 4)
9838 inst.instruction = THUMB_OP16 (inst.instruction);
9839 inst.instruction |= inst.operands[0].reg;
9840 inst.instruction |= inst.operands[1].reg << 3;
9842 else if (unified_syntax)
9844 inst.instruction = THUMB_OP32 (inst.instruction);
9845 inst.instruction |= inst.operands[0].reg << 8;
9846 inst.instruction |= inst.operands[1].reg << 16;
9847 inst.instruction |= inst.operands[1].reg;
9850 inst.error = BAD_HIREG;
9858 Rd = inst.operands[0].reg;
9859 Rs = (inst.operands[1].present
9860 ? inst.operands[1].reg /* Rd, Rs, foo */
9861 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9863 inst.instruction |= Rd << 8;
9864 inst.instruction |= Rs << 16;
9865 if (!inst.operands[2].isreg)
9867 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9868 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9871 encode_thumb32_shifted_operand (2);
9877 constraint (current_it_mask, BAD_NOT_IT);
9878 if (inst.operands[0].imm)
9879 inst.instruction |= 0x8;
9885 if (!inst.operands[1].present)
9886 inst.operands[1].reg = inst.operands[0].reg;
9893 switch (inst.instruction)
9896 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
9898 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
9900 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
9902 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
9906 if (THUMB_SETS_FLAGS (inst.instruction))
9907 narrow = (current_it_mask == 0);
9909 narrow = (current_it_mask != 0);
9910 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9912 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
9914 if (inst.operands[2].isreg
9915 && (inst.operands[1].reg != inst.operands[0].reg
9916 || inst.operands[2].reg > 7))
9918 if (inst.size_req == 4)
9923 if (inst.operands[2].isreg)
9925 inst.instruction = THUMB_OP32 (inst.instruction);
9926 inst.instruction |= inst.operands[0].reg << 8;
9927 inst.instruction |= inst.operands[1].reg << 16;
9928 inst.instruction |= inst.operands[2].reg;
9932 inst.operands[1].shifted = 1;
9933 inst.operands[1].shift_kind = shift_kind;
9934 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
9935 ? T_MNEM_movs : T_MNEM_mov);
9936 inst.instruction |= inst.operands[0].reg << 8;
9937 encode_thumb32_shifted_operand (1);
9938 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9939 inst.reloc.type = BFD_RELOC_UNUSED;
9944 if (inst.operands[2].isreg)
9948 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
9949 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
9950 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
9951 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
9955 inst.instruction |= inst.operands[0].reg;
9956 inst.instruction |= inst.operands[2].reg << 3;
9962 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
9963 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
9964 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
9967 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9968 inst.instruction |= inst.operands[0].reg;
9969 inst.instruction |= inst.operands[1].reg << 3;
9975 constraint (inst.operands[0].reg > 7
9976 || inst.operands[1].reg > 7, BAD_HIREG);
9977 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9979 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
9981 constraint (inst.operands[2].reg > 7, BAD_HIREG);
9982 constraint (inst.operands[0].reg != inst.operands[1].reg,
9983 _("source1 and dest must be same register"));
9985 switch (inst.instruction)
9987 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
9988 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
9989 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
9990 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
9994 inst.instruction |= inst.operands[0].reg;
9995 inst.instruction |= inst.operands[2].reg << 3;
9999 switch (inst.instruction)
10001 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
10002 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
10003 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
10004 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
10007 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
10008 inst.instruction |= inst.operands[0].reg;
10009 inst.instruction |= inst.operands[1].reg << 3;
10017 inst.instruction |= inst.operands[0].reg << 8;
10018 inst.instruction |= inst.operands[1].reg << 16;
10019 inst.instruction |= inst.operands[2].reg;
10025 unsigned int value = inst.reloc.exp.X_add_number;
10026 constraint (inst.reloc.exp.X_op != O_constant,
10027 _("expression too complex"));
10028 inst.reloc.type = BFD_RELOC_UNUSED;
10029 inst.instruction |= (value & 0xf000) >> 12;
10030 inst.instruction |= (value & 0x0ff0);
10031 inst.instruction |= (value & 0x000f) << 16;
10037 inst.instruction |= inst.operands[0].reg << 8;
10038 inst.instruction |= inst.operands[1].imm - 1;
10039 inst.instruction |= inst.operands[2].reg << 16;
10041 if (inst.operands[3].present)
10043 constraint (inst.reloc.exp.X_op != O_constant,
10044 _("expression too complex"));
10046 if (inst.reloc.exp.X_add_number != 0)
10048 if (inst.operands[3].shift_kind == SHIFT_ASR)
10049 inst.instruction |= 0x00200000; /* sh bit */
10050 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10051 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10053 inst.reloc.type = BFD_RELOC_UNUSED;
10060 inst.instruction |= inst.operands[0].reg << 8;
10061 inst.instruction |= inst.operands[1].imm - 1;
10062 inst.instruction |= inst.operands[2].reg << 16;
10068 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
10069 || inst.operands[2].postind || inst.operands[2].writeback
10070 || inst.operands[2].immisreg || inst.operands[2].shifted
10071 || inst.operands[2].negative,
10074 inst.instruction |= inst.operands[0].reg << 8;
10075 inst.instruction |= inst.operands[1].reg << 12;
10076 inst.instruction |= inst.operands[2].reg << 16;
10077 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
10083 if (!inst.operands[2].present)
10084 inst.operands[2].reg = inst.operands[1].reg + 1;
10086 constraint (inst.operands[0].reg == inst.operands[1].reg
10087 || inst.operands[0].reg == inst.operands[2].reg
10088 || inst.operands[0].reg == inst.operands[3].reg
10089 || inst.operands[1].reg == inst.operands[2].reg,
10092 inst.instruction |= inst.operands[0].reg;
10093 inst.instruction |= inst.operands[1].reg << 12;
10094 inst.instruction |= inst.operands[2].reg << 8;
10095 inst.instruction |= inst.operands[3].reg << 16;
10101 inst.instruction |= inst.operands[0].reg << 8;
10102 inst.instruction |= inst.operands[1].reg << 16;
10103 inst.instruction |= inst.operands[2].reg;
10104 inst.instruction |= inst.operands[3].imm << 4;
10110 if (inst.instruction <= 0xffff && inst.size_req != 4
10111 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
10112 && (!inst.operands[2].present || inst.operands[2].imm == 0))
10114 inst.instruction = THUMB_OP16 (inst.instruction);
10115 inst.instruction |= inst.operands[0].reg;
10116 inst.instruction |= inst.operands[1].reg << 3;
10118 else if (unified_syntax)
10120 if (inst.instruction <= 0xffff)
10121 inst.instruction = THUMB_OP32 (inst.instruction);
10122 inst.instruction |= inst.operands[0].reg << 8;
10123 inst.instruction |= inst.operands[1].reg;
10124 inst.instruction |= inst.operands[2].imm << 4;
10128 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
10129 _("Thumb encoding does not support rotation"));
10130 constraint (1, BAD_HIREG);
10137 inst.reloc.type = BFD_RELOC_ARM_SWI;
10145 half = (inst.instruction & 0x10) != 0;
10146 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
10147 constraint (inst.operands[0].immisreg,
10148 _("instruction requires register index"));
10149 constraint (inst.operands[0].imm == 15,
10150 _("PC is not a valid index register"));
10151 constraint (!half && inst.operands[0].shifted,
10152 _("instruction does not allow shifted index"));
10153 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
10159 inst.instruction |= inst.operands[0].reg << 8;
10160 inst.instruction |= inst.operands[1].imm;
10161 inst.instruction |= inst.operands[2].reg << 16;
10163 if (inst.operands[3].present)
10165 constraint (inst.reloc.exp.X_op != O_constant,
10166 _("expression too complex"));
10167 if (inst.reloc.exp.X_add_number != 0)
10169 if (inst.operands[3].shift_kind == SHIFT_ASR)
10170 inst.instruction |= 0x00200000; /* sh bit */
10172 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
10173 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
10175 inst.reloc.type = BFD_RELOC_UNUSED;
10182 inst.instruction |= inst.operands[0].reg << 8;
10183 inst.instruction |= inst.operands[1].imm;
10184 inst.instruction |= inst.operands[2].reg << 16;
10187 /* Neon instruction encoder helpers. */
10189 /* Encodings for the different types for various Neon opcodes. */
10191 /* An "invalid" code for the following tables. */
10194 struct neon_tab_entry
10197 unsigned float_or_poly;
10198 unsigned scalar_or_imm;
10201 /* Map overloaded Neon opcodes to their respective encodings. */
10202 #define NEON_ENC_TAB \
10203 X(vabd, 0x0000700, 0x1200d00, N_INV), \
10204 X(vmax, 0x0000600, 0x0000f00, N_INV), \
10205 X(vmin, 0x0000610, 0x0200f00, N_INV), \
10206 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
10207 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
10208 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
10209 X(vadd, 0x0000800, 0x0000d00, N_INV), \
10210 X(vsub, 0x1000800, 0x0200d00, N_INV), \
10211 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
10212 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
10213 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
10214 /* Register variants of the following two instructions are encoded as
10215 vcge / vcgt with the operands reversed. */ \
10216 X(vclt, 0x0000300, 0x1200e00, 0x1b10200), \
10217 X(vcle, 0x0000310, 0x1000e00, 0x1b10180), \
10218 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
10219 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
10220 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
10221 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
10222 X(vmlal, 0x0800800, N_INV, 0x0800240), \
10223 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
10224 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
10225 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
10226 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
10227 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
10228 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
10229 X(vshl, 0x0000400, N_INV, 0x0800510), \
10230 X(vqshl, 0x0000410, N_INV, 0x0800710), \
10231 X(vand, 0x0000110, N_INV, 0x0800030), \
10232 X(vbic, 0x0100110, N_INV, 0x0800030), \
10233 X(veor, 0x1000110, N_INV, N_INV), \
10234 X(vorn, 0x0300110, N_INV, 0x0800010), \
10235 X(vorr, 0x0200110, N_INV, 0x0800010), \
10236 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
10237 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
10238 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
10239 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
10240 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
10241 X(vst1, 0x0000000, 0x0800000, N_INV), \
10242 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
10243 X(vst2, 0x0000100, 0x0800100, N_INV), \
10244 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
10245 X(vst3, 0x0000200, 0x0800200, N_INV), \
10246 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
10247 X(vst4, 0x0000300, 0x0800300, N_INV), \
10248 X(vmovn, 0x1b20200, N_INV, N_INV), \
10249 X(vtrn, 0x1b20080, N_INV, N_INV), \
10250 X(vqmovn, 0x1b20200, N_INV, N_INV), \
10251 X(vqmovun, 0x1b20240, N_INV, N_INV), \
10252 X(vnmul, 0xe200a40, 0xe200b40, N_INV), \
10253 X(vnmla, 0xe000a40, 0xe000b40, N_INV), \
10254 X(vnmls, 0xe100a40, 0xe100b40, N_INV), \
10255 X(vcmp, 0xeb40a40, 0xeb40b40, N_INV), \
10256 X(vcmpz, 0xeb50a40, 0xeb50b40, N_INV), \
10257 X(vcmpe, 0xeb40ac0, 0xeb40bc0, N_INV), \
10258 X(vcmpez, 0xeb50ac0, 0xeb50bc0, N_INV)
10262 #define X(OPC,I,F,S) N_MNEM_##OPC
10267 static const struct neon_tab_entry neon_enc_tab[] =
10269 #define X(OPC,I,F,S) { (I), (F), (S) }
10274 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10275 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10276 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10277 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10278 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10279 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10280 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
10281 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
10282 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
10283 #define NEON_ENC_SINGLE(X) \
10284 ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
10285 #define NEON_ENC_DOUBLE(X) \
10286 ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
10288 /* Define shapes for instruction operands. The following mnemonic characters
10289 are used in this table:
10291 F - VFP S<n> register
10292 D - Neon D<n> register
10293 Q - Neon Q<n> register
10297 L - D<n> register list
10299 This table is used to generate various data:
10300 - enumerations of the form NS_DDR to be used as arguments to
10302 - a table classifying shapes into single, double, quad, mixed.
10303 - a table used to drive neon_select_shape.
10306 #define NEON_SHAPE_DEF \
10307 X(3, (D, D, D), DOUBLE), \
10308 X(3, (Q, Q, Q), QUAD), \
10309 X(3, (D, D, I), DOUBLE), \
10310 X(3, (Q, Q, I), QUAD), \
10311 X(3, (D, D, S), DOUBLE), \
10312 X(3, (Q, Q, S), QUAD), \
10313 X(2, (D, D), DOUBLE), \
10314 X(2, (Q, Q), QUAD), \
10315 X(2, (D, S), DOUBLE), \
10316 X(2, (Q, S), QUAD), \
10317 X(2, (D, R), DOUBLE), \
10318 X(2, (Q, R), QUAD), \
10319 X(2, (D, I), DOUBLE), \
10320 X(2, (Q, I), QUAD), \
10321 X(3, (D, L, D), DOUBLE), \
10322 X(2, (D, Q), MIXED), \
10323 X(2, (Q, D), MIXED), \
10324 X(3, (D, Q, I), MIXED), \
10325 X(3, (Q, D, I), MIXED), \
10326 X(3, (Q, D, D), MIXED), \
10327 X(3, (D, Q, Q), MIXED), \
10328 X(3, (Q, Q, D), MIXED), \
10329 X(3, (Q, D, S), MIXED), \
10330 X(3, (D, Q, S), MIXED), \
10331 X(4, (D, D, D, I), DOUBLE), \
10332 X(4, (Q, Q, Q, I), QUAD), \
10333 X(2, (F, F), SINGLE), \
10334 X(3, (F, F, F), SINGLE), \
10335 X(2, (F, I), SINGLE), \
10336 X(2, (F, D), MIXED), \
10337 X(2, (D, F), MIXED), \
10338 X(3, (F, F, I), MIXED), \
10339 X(4, (R, R, F, F), SINGLE), \
10340 X(4, (F, F, R, R), SINGLE), \
10341 X(3, (D, R, R), DOUBLE), \
10342 X(3, (R, R, D), DOUBLE), \
10343 X(2, (S, R), SINGLE), \
10344 X(2, (R, S), SINGLE), \
10345 X(2, (F, R), SINGLE), \
10346 X(2, (R, F), SINGLE)
10348 #define S2(A,B) NS_##A##B
10349 #define S3(A,B,C) NS_##A##B##C
10350 #define S4(A,B,C,D) NS_##A##B##C##D
10352 #define X(N, L, C) S##N L
10365 enum neon_shape_class
10373 #define X(N, L, C) SC_##C
10375 static enum neon_shape_class neon_shape_class[] =
10393 /* Register widths of above. */
10394 static unsigned neon_shape_el_size[] =
10405 struct neon_shape_info
10408 enum neon_shape_el el[NEON_MAX_TYPE_ELS];
10411 #define S2(A,B) { SE_##A, SE_##B }
10412 #define S3(A,B,C) { SE_##A, SE_##B, SE_##C }
10413 #define S4(A,B,C,D) { SE_##A, SE_##B, SE_##C, SE_##D }
10415 #define X(N, L, C) { N, S##N L }
10417 static struct neon_shape_info neon_shape_tab[] =
10427 /* Bit masks used in type checking given instructions.
10428 'N_EQK' means the type must be the same as (or based on in some way) the key
10429 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
10430 set, various other bits can be set as well in order to modify the meaning of
10431 the type constraint. */
10433 enum neon_type_mask
10455 N_KEY = 0x100000, /* key element (main type specifier). */
10456 N_EQK = 0x200000, /* given operand has the same type & size as the key. */
10457 N_VFP = 0x400000, /* VFP mode: operand size must match register width. */
10458 N_DBL = 0x000001, /* if N_EQK, this operand is twice the size. */
10459 N_HLF = 0x000002, /* if N_EQK, this operand is half the size. */
10460 N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed. */
10461 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
10462 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */
10463 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */
10464 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
10466 N_MAX_NONSPECIAL = N_F64
10469 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
10471 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
10472 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
10473 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
10474 #define N_SUF_32 (N_SU_32 | N_F32)
10475 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
10476 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
10478 /* Pass this as the first type argument to neon_check_type to ignore types
10480 #define N_IGNORE_TYPE (N_KEY | N_EQK)
10482 /* Select a "shape" for the current instruction (describing register types or
10483 sizes) from a list of alternatives. Return NS_NULL if the current instruction
10484 doesn't fit. For non-polymorphic shapes, checking is usually done as a
10485 function of operand parsing, so this function doesn't need to be called.
10486 Shapes should be listed in order of decreasing length. */
10488 static enum neon_shape
10489 neon_select_shape (enum neon_shape shape, ...)
10492 enum neon_shape first_shape = shape;
10494 /* Fix missing optional operands. FIXME: we don't know at this point how
10495 many arguments we should have, so this makes the assumption that we have
10496 > 1. This is true of all current Neon opcodes, I think, but may not be
10497 true in the future. */
10498 if (!inst.operands[1].present)
10499 inst.operands[1] = inst.operands[0];
10501 va_start (ap, shape);
10503 for (; shape != NS_NULL; shape = va_arg (ap, int))
10508 for (j = 0; j < neon_shape_tab[shape].els; j++)
10510 if (!inst.operands[j].present)
10516 switch (neon_shape_tab[shape].el[j])
10519 if (!(inst.operands[j].isreg
10520 && inst.operands[j].isvec
10521 && inst.operands[j].issingle
10522 && !inst.operands[j].isquad))
10527 if (!(inst.operands[j].isreg
10528 && inst.operands[j].isvec
10529 && !inst.operands[j].isquad
10530 && !inst.operands[j].issingle))
10535 if (!(inst.operands[j].isreg
10536 && !inst.operands[j].isvec))
10541 if (!(inst.operands[j].isreg
10542 && inst.operands[j].isvec
10543 && inst.operands[j].isquad
10544 && !inst.operands[j].issingle))
10549 if (!(!inst.operands[j].isreg
10550 && !inst.operands[j].isscalar))
10555 if (!(!inst.operands[j].isreg
10556 && inst.operands[j].isscalar))
10570 if (shape == NS_NULL && first_shape != NS_NULL)
10571 first_error (_("invalid instruction shape"));
10576 /* True if SHAPE is predominantly a quadword operation (most of the time, this
10577 means the Q bit should be set). */
10580 neon_quad (enum neon_shape shape)
10582 return neon_shape_class[shape] == SC_QUAD;
10586 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
10589 /* Allow modification to be made to types which are constrained to be
10590 based on the key element, based on bits set alongside N_EQK. */
10591 if ((typebits & N_EQK) != 0)
10593 if ((typebits & N_HLF) != 0)
10595 else if ((typebits & N_DBL) != 0)
10597 if ((typebits & N_SGN) != 0)
10598 *g_type = NT_signed;
10599 else if ((typebits & N_UNS) != 0)
10600 *g_type = NT_unsigned;
10601 else if ((typebits & N_INT) != 0)
10602 *g_type = NT_integer;
10603 else if ((typebits & N_FLT) != 0)
10604 *g_type = NT_float;
10605 else if ((typebits & N_SIZ) != 0)
10606 *g_type = NT_untyped;
10610 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
10611 operand type, i.e. the single type specified in a Neon instruction when it
10612 is the only one given. */
10614 static struct neon_type_el
10615 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
10617 struct neon_type_el dest = *key;
10619 assert ((thisarg & N_EQK) != 0);
10621 neon_modify_type_size (thisarg, &dest.type, &dest.size);
10626 /* Convert Neon type and size into compact bitmask representation. */
10628 static enum neon_type_mask
10629 type_chk_of_el_type (enum neon_el_type type, unsigned size)
10636 case 8: return N_8;
10637 case 16: return N_16;
10638 case 32: return N_32;
10639 case 64: return N_64;
10647 case 8: return N_I8;
10648 case 16: return N_I16;
10649 case 32: return N_I32;
10650 case 64: return N_I64;
10658 case 32: return N_F32;
10659 case 64: return N_F64;
10667 case 8: return N_P8;
10668 case 16: return N_P16;
10676 case 8: return N_S8;
10677 case 16: return N_S16;
10678 case 32: return N_S32;
10679 case 64: return N_S64;
10687 case 8: return N_U8;
10688 case 16: return N_U16;
10689 case 32: return N_U32;
10690 case 64: return N_U64;
10701 /* Convert compact Neon bitmask type representation to a type and size. Only
10702 handles the case where a single bit is set in the mask. */
10705 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
10706 enum neon_type_mask mask)
10708 if ((mask & N_EQK) != 0)
10711 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
10713 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
10715 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
10717 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64)) != 0)
10722 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
10724 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
10725 *type = NT_unsigned;
10726 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
10727 *type = NT_integer;
10728 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
10729 *type = NT_untyped;
10730 else if ((mask & (N_P8 | N_P16)) != 0)
10732 else if ((mask & (N_F32 | N_F64)) != 0)
10740 /* Modify a bitmask of allowed types. This is only needed for type
10744 modify_types_allowed (unsigned allowed, unsigned mods)
10747 enum neon_el_type type;
10753 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
10755 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
10757 neon_modify_type_size (mods, &type, &size);
10758 destmask |= type_chk_of_el_type (type, size);
10765 /* Check type and return type classification.
10766 The manual states (paraphrase): If one datatype is given, it indicates the
10768 - the second operand, if there is one
10769 - the operand, if there is no second operand
10770 - the result, if there are no operands.
10771 This isn't quite good enough though, so we use a concept of a "key" datatype
10772 which is set on a per-instruction basis, which is the one which matters when
10773 only one data type is written.
10774 Note: this function has side-effects (e.g. filling in missing operands). All
10775 Neon instructions should call it before performing bit encoding. */
10777 static struct neon_type_el
10778 neon_check_type (unsigned els, enum neon_shape ns, ...)
10781 unsigned i, pass, key_el = 0;
10782 unsigned types[NEON_MAX_TYPE_ELS];
10783 enum neon_el_type k_type = NT_invtype;
10784 unsigned k_size = -1u;
10785 struct neon_type_el badtype = {NT_invtype, -1};
10786 unsigned key_allowed = 0;
10788 /* Optional registers in Neon instructions are always (not) in operand 1.
10789 Fill in the missing operand here, if it was omitted. */
10790 if (els > 1 && !inst.operands[1].present)
10791 inst.operands[1] = inst.operands[0];
10793 /* Suck up all the varargs. */
10795 for (i = 0; i < els; i++)
10797 unsigned thisarg = va_arg (ap, unsigned);
10798 if (thisarg == N_IGNORE_TYPE)
10803 types[i] = thisarg;
10804 if ((thisarg & N_KEY) != 0)
10809 if (inst.vectype.elems > 0)
10810 for (i = 0; i < els; i++)
10811 if (inst.operands[i].vectype.type != NT_invtype)
10813 first_error (_("types specified in both the mnemonic and operands"));
10817 /* Duplicate inst.vectype elements here as necessary.
10818 FIXME: No idea if this is exactly the same as the ARM assembler,
10819 particularly when an insn takes one register and one non-register
10821 if (inst.vectype.elems == 1 && els > 1)
10824 inst.vectype.elems = els;
10825 inst.vectype.el[key_el] = inst.vectype.el[0];
10826 for (j = 0; j < els; j++)
10828 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10831 else if (inst.vectype.elems == 0 && els > 0)
10834 /* No types were given after the mnemonic, so look for types specified
10835 after each operand. We allow some flexibility here; as long as the
10836 "key" operand has a type, we can infer the others. */
10837 for (j = 0; j < els; j++)
10838 if (inst.operands[j].vectype.type != NT_invtype)
10839 inst.vectype.el[j] = inst.operands[j].vectype;
10841 if (inst.operands[key_el].vectype.type != NT_invtype)
10843 for (j = 0; j < els; j++)
10844 if (inst.operands[j].vectype.type == NT_invtype)
10845 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10850 first_error (_("operand types can't be inferred"));
10854 else if (inst.vectype.elems != els)
10856 first_error (_("type specifier has the wrong number of parts"));
10860 for (pass = 0; pass < 2; pass++)
10862 for (i = 0; i < els; i++)
10864 unsigned thisarg = types[i];
10865 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
10866 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
10867 enum neon_el_type g_type = inst.vectype.el[i].type;
10868 unsigned g_size = inst.vectype.el[i].size;
10870 /* Decay more-specific signed & unsigned types to sign-insensitive
10871 integer types if sign-specific variants are unavailable. */
10872 if ((g_type == NT_signed || g_type == NT_unsigned)
10873 && (types_allowed & N_SU_ALL) == 0)
10874 g_type = NT_integer;
10876 /* If only untyped args are allowed, decay any more specific types to
10877 them. Some instructions only care about signs for some element
10878 sizes, so handle that properly. */
10879 if ((g_size == 8 && (types_allowed & N_8) != 0)
10880 || (g_size == 16 && (types_allowed & N_16) != 0)
10881 || (g_size == 32 && (types_allowed & N_32) != 0)
10882 || (g_size == 64 && (types_allowed & N_64) != 0))
10883 g_type = NT_untyped;
10887 if ((thisarg & N_KEY) != 0)
10891 key_allowed = thisarg & ~N_KEY;
10896 if ((thisarg & N_VFP) != 0)
10898 enum neon_shape_el regshape = neon_shape_tab[ns].el[i];
10899 unsigned regwidth = neon_shape_el_size[regshape], match;
10901 /* In VFP mode, operands must match register widths. If we
10902 have a key operand, use its width, else use the width of
10903 the current operand. */
10909 if (regwidth != match)
10911 first_error (_("operand size must match register width"));
10916 if ((thisarg & N_EQK) == 0)
10918 unsigned given_type = type_chk_of_el_type (g_type, g_size);
10920 if ((given_type & types_allowed) == 0)
10922 first_error (_("bad type in Neon instruction"));
10928 enum neon_el_type mod_k_type = k_type;
10929 unsigned mod_k_size = k_size;
10930 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
10931 if (g_type != mod_k_type || g_size != mod_k_size)
10933 first_error (_("inconsistent types in Neon instruction"));
10941 return inst.vectype.el[key_el];
10944 /* Neon-style VFP instruction forwarding. */
10946 /* Thumb VFP instructions have 0xE in the condition field. */
10949 do_vfp_cond_or_thumb (void)
10952 inst.instruction |= 0xe0000000;
10954 inst.instruction |= inst.cond << 28;
10957 /* Look up and encode a simple mnemonic, for use as a helper function for the
10958 Neon-style VFP syntax. This avoids duplication of bits of the insns table,
10959 etc. It is assumed that operand parsing has already been done, and that the
10960 operands are in the form expected by the given opcode (this isn't necessarily
10961 the same as the form in which they were parsed, hence some massaging must
10962 take place before this function is called).
10963 Checks current arch version against that in the looked-up opcode. */
10966 do_vfp_nsyn_opcode (const char *opname)
10968 const struct asm_opcode *opcode;
10970 opcode = hash_find (arm_ops_hsh, opname);
10975 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
10976 thumb_mode ? *opcode->tvariant : *opcode->avariant),
10981 inst.instruction = opcode->tvalue;
10982 opcode->tencode ();
10986 inst.instruction = (inst.cond << 28) | opcode->avalue;
10987 opcode->aencode ();
10992 do_vfp_nsyn_add_sub (enum neon_shape rs)
10994 int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
10999 do_vfp_nsyn_opcode ("fadds");
11001 do_vfp_nsyn_opcode ("fsubs");
11006 do_vfp_nsyn_opcode ("faddd");
11008 do_vfp_nsyn_opcode ("fsubd");
11012 /* Check operand types to see if this is a VFP instruction, and if so call
11016 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
11018 enum neon_shape rs;
11019 struct neon_type_el et;
11024 rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11025 et = neon_check_type (2, rs,
11026 N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11030 rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11031 et = neon_check_type (3, rs,
11032 N_EQK | N_VFP, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11039 if (et.type != NT_invtype)
11051 do_vfp_nsyn_mla_mls (enum neon_shape rs)
11053 int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
11058 do_vfp_nsyn_opcode ("fmacs");
11060 do_vfp_nsyn_opcode ("fmscs");
11065 do_vfp_nsyn_opcode ("fmacd");
11067 do_vfp_nsyn_opcode ("fmscd");
11072 do_vfp_nsyn_mul (enum neon_shape rs)
11075 do_vfp_nsyn_opcode ("fmuls");
11077 do_vfp_nsyn_opcode ("fmuld");
11081 do_vfp_nsyn_abs_neg (enum neon_shape rs)
11083 int is_neg = (inst.instruction & 0x80) != 0;
11084 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_VFP | N_KEY);
11089 do_vfp_nsyn_opcode ("fnegs");
11091 do_vfp_nsyn_opcode ("fabss");
11096 do_vfp_nsyn_opcode ("fnegd");
11098 do_vfp_nsyn_opcode ("fabsd");
11102 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
11103 insns belong to Neon, and are handled elsewhere. */
11106 do_vfp_nsyn_ldm_stm (int is_dbmode)
11108 int is_ldm = (inst.instruction & (1 << 20)) != 0;
11112 do_vfp_nsyn_opcode ("fldmdbs");
11114 do_vfp_nsyn_opcode ("fldmias");
11119 do_vfp_nsyn_opcode ("fstmdbs");
11121 do_vfp_nsyn_opcode ("fstmias");
11126 do_vfp_nsyn_sqrt (void)
11128 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11129 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11132 do_vfp_nsyn_opcode ("fsqrts");
11134 do_vfp_nsyn_opcode ("fsqrtd");
11138 do_vfp_nsyn_div (void)
11140 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11141 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11142 N_F32 | N_F64 | N_KEY | N_VFP);
11145 do_vfp_nsyn_opcode ("fdivs");
11147 do_vfp_nsyn_opcode ("fdivd");
11151 do_vfp_nsyn_nmul (void)
11153 enum neon_shape rs = neon_select_shape (NS_FFF, NS_DDD, NS_NULL);
11154 neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
11155 N_F32 | N_F64 | N_KEY | N_VFP);
11159 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11160 do_vfp_sp_dyadic ();
11164 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11165 do_vfp_dp_rd_rn_rm ();
11167 do_vfp_cond_or_thumb ();
11171 do_vfp_nsyn_cmp (void)
11173 if (inst.operands[1].isreg)
11175 enum neon_shape rs = neon_select_shape (NS_FF, NS_DD, NS_NULL);
11176 neon_check_type (2, rs, N_EQK | N_VFP, N_F32 | N_F64 | N_KEY | N_VFP);
11180 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11181 do_vfp_sp_monadic ();
11185 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11186 do_vfp_dp_rd_rm ();
11191 enum neon_shape rs = neon_select_shape (NS_FI, NS_DI, NS_NULL);
11192 neon_check_type (2, rs, N_F32 | N_F64 | N_KEY | N_VFP, N_EQK);
11194 switch (inst.instruction & 0x0fffffff)
11197 inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
11200 inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
11208 inst.instruction = NEON_ENC_SINGLE (inst.instruction);
11209 do_vfp_sp_compare_z ();
11213 inst.instruction = NEON_ENC_DOUBLE (inst.instruction);
11217 do_vfp_cond_or_thumb ();
11221 nsyn_insert_sp (void)
11223 inst.operands[1] = inst.operands[0];
11224 memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
11225 inst.operands[0].reg = 13;
11226 inst.operands[0].isreg = 1;
11227 inst.operands[0].writeback = 1;
11228 inst.operands[0].present = 1;
11232 do_vfp_nsyn_push (void)
11235 if (inst.operands[1].issingle)
11236 do_vfp_nsyn_opcode ("fstmdbs");
11238 do_vfp_nsyn_opcode ("fstmdbd");
11242 do_vfp_nsyn_pop (void)
11245 if (inst.operands[1].issingle)
11246 do_vfp_nsyn_opcode ("fldmdbs");
11248 do_vfp_nsyn_opcode ("fldmdbd");
11251 /* Fix up Neon data-processing instructions, ORing in the correct bits for
11252 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
11255 neon_dp_fixup (unsigned i)
11259 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
11273 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
11277 neon_logbits (unsigned x)
11279 return ffs (x) - 4;
11282 #define LOW4(R) ((R) & 0xf)
11283 #define HI1(R) (((R) >> 4) & 1)
11285 /* Encode insns with bit pattern:
11287 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11288 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
11290 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
11291 different meaning for some instruction. */
11294 neon_three_same (int isquad, int ubit, int size)
11296 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11297 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11298 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11299 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11300 inst.instruction |= LOW4 (inst.operands[2].reg);
11301 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11302 inst.instruction |= (isquad != 0) << 6;
11303 inst.instruction |= (ubit != 0) << 24;
11305 inst.instruction |= neon_logbits (size) << 20;
11307 inst.instruction = neon_dp_fixup (inst.instruction);
11310 /* Encode instructions of the form:
11312 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
11313 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
11315 Don't write size if SIZE == -1. */
11318 neon_two_same (int qbit, int ubit, int size)
11320 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11321 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11322 inst.instruction |= LOW4 (inst.operands[1].reg);
11323 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11324 inst.instruction |= (qbit != 0) << 6;
11325 inst.instruction |= (ubit != 0) << 24;
11328 inst.instruction |= neon_logbits (size) << 18;
11330 inst.instruction = neon_dp_fixup (inst.instruction);
11333 /* Neon instruction encoders, in approximate order of appearance. */
11336 do_neon_dyadic_i_su (void)
11338 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11339 struct neon_type_el et = neon_check_type (3, rs,
11340 N_EQK, N_EQK, N_SU_32 | N_KEY);
11341 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11345 do_neon_dyadic_i64_su (void)
11347 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11348 struct neon_type_el et = neon_check_type (3, rs,
11349 N_EQK, N_EQK, N_SU_ALL | N_KEY);
11350 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11354 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
11357 unsigned size = et.size >> 3;
11358 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11359 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11360 inst.instruction |= LOW4 (inst.operands[1].reg);
11361 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11362 inst.instruction |= (isquad != 0) << 6;
11363 inst.instruction |= immbits << 16;
11364 inst.instruction |= (size >> 3) << 7;
11365 inst.instruction |= (size & 0x7) << 19;
11367 inst.instruction |= (uval != 0) << 24;
11369 inst.instruction = neon_dp_fixup (inst.instruction);
11373 do_neon_shl_imm (void)
11375 if (!inst.operands[2].isreg)
11377 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11378 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
11379 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11380 neon_imm_shift (FALSE, 0, neon_quad (rs), et, inst.operands[2].imm);
11384 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11385 struct neon_type_el et = neon_check_type (3, rs,
11386 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11387 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11388 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11393 do_neon_qshl_imm (void)
11395 if (!inst.operands[2].isreg)
11397 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11398 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11399 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11400 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
11401 inst.operands[2].imm);
11405 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11406 struct neon_type_el et = neon_check_type (3, rs,
11407 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
11408 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11409 neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
11414 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
11416 /* Handle .I8 pseudo-instructions. */
11419 /* Unfortunately, this will make everything apart from zero out-of-range.
11420 FIXME is this the intended semantics? There doesn't seem much point in
11421 accepting .I8 if so. */
11422 immediate |= immediate << 8;
11428 if (immediate == (immediate & 0x000000ff))
11430 *immbits = immediate;
11433 else if (immediate == (immediate & 0x0000ff00))
11435 *immbits = immediate >> 8;
11438 else if (immediate == (immediate & 0x00ff0000))
11440 *immbits = immediate >> 16;
11443 else if (immediate == (immediate & 0xff000000))
11445 *immbits = immediate >> 24;
11448 if ((immediate & 0xffff) != (immediate >> 16))
11449 goto bad_immediate;
11450 immediate &= 0xffff;
11453 if (immediate == (immediate & 0x000000ff))
11455 *immbits = immediate;
11458 else if (immediate == (immediate & 0x0000ff00))
11460 *immbits = immediate >> 8;
11465 first_error (_("immediate value out of range"));
11469 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
11473 neon_bits_same_in_bytes (unsigned imm)
11475 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
11476 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
11477 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
11478 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
11481 /* For immediate of above form, return 0bABCD. */
11484 neon_squash_bits (unsigned imm)
11486 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
11487 | ((imm & 0x01000000) >> 21);
11490 /* Compress quarter-float representation to 0b...000 abcdefgh. */
11493 neon_qfloat_bits (unsigned imm)
11495 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
11498 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
11499 the instruction. *OP is passed as the initial value of the op field, and
11500 may be set to a different value depending on the constant (i.e.
11501 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
11502 MVN). If the immediate looks like a repeated parttern then also
11503 try smaller element sizes. */
11506 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
11507 int *op, int size, enum neon_el_type type)
11509 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
11511 if (size != 32 || *op == 1)
11513 *immbits = neon_qfloat_bits (immlo);
11519 if (neon_bits_same_in_bytes (immhi)
11520 && neon_bits_same_in_bytes (immlo))
11524 *immbits = (neon_squash_bits (immhi) << 4)
11525 | neon_squash_bits (immlo);
11530 if (immhi != immlo)
11536 if (immlo == (immlo & 0x000000ff))
11541 else if (immlo == (immlo & 0x0000ff00))
11543 *immbits = immlo >> 8;
11546 else if (immlo == (immlo & 0x00ff0000))
11548 *immbits = immlo >> 16;
11551 else if (immlo == (immlo & 0xff000000))
11553 *immbits = immlo >> 24;
11556 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
11558 *immbits = (immlo >> 8) & 0xff;
11561 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
11563 *immbits = (immlo >> 16) & 0xff;
11567 if ((immlo & 0xffff) != (immlo >> 16))
11574 if (immlo == (immlo & 0x000000ff))
11579 else if (immlo == (immlo & 0x0000ff00))
11581 *immbits = immlo >> 8;
11585 if ((immlo & 0xff) != (immlo >> 8))
11590 if (immlo == (immlo & 0x000000ff))
11592 /* Don't allow MVN with 8-bit immediate. */
11602 /* Write immediate bits [7:0] to the following locations:
11604 |28/24|23 19|18 16|15 4|3 0|
11605 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
11607 This function is used by VMOV/VMVN/VORR/VBIC. */
11610 neon_write_immbits (unsigned immbits)
11612 inst.instruction |= immbits & 0xf;
11613 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
11614 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
11617 /* Invert low-order SIZE bits of XHI:XLO. */
11620 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
11622 unsigned immlo = xlo ? *xlo : 0;
11623 unsigned immhi = xhi ? *xhi : 0;
11628 immlo = (~immlo) & 0xff;
11632 immlo = (~immlo) & 0xffff;
11636 immhi = (~immhi) & 0xffffffff;
11637 /* fall through. */
11640 immlo = (~immlo) & 0xffffffff;
11655 do_neon_logic (void)
11657 if (inst.operands[2].present && inst.operands[2].isreg)
11659 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11660 neon_check_type (3, rs, N_IGNORE_TYPE);
11661 /* U bit and size field were set as part of the bitmask. */
11662 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11663 neon_three_same (neon_quad (rs), 0, -1);
11667 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
11668 struct neon_type_el et = neon_check_type (2, rs,
11669 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
11670 enum neon_opc opcode = inst.instruction & 0x0fffffff;
11674 if (et.type == NT_invtype)
11677 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11679 immbits = inst.operands[1].imm;
11682 /* .i64 is a pseudo-op, so the immediate must be a repeating
11684 if (immbits != (inst.operands[1].regisimm ?
11685 inst.operands[1].reg : 0))
11687 /* Set immbits to an invalid constant. */
11688 immbits = 0xdeadbeef;
11695 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11699 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11703 /* Pseudo-instruction for VBIC. */
11704 neon_invert_size (&immbits, 0, et.size);
11705 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11709 /* Pseudo-instruction for VORR. */
11710 neon_invert_size (&immbits, 0, et.size);
11711 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
11721 inst.instruction |= neon_quad (rs) << 6;
11722 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11723 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11724 inst.instruction |= cmode << 8;
11725 neon_write_immbits (immbits);
11727 inst.instruction = neon_dp_fixup (inst.instruction);
11732 do_neon_bitfield (void)
11734 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11735 neon_check_type (3, rs, N_IGNORE_TYPE);
11736 neon_three_same (neon_quad (rs), 0, -1);
11740 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
11743 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
11744 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
11746 if (et.type == NT_float)
11748 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
11749 neon_three_same (neon_quad (rs), 0, -1);
11753 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11754 neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
11759 do_neon_dyadic_if_su (void)
11761 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
11765 do_neon_dyadic_if_su_d (void)
11767 /* This version only allow D registers, but that constraint is enforced during
11768 operand parsing so we don't need to do anything extra here. */
11769 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
11773 do_neon_dyadic_if_i_d (void)
11775 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11776 affected if we specify unsigned args. */
11777 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
11780 enum vfp_or_neon_is_neon_bits
11783 NEON_CHECK_ARCH = 2
11786 /* Call this function if an instruction which may have belonged to the VFP or
11787 Neon instruction sets, but turned out to be a Neon instruction (due to the
11788 operand types involved, etc.). We have to check and/or fix-up a couple of
11791 - Make sure the user hasn't attempted to make a Neon instruction
11793 - Alter the value in the condition code field if necessary.
11794 - Make sure that the arch supports Neon instructions.
11796 Which of these operations take place depends on bits from enum
11797 vfp_or_neon_is_neon_bits.
11799 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
11800 current instruction's condition is COND_ALWAYS, the condition field is
11801 changed to inst.uncond_value. This is necessary because instructions shared
11802 between VFP and Neon may be conditional for the VFP variants only, and the
11803 unconditional Neon version must have, e.g., 0xF in the condition field. */
11806 vfp_or_neon_is_neon (unsigned check)
11808 /* Conditions are always legal in Thumb mode (IT blocks). */
11809 if (!thumb_mode && (check & NEON_CHECK_CC))
11811 if (inst.cond != COND_ALWAYS)
11813 first_error (_(BAD_COND));
11816 if (inst.uncond_value != -1)
11817 inst.instruction |= inst.uncond_value << 28;
11820 if ((check & NEON_CHECK_ARCH)
11821 && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
11823 first_error (_(BAD_FPU));
11831 do_neon_addsub_if_i (void)
11833 if (try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
11836 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11839 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11840 affected if we specify unsigned args. */
11841 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
11844 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
11846 V<op> A,B (A is operand 0, B is operand 2)
11851 so handle that case specially. */
11854 neon_exchange_operands (void)
11856 void *scratch = alloca (sizeof (inst.operands[0]));
11857 if (inst.operands[1].present)
11859 /* Swap operands[1] and operands[2]. */
11860 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
11861 inst.operands[1] = inst.operands[2];
11862 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
11866 inst.operands[1] = inst.operands[2];
11867 inst.operands[2] = inst.operands[0];
11872 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
11874 if (inst.operands[2].isreg)
11877 neon_exchange_operands ();
11878 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
11882 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
11883 struct neon_type_el et = neon_check_type (2, rs,
11884 N_EQK | N_SIZ, immtypes | N_KEY);
11886 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11887 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11888 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11889 inst.instruction |= LOW4 (inst.operands[1].reg);
11890 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11891 inst.instruction |= neon_quad (rs) << 6;
11892 inst.instruction |= (et.type == NT_float) << 10;
11893 inst.instruction |= neon_logbits (et.size) << 18;
11895 inst.instruction = neon_dp_fixup (inst.instruction);
11902 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
11906 do_neon_cmp_inv (void)
11908 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
11914 neon_compare (N_IF_32, N_IF_32, FALSE);
11917 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
11918 scalars, which are encoded in 5 bits, M : Rm.
11919 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
11920 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
11924 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
11926 unsigned regno = NEON_SCALAR_REG (scalar);
11927 unsigned elno = NEON_SCALAR_INDEX (scalar);
11932 if (regno > 7 || elno > 3)
11934 return regno | (elno << 3);
11937 if (regno > 15 || elno > 1)
11939 return regno | (elno << 4);
11943 first_error (_("scalar out of range for multiply instruction"));
11949 /* Encode multiply / multiply-accumulate scalar instructions. */
11952 neon_mul_mac (struct neon_type_el et, int ubit)
11956 /* Give a more helpful error message if we have an invalid type. */
11957 if (et.type == NT_invtype)
11960 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
11961 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11962 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11963 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11964 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11965 inst.instruction |= LOW4 (scalar);
11966 inst.instruction |= HI1 (scalar) << 5;
11967 inst.instruction |= (et.type == NT_float) << 8;
11968 inst.instruction |= neon_logbits (et.size) << 20;
11969 inst.instruction |= (ubit != 0) << 24;
11971 inst.instruction = neon_dp_fixup (inst.instruction);
11975 do_neon_mac_maybe_scalar (void)
11977 if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
11980 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
11983 if (inst.operands[2].isscalar)
11985 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
11986 struct neon_type_el et = neon_check_type (3, rs,
11987 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
11988 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11989 neon_mul_mac (et, neon_quad (rs));
11993 /* The "untyped" case can't happen. Do this to stop the "U" bit being
11994 affected if we specify unsigned args. */
11995 neon_dyadic_misc (NT_untyped, N_IF_32, 0);
12002 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12003 struct neon_type_el et = neon_check_type (3, rs,
12004 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
12005 neon_three_same (neon_quad (rs), 0, et.size);
12008 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
12009 same types as the MAC equivalents. The polynomial type for this instruction
12010 is encoded the same as the integer type. */
12015 if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
12018 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12021 if (inst.operands[2].isscalar)
12022 do_neon_mac_maybe_scalar ();
12024 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
12028 do_neon_qdmulh (void)
12030 if (inst.operands[2].isscalar)
12032 enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
12033 struct neon_type_el et = neon_check_type (3, rs,
12034 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12035 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12036 neon_mul_mac (et, neon_quad (rs));
12040 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12041 struct neon_type_el et = neon_check_type (3, rs,
12042 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
12043 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12044 /* The U bit (rounding) comes from bit mask. */
12045 neon_three_same (neon_quad (rs), 0, et.size);
12050 do_neon_fcmp_absolute (void)
12052 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12053 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12054 /* Size field comes from bit mask. */
12055 neon_three_same (neon_quad (rs), 1, -1);
12059 do_neon_fcmp_absolute_inv (void)
12061 neon_exchange_operands ();
12062 do_neon_fcmp_absolute ();
12066 do_neon_step (void)
12068 enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
12069 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
12070 neon_three_same (neon_quad (rs), 0, -1);
12074 do_neon_abs_neg (void)
12076 enum neon_shape rs;
12077 struct neon_type_el et;
12079 if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
12082 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12085 rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12086 et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
12088 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12089 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12090 inst.instruction |= LOW4 (inst.operands[1].reg);
12091 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12092 inst.instruction |= neon_quad (rs) << 6;
12093 inst.instruction |= (et.type == NT_float) << 10;
12094 inst.instruction |= neon_logbits (et.size) << 18;
12096 inst.instruction = neon_dp_fixup (inst.instruction);
12102 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12103 struct neon_type_el et = neon_check_type (2, rs,
12104 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12105 int imm = inst.operands[2].imm;
12106 constraint (imm < 0 || (unsigned)imm >= et.size,
12107 _("immediate out of range for insert"));
12108 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12114 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12115 struct neon_type_el et = neon_check_type (2, rs,
12116 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12117 int imm = inst.operands[2].imm;
12118 constraint (imm < 1 || (unsigned)imm > et.size,
12119 _("immediate out of range for insert"));
12120 neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
12124 do_neon_qshlu_imm (void)
12126 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
12127 struct neon_type_el et = neon_check_type (2, rs,
12128 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
12129 int imm = inst.operands[2].imm;
12130 constraint (imm < 0 || (unsigned)imm >= et.size,
12131 _("immediate out of range for shift"));
12132 /* Only encodes the 'U present' variant of the instruction.
12133 In this case, signed types have OP (bit 8) set to 0.
12134 Unsigned types have OP set to 1. */
12135 inst.instruction |= (et.type == NT_unsigned) << 8;
12136 /* The rest of the bits are the same as other immediate shifts. */
12137 neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
12141 do_neon_qmovn (void)
12143 struct neon_type_el et = neon_check_type (2, NS_DQ,
12144 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12145 /* Saturating move where operands can be signed or unsigned, and the
12146 destination has the same signedness. */
12147 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12148 if (et.type == NT_unsigned)
12149 inst.instruction |= 0xc0;
12151 inst.instruction |= 0x80;
12152 neon_two_same (0, 1, et.size / 2);
12156 do_neon_qmovun (void)
12158 struct neon_type_el et = neon_check_type (2, NS_DQ,
12159 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12160 /* Saturating move with unsigned results. Operands must be signed. */
12161 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12162 neon_two_same (0, 1, et.size / 2);
12166 do_neon_rshift_sat_narrow (void)
12168 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12169 or unsigned. If operands are unsigned, results must also be unsigned. */
12170 struct neon_type_el et = neon_check_type (2, NS_DQI,
12171 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
12172 int imm = inst.operands[2].imm;
12173 /* This gets the bounds check, size encoding and immediate bits calculation
12177 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
12178 VQMOVN.I<size> <Dd>, <Qm>. */
12181 inst.operands[2].present = 0;
12182 inst.instruction = N_MNEM_vqmovn;
12187 constraint (imm < 1 || (unsigned)imm > et.size,
12188 _("immediate out of range"));
12189 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
12193 do_neon_rshift_sat_narrow_u (void)
12195 /* FIXME: Types for narrowing. If operands are signed, results can be signed
12196 or unsigned. If operands are unsigned, results must also be unsigned. */
12197 struct neon_type_el et = neon_check_type (2, NS_DQI,
12198 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
12199 int imm = inst.operands[2].imm;
12200 /* This gets the bounds check, size encoding and immediate bits calculation
12204 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
12205 VQMOVUN.I<size> <Dd>, <Qm>. */
12208 inst.operands[2].present = 0;
12209 inst.instruction = N_MNEM_vqmovun;
12214 constraint (imm < 1 || (unsigned)imm > et.size,
12215 _("immediate out of range"));
12216 /* FIXME: The manual is kind of unclear about what value U should have in
12217 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
12219 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
12223 do_neon_movn (void)
12225 struct neon_type_el et = neon_check_type (2, NS_DQ,
12226 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12227 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12228 neon_two_same (0, 1, et.size / 2);
12232 do_neon_rshift_narrow (void)
12234 struct neon_type_el et = neon_check_type (2, NS_DQI,
12235 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
12236 int imm = inst.operands[2].imm;
12237 /* This gets the bounds check, size encoding and immediate bits calculation
12241 /* If immediate is zero then we are a pseudo-instruction for
12242 VMOVN.I<size> <Dd>, <Qm> */
12245 inst.operands[2].present = 0;
12246 inst.instruction = N_MNEM_vmovn;
12251 constraint (imm < 1 || (unsigned)imm > et.size,
12252 _("immediate out of range for narrowing operation"));
12253 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
12257 do_neon_shll (void)
12259 /* FIXME: Type checking when lengthening. */
12260 struct neon_type_el et = neon_check_type (2, NS_QDI,
12261 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
12262 unsigned imm = inst.operands[2].imm;
12264 if (imm == et.size)
12266 /* Maximum shift variant. */
12267 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12268 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12269 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12270 inst.instruction |= LOW4 (inst.operands[1].reg);
12271 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12272 inst.instruction |= neon_logbits (et.size) << 18;
12274 inst.instruction = neon_dp_fixup (inst.instruction);
12278 /* A more-specific type check for non-max versions. */
12279 et = neon_check_type (2, NS_QDI,
12280 N_EQK | N_DBL, N_SU_32 | N_KEY);
12281 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12282 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
12286 /* Check the various types for the VCVT instruction, and return which version
12287 the current instruction is. */
12290 neon_cvt_flavour (enum neon_shape rs)
12292 #define CVT_VAR(C,X,Y) \
12293 et = neon_check_type (2, rs, whole_reg | (X), whole_reg | (Y)); \
12294 if (et.type != NT_invtype) \
12296 inst.error = NULL; \
12299 struct neon_type_el et;
12300 unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
12301 || rs == NS_FF) ? N_VFP : 0;
12302 /* The instruction versions which take an immediate take one register
12303 argument, which is extended to the width of the full register. Thus the
12304 "source" and "destination" registers must have the same width. Hack that
12305 here by making the size equal to the key (wider, in this case) operand. */
12306 unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;
12308 CVT_VAR (0, N_S32, N_F32);
12309 CVT_VAR (1, N_U32, N_F32);
12310 CVT_VAR (2, N_F32, N_S32);
12311 CVT_VAR (3, N_F32, N_U32);
12315 /* VFP instructions. */
12316 CVT_VAR (4, N_F32, N_F64);
12317 CVT_VAR (5, N_F64, N_F32);
12318 CVT_VAR (6, N_S32, N_F64 | key);
12319 CVT_VAR (7, N_U32, N_F64 | key);
12320 CVT_VAR (8, N_F64 | key, N_S32);
12321 CVT_VAR (9, N_F64 | key, N_U32);
12322 /* VFP instructions with bitshift. */
12323 CVT_VAR (10, N_F32 | key, N_S16);
12324 CVT_VAR (11, N_F32 | key, N_U16);
12325 CVT_VAR (12, N_F64 | key, N_S16);
12326 CVT_VAR (13, N_F64 | key, N_U16);
12327 CVT_VAR (14, N_S16, N_F32 | key);
12328 CVT_VAR (15, N_U16, N_F32 | key);
12329 CVT_VAR (16, N_S16, N_F64 | key);
12330 CVT_VAR (17, N_U16, N_F64 | key);
12336 /* Neon-syntax VFP conversions. */
12339 do_vfp_nsyn_cvt (enum neon_shape rs, int flavour)
12341 const char *opname = 0;
12343 if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI)
12345 /* Conversions with immediate bitshift. */
12346 const char *enc[] =
12368 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12370 opname = enc[flavour];
12371 constraint (inst.operands[0].reg != inst.operands[1].reg,
12372 _("operands 0 and 1 must be the same register"));
12373 inst.operands[1] = inst.operands[2];
12374 memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
12379 /* Conversions without bitshift. */
12380 const char *enc[] =
12394 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc))
12395 opname = enc[flavour];
12399 do_vfp_nsyn_opcode (opname);
12403 do_vfp_nsyn_cvtz (void)
12405 enum neon_shape rs = neon_select_shape (NS_FF, NS_FD, NS_NULL);
12406 int flavour = neon_cvt_flavour (rs);
12407 const char *enc[] =
12419 if (flavour >= 0 && flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
12420 do_vfp_nsyn_opcode (enc[flavour]);
12426 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
12427 NS_FD, NS_DF, NS_FF, NS_NULL);
12428 int flavour = neon_cvt_flavour (rs);
12430 /* VFP rather than Neon conversions. */
12433 do_vfp_nsyn_cvt (rs, flavour);
12442 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12445 /* Fixed-point conversion with #0 immediate is encoded as an
12446 integer conversion. */
12447 if (inst.operands[2].present && inst.operands[2].imm == 0)
12449 unsigned immbits = 32 - inst.operands[2].imm;
12450 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
12451 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12453 inst.instruction |= enctab[flavour];
12454 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12455 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12456 inst.instruction |= LOW4 (inst.operands[1].reg);
12457 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12458 inst.instruction |= neon_quad (rs) << 6;
12459 inst.instruction |= 1 << 21;
12460 inst.instruction |= immbits << 16;
12462 inst.instruction = neon_dp_fixup (inst.instruction);
12470 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
12472 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12474 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12478 inst.instruction |= enctab[flavour];
12480 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12481 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12482 inst.instruction |= LOW4 (inst.operands[1].reg);
12483 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12484 inst.instruction |= neon_quad (rs) << 6;
12485 inst.instruction |= 2 << 18;
12487 inst.instruction = neon_dp_fixup (inst.instruction);
12492 /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32). */
12493 do_vfp_nsyn_cvt (rs, flavour);
12498 neon_move_immediate (void)
12500 enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
12501 struct neon_type_el et = neon_check_type (2, rs,
12502 N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
12503 unsigned immlo, immhi = 0, immbits;
12506 constraint (et.type == NT_invtype,
12507 _("operand size must be specified for immediate VMOV"));
12509 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
12510 op = (inst.instruction & (1 << 5)) != 0;
12512 immlo = inst.operands[1].imm;
12513 if (inst.operands[1].regisimm)
12514 immhi = inst.operands[1].reg;
12516 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
12517 _("immediate has bits set outside the operand size"));
12519 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
12520 et.size, et.type)) == FAIL)
12522 /* Invert relevant bits only. */
12523 neon_invert_size (&immlo, &immhi, et.size);
12524 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
12525 with one or the other; those cases are caught by
12526 neon_cmode_for_move_imm. */
12528 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
12529 et.size, et.type)) == FAIL)
12531 first_error (_("immediate out of range"));
12536 inst.instruction &= ~(1 << 5);
12537 inst.instruction |= op << 5;
12539 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12540 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12541 inst.instruction |= neon_quad (rs) << 6;
12542 inst.instruction |= cmode << 8;
12544 neon_write_immbits (immbits);
12550 if (inst.operands[1].isreg)
12552 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12554 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12555 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12556 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12557 inst.instruction |= LOW4 (inst.operands[1].reg);
12558 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12559 inst.instruction |= neon_quad (rs) << 6;
12563 inst.instruction = NEON_ENC_IMMED (inst.instruction);
12564 neon_move_immediate ();
12567 inst.instruction = neon_dp_fixup (inst.instruction);
12570 /* Encode instructions of form:
12572 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
12573 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
12578 neon_mixed_length (struct neon_type_el et, unsigned size)
12580 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12581 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12582 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12583 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12584 inst.instruction |= LOW4 (inst.operands[2].reg);
12585 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12586 inst.instruction |= (et.type == NT_unsigned) << 24;
12587 inst.instruction |= neon_logbits (size) << 20;
12589 inst.instruction = neon_dp_fixup (inst.instruction);
12593 do_neon_dyadic_long (void)
12595 /* FIXME: Type checking for lengthening op. */
12596 struct neon_type_el et = neon_check_type (3, NS_QDD,
12597 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
12598 neon_mixed_length (et, et.size);
12602 do_neon_abal (void)
12604 struct neon_type_el et = neon_check_type (3, NS_QDD,
12605 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
12606 neon_mixed_length (et, et.size);
12610 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
12612 if (inst.operands[2].isscalar)
12614 struct neon_type_el et = neon_check_type (3, NS_QDS,
12615 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
12616 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12617 neon_mul_mac (et, et.type == NT_unsigned);
12621 struct neon_type_el et = neon_check_type (3, NS_QDD,
12622 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
12623 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12624 neon_mixed_length (et, et.size);
12629 do_neon_mac_maybe_scalar_long (void)
12631 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
12635 do_neon_dyadic_wide (void)
12637 struct neon_type_el et = neon_check_type (3, NS_QQD,
12638 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
12639 neon_mixed_length (et, et.size);
12643 do_neon_dyadic_narrow (void)
12645 struct neon_type_el et = neon_check_type (3, NS_QDD,
12646 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
12647 /* Operand sign is unimportant, and the U bit is part of the opcode,
12648 so force the operand type to integer. */
12649 et.type = NT_integer;
12650 neon_mixed_length (et, et.size / 2);
12654 do_neon_mul_sat_scalar_long (void)
12656 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
12660 do_neon_vmull (void)
12662 if (inst.operands[2].isscalar)
12663 do_neon_mac_maybe_scalar_long ();
12666 struct neon_type_el et = neon_check_type (3, NS_QDD,
12667 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
12668 if (et.type == NT_poly)
12669 inst.instruction = NEON_ENC_POLY (inst.instruction);
12671 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
12672 /* For polynomial encoding, size field must be 0b00 and the U bit must be
12673 zero. Should be OK as-is. */
12674 neon_mixed_length (et, et.size);
12681 enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
12682 struct neon_type_el et = neon_check_type (3, rs,
12683 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
12684 unsigned imm = (inst.operands[3].imm * et.size) / 8;
12685 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12686 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12687 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12688 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12689 inst.instruction |= LOW4 (inst.operands[2].reg);
12690 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12691 inst.instruction |= neon_quad (rs) << 6;
12692 inst.instruction |= imm << 8;
12694 inst.instruction = neon_dp_fixup (inst.instruction);
12700 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
12701 struct neon_type_el et = neon_check_type (2, rs,
12702 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12703 unsigned op = (inst.instruction >> 7) & 3;
12704 /* N (width of reversed regions) is encoded as part of the bitmask. We
12705 extract it here to check the elements to be reversed are smaller.
12706 Otherwise we'd get a reserved instruction. */
12707 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
12708 assert (elsize != 0);
12709 constraint (et.size >= elsize,
12710 _("elements must be smaller than reversal region"));
12711 neon_two_same (neon_quad (rs), 1, et.size);
12717 if (inst.operands[1].isscalar)
12719 enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
12720 struct neon_type_el et = neon_check_type (2, rs,
12721 N_EQK, N_8 | N_16 | N_32 | N_KEY);
12722 unsigned sizebits = et.size >> 3;
12723 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
12724 int logsize = neon_logbits (et.size);
12725 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
12727 if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
12730 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
12731 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12732 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12733 inst.instruction |= LOW4 (dm);
12734 inst.instruction |= HI1 (dm) << 5;
12735 inst.instruction |= neon_quad (rs) << 6;
12736 inst.instruction |= x << 17;
12737 inst.instruction |= sizebits << 16;
12739 inst.instruction = neon_dp_fixup (inst.instruction);
12743 enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
12744 struct neon_type_el et = neon_check_type (2, rs,
12745 N_8 | N_16 | N_32 | N_KEY, N_EQK);
12746 /* Duplicate ARM register to lanes of vector. */
12747 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
12750 case 8: inst.instruction |= 0x400000; break;
12751 case 16: inst.instruction |= 0x000020; break;
12752 case 32: inst.instruction |= 0x000000; break;
12755 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
12756 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
12757 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
12758 inst.instruction |= neon_quad (rs) << 21;
12759 /* The encoding for this instruction is identical for the ARM and Thumb
12760 variants, except for the condition field. */
12761 do_vfp_cond_or_thumb ();
12765 /* VMOV has particularly many variations. It can be one of:
12766 0. VMOV<c><q> <Qd>, <Qm>
12767 1. VMOV<c><q> <Dd>, <Dm>
12768 (Register operations, which are VORR with Rm = Rn.)
12769 2. VMOV<c><q>.<dt> <Qd>, #<imm>
12770 3. VMOV<c><q>.<dt> <Dd>, #<imm>
12772 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
12773 (ARM register to scalar.)
12774 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
12775 (Two ARM registers to vector.)
12776 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
12777 (Scalar to ARM register.)
12778 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
12779 (Vector to two ARM registers.)
12780 8. VMOV.F32 <Sd>, <Sm>
12781 9. VMOV.F64 <Dd>, <Dm>
12782 (VFP register moves.)
12783 10. VMOV.F32 <Sd>, #imm
12784 11. VMOV.F64 <Dd>, #imm
12785 (VFP float immediate load.)
12786 12. VMOV <Rd>, <Sm>
12787 (VFP single to ARM reg.)
12788 13. VMOV <Sd>, <Rm>
12789 (ARM reg to VFP single.)
12790 14. VMOV <Rd>, <Re>, <Sn>, <Sm>
12791 (Two ARM regs to two VFP singles.)
12792 15. VMOV <Sd>, <Se>, <Rn>, <Rm>
12793 (Two VFP singles to two ARM regs.)
12795 These cases can be disambiguated using neon_select_shape, except cases 1/9
12796 and 3/11 which depend on the operand type too.
12798 All the encoded bits are hardcoded by this function.
12800 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
12801 Cases 5, 7 may be used with VFPv2 and above.
12803 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
12804 can specify a type where it doesn't make sense to, and is ignored).
12810 enum neon_shape rs = neon_select_shape (NS_RRFF, NS_FFRR, NS_DRR, NS_RRD,
12811 NS_QQ, NS_DD, NS_QI, NS_DI, NS_SR, NS_RS, NS_FF, NS_FI, NS_RF, NS_FR,
12813 struct neon_type_el et;
12814 const char *ldconst = 0;
12818 case NS_DD: /* case 1/9. */
12819 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
12820 /* It is not an error here if no type is given. */
12822 if (et.type == NT_float && et.size == 64)
12824 do_vfp_nsyn_opcode ("fcpyd");
12827 /* fall through. */
12829 case NS_QQ: /* case 0/1. */
12831 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12833 /* The architecture manual I have doesn't explicitly state which
12834 value the U bit should have for register->register moves, but
12835 the equivalent VORR instruction has U = 0, so do that. */
12836 inst.instruction = 0x0200110;
12837 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12838 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12839 inst.instruction |= LOW4 (inst.operands[1].reg);
12840 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
12841 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
12842 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
12843 inst.instruction |= neon_quad (rs) << 6;
12845 inst.instruction = neon_dp_fixup (inst.instruction);
12849 case NS_DI: /* case 3/11. */
12850 et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
12852 if (et.type == NT_float && et.size == 64)
12854 /* case 11 (fconstd). */
12855 ldconst = "fconstd";
12856 goto encode_fconstd;
12858 /* fall through. */
12860 case NS_QI: /* case 2/3. */
12861 if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
12863 inst.instruction = 0x0800010;
12864 neon_move_immediate ();
12865 inst.instruction = neon_dp_fixup (inst.instruction);
12868 case NS_SR: /* case 4. */
12870 unsigned bcdebits = 0;
12871 struct neon_type_el et = neon_check_type (2, NS_NULL,
12872 N_8 | N_16 | N_32 | N_KEY, N_EQK);
12873 int logsize = neon_logbits (et.size);
12874 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
12875 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
12877 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
12879 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
12880 && et.size != 32, _(BAD_FPU));
12881 constraint (et.type == NT_invtype, _("bad type for scalar"));
12882 constraint (x >= 64 / et.size, _("scalar index out of range"));
12886 case 8: bcdebits = 0x8; break;
12887 case 16: bcdebits = 0x1; break;
12888 case 32: bcdebits = 0x0; break;
12892 bcdebits |= x << logsize;
12894 inst.instruction = 0xe000b10;
12895 do_vfp_cond_or_thumb ();
12896 inst.instruction |= LOW4 (dn) << 16;
12897 inst.instruction |= HI1 (dn) << 7;
12898 inst.instruction |= inst.operands[1].reg << 12;
12899 inst.instruction |= (bcdebits & 3) << 5;
12900 inst.instruction |= (bcdebits >> 2) << 21;
12904 case NS_DRR: /* case 5 (fmdrr). */
12905 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
12908 inst.instruction = 0xc400b10;
12909 do_vfp_cond_or_thumb ();
12910 inst.instruction |= LOW4 (inst.operands[0].reg);
12911 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
12912 inst.instruction |= inst.operands[1].reg << 12;
12913 inst.instruction |= inst.operands[2].reg << 16;
12916 case NS_RS: /* case 6. */
12918 struct neon_type_el et = neon_check_type (2, NS_NULL,
12919 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
12920 unsigned logsize = neon_logbits (et.size);
12921 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
12922 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
12923 unsigned abcdebits = 0;
12925 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
12927 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
12928 && et.size != 32, _(BAD_FPU));
12929 constraint (et.type == NT_invtype, _("bad type for scalar"));
12930 constraint (x >= 64 / et.size, _("scalar index out of range"));
12934 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
12935 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
12936 case 32: abcdebits = 0x00; break;
12940 abcdebits |= x << logsize;
12941 inst.instruction = 0xe100b10;
12942 do_vfp_cond_or_thumb ();
12943 inst.instruction |= LOW4 (dn) << 16;
12944 inst.instruction |= HI1 (dn) << 7;
12945 inst.instruction |= inst.operands[0].reg << 12;
12946 inst.instruction |= (abcdebits & 3) << 5;
12947 inst.instruction |= (abcdebits >> 2) << 21;
12951 case NS_RRD: /* case 7 (fmrrd). */
12952 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
12955 inst.instruction = 0xc500b10;
12956 do_vfp_cond_or_thumb ();
12957 inst.instruction |= inst.operands[0].reg << 12;
12958 inst.instruction |= inst.operands[1].reg << 16;
12959 inst.instruction |= LOW4 (inst.operands[2].reg);
12960 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
12963 case NS_FF: /* case 8 (fcpys). */
12964 do_vfp_nsyn_opcode ("fcpys");
12967 case NS_FI: /* case 10 (fconsts). */
12968 ldconst = "fconsts";
12970 if (is_quarter_float (inst.operands[1].imm))
12972 inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
12973 do_vfp_nsyn_opcode (ldconst);
12976 first_error (_("immediate out of range"));
12979 case NS_RF: /* case 12 (fmrs). */
12980 do_vfp_nsyn_opcode ("fmrs");
12983 case NS_FR: /* case 13 (fmsr). */
12984 do_vfp_nsyn_opcode ("fmsr");
12987 /* The encoders for the fmrrs and fmsrr instructions expect three operands
12988 (one of which is a list), but we have parsed four. Do some fiddling to
12989 make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
12991 case NS_RRFF: /* case 14 (fmrrs). */
12992 constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
12993 _("VFP registers must be adjacent"));
12994 inst.operands[2].imm = 2;
12995 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
12996 do_vfp_nsyn_opcode ("fmrrs");
12999 case NS_FFRR: /* case 15 (fmsrr). */
13000 constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
13001 _("VFP registers must be adjacent"));
13002 inst.operands[1] = inst.operands[2];
13003 inst.operands[2] = inst.operands[3];
13004 inst.operands[0].imm = 2;
13005 memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
13006 do_vfp_nsyn_opcode ("fmsrr");
13015 do_neon_rshift_round_imm (void)
13017 enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
13018 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
13019 int imm = inst.operands[2].imm;
13021 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
13024 inst.operands[2].present = 0;
13029 constraint (imm < 1 || (unsigned)imm > et.size,
13030 _("immediate out of range for shift"));
13031 neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
13036 do_neon_movl (void)
13038 struct neon_type_el et = neon_check_type (2, NS_QD,
13039 N_EQK | N_DBL, N_SU_32 | N_KEY);
13040 unsigned sizebits = et.size >> 3;
13041 inst.instruction |= sizebits << 19;
13042 neon_two_same (0, et.type == NT_unsigned, -1);
13048 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13049 struct neon_type_el et = neon_check_type (2, rs,
13050 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13051 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
13052 neon_two_same (neon_quad (rs), 1, et.size);
13056 do_neon_zip_uzp (void)
13058 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13059 struct neon_type_el et = neon_check_type (2, rs,
13060 N_EQK, N_8 | N_16 | N_32 | N_KEY);
13061 if (rs == NS_DD && et.size == 32)
13063 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
13064 inst.instruction = N_MNEM_vtrn;
13068 neon_two_same (neon_quad (rs), 1, et.size);
13072 do_neon_sat_abs_neg (void)
13074 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13075 struct neon_type_el et = neon_check_type (2, rs,
13076 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13077 neon_two_same (neon_quad (rs), 1, et.size);
13081 do_neon_pair_long (void)
13083 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13084 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
13085 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
13086 inst.instruction |= (et.type == NT_unsigned) << 7;
13087 neon_two_same (neon_quad (rs), 1, et.size);
13091 do_neon_recip_est (void)
13093 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13094 struct neon_type_el et = neon_check_type (2, rs,
13095 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
13096 inst.instruction |= (et.type == NT_float) << 8;
13097 neon_two_same (neon_quad (rs), 1, et.size);
13103 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13104 struct neon_type_el et = neon_check_type (2, rs,
13105 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
13106 neon_two_same (neon_quad (rs), 1, et.size);
13112 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13113 struct neon_type_el et = neon_check_type (2, rs,
13114 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
13115 neon_two_same (neon_quad (rs), 1, et.size);
13121 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13122 struct neon_type_el et = neon_check_type (2, rs,
13123 N_EQK | N_INT, N_8 | N_KEY);
13124 neon_two_same (neon_quad (rs), 1, et.size);
13130 enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
13131 neon_two_same (neon_quad (rs), 1, -1);
13135 do_neon_tbl_tbx (void)
13137 unsigned listlenbits;
13138 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
13140 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
13142 first_error (_("bad list length for table lookup"));
13146 listlenbits = inst.operands[1].imm - 1;
13147 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13148 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13149 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
13150 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
13151 inst.instruction |= LOW4 (inst.operands[2].reg);
13152 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
13153 inst.instruction |= listlenbits << 8;
13155 inst.instruction = neon_dp_fixup (inst.instruction);
13159 do_neon_ldm_stm (void)
13161 /* P, U and L bits are part of bitmask. */
13162 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
13163 unsigned offsetbits = inst.operands[1].imm * 2;
13165 if (inst.operands[1].issingle)
13167 do_vfp_nsyn_ldm_stm (is_dbmode);
13171 constraint (is_dbmode && !inst.operands[0].writeback,
13172 _("writeback (!) must be used for VLDMDB and VSTMDB"));
13174 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
13175 _("register list must contain at least 1 and at most 16 "
13178 inst.instruction |= inst.operands[0].reg << 16;
13179 inst.instruction |= inst.operands[0].writeback << 21;
13180 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
13181 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
13183 inst.instruction |= offsetbits;
13185 do_vfp_cond_or_thumb ();
13189 do_neon_ldr_str (void)
13191 int is_ldr = (inst.instruction & (1 << 20)) != 0;
13193 if (inst.operands[0].issingle)
13196 do_vfp_nsyn_opcode ("flds");
13198 do_vfp_nsyn_opcode ("fsts");
13203 do_vfp_nsyn_opcode ("fldd");
13205 do_vfp_nsyn_opcode ("fstd");
13209 /* "interleave" version also handles non-interleaving register VLD1/VST1
13213 do_neon_ld_st_interleave (void)
13215 struct neon_type_el et = neon_check_type (1, NS_NULL,
13216 N_8 | N_16 | N_32 | N_64);
13217 unsigned alignbits = 0;
13219 /* The bits in this table go:
13220 0: register stride of one (0) or two (1)
13221 1,2: register list length, minus one (1, 2, 3, 4).
13222 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
13223 We use -1 for invalid entries. */
13224 const int typetable[] =
13226 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
13227 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
13228 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
13229 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
13233 if (et.type == NT_invtype)
13236 if (inst.operands[1].immisalign)
13237 switch (inst.operands[1].imm >> 8)
13239 case 64: alignbits = 1; break;
13241 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
13242 goto bad_alignment;
13246 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
13247 goto bad_alignment;
13252 first_error (_("bad alignment"));
13256 inst.instruction |= alignbits << 4;
13257 inst.instruction |= neon_logbits (et.size) << 6;
13259 /* Bits [4:6] of the immediate in a list specifier encode register stride
13260 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
13261 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
13262 up the right value for "type" in a table based on this value and the given
13263 list style, then stick it back. */
13264 idx = ((inst.operands[0].imm >> 4) & 7)
13265 | (((inst.instruction >> 8) & 3) << 3);
13267 typebits = typetable[idx];
13269 constraint (typebits == -1, _("bad list type for instruction"));
13271 inst.instruction &= ~0xf00;
13272 inst.instruction |= typebits << 8;
13275 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
13276 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
13277 otherwise. The variable arguments are a list of pairs of legal (size, align)
13278 values, terminated with -1. */
13281 neon_alignment_bit (int size, int align, int *do_align, ...)
13284 int result = FAIL, thissize, thisalign;
13286 if (!inst.operands[1].immisalign)
13292 va_start (ap, do_align);
13296 thissize = va_arg (ap, int);
13297 if (thissize == -1)
13299 thisalign = va_arg (ap, int);
13301 if (size == thissize && align == thisalign)
13304 while (result != SUCCESS);
13308 if (result == SUCCESS)
13311 first_error (_("unsupported alignment for instruction"));
13317 do_neon_ld_st_lane (void)
13319 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
13320 int align_good, do_align = 0;
13321 int logsize = neon_logbits (et.size);
13322 int align = inst.operands[1].imm >> 8;
13323 int n = (inst.instruction >> 8) & 3;
13324 int max_el = 64 / et.size;
13326 if (et.type == NT_invtype)
13329 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
13330 _("bad list length"));
13331 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
13332 _("scalar index out of range"));
13333 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
13335 _("stride of 2 unavailable when element size is 8"));
13339 case 0: /* VLD1 / VST1. */
13340 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
13342 if (align_good == FAIL)
13346 unsigned alignbits = 0;
13349 case 16: alignbits = 0x1; break;
13350 case 32: alignbits = 0x3; break;
13353 inst.instruction |= alignbits << 4;
13357 case 1: /* VLD2 / VST2. */
13358 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
13360 if (align_good == FAIL)
13363 inst.instruction |= 1 << 4;
13366 case 2: /* VLD3 / VST3. */
13367 constraint (inst.operands[1].immisalign,
13368 _("can't use alignment with this instruction"));
13371 case 3: /* VLD4 / VST4. */
13372 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
13373 16, 64, 32, 64, 32, 128, -1);
13374 if (align_good == FAIL)
13378 unsigned alignbits = 0;
13381 case 8: alignbits = 0x1; break;
13382 case 16: alignbits = 0x1; break;
13383 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
13386 inst.instruction |= alignbits << 4;
13393 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
13394 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13395 inst.instruction |= 1 << (4 + logsize);
13397 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
13398 inst.instruction |= logsize << 10;
13401 /* Encode single n-element structure to all lanes VLD<n> instructions. */
13404 do_neon_ld_dup (void)
13406 struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
13407 int align_good, do_align = 0;
13409 if (et.type == NT_invtype)
13412 switch ((inst.instruction >> 8) & 3)
13414 case 0: /* VLD1. */
13415 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
13416 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
13417 &do_align, 16, 16, 32, 32, -1);
13418 if (align_good == FAIL)
13420 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
13423 case 2: inst.instruction |= 1 << 5; break;
13424 default: first_error (_("bad list length")); return;
13426 inst.instruction |= neon_logbits (et.size) << 6;
13429 case 1: /* VLD2. */
13430 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
13431 &do_align, 8, 16, 16, 32, 32, 64, -1);
13432 if (align_good == FAIL)
13434 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
13435 _("bad list length"));
13436 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13437 inst.instruction |= 1 << 5;
13438 inst.instruction |= neon_logbits (et.size) << 6;
13441 case 2: /* VLD3. */
13442 constraint (inst.operands[1].immisalign,
13443 _("can't use alignment with this instruction"));
13444 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
13445 _("bad list length"));
13446 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13447 inst.instruction |= 1 << 5;
13448 inst.instruction |= neon_logbits (et.size) << 6;
13451 case 3: /* VLD4. */
13453 int align = inst.operands[1].imm >> 8;
13454 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
13455 16, 64, 32, 64, 32, 128, -1);
13456 if (align_good == FAIL)
13458 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
13459 _("bad list length"));
13460 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
13461 inst.instruction |= 1 << 5;
13462 if (et.size == 32 && align == 128)
13463 inst.instruction |= 0x3 << 6;
13465 inst.instruction |= neon_logbits (et.size) << 6;
13472 inst.instruction |= do_align << 4;
13475 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
13476 apart from bits [11:4]. */
13479 do_neon_ldx_stx (void)
13481 switch (NEON_LANE (inst.operands[0].imm))
13483 case NEON_INTERLEAVE_LANES:
13484 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
13485 do_neon_ld_st_interleave ();
13488 case NEON_ALL_LANES:
13489 inst.instruction = NEON_ENC_DUP (inst.instruction);
13494 inst.instruction = NEON_ENC_LANE (inst.instruction);
13495 do_neon_ld_st_lane ();
13498 /* L bit comes from bit mask. */
13499 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
13500 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
13501 inst.instruction |= inst.operands[1].reg << 16;
13503 if (inst.operands[1].postind)
13505 int postreg = inst.operands[1].imm & 0xf;
13506 constraint (!inst.operands[1].immisreg,
13507 _("post-index must be a register"));
13508 constraint (postreg == 0xd || postreg == 0xf,
13509 _("bad register for post-index"));
13510 inst.instruction |= postreg;
13512 else if (inst.operands[1].writeback)
13514 inst.instruction |= 0xd;
13517 inst.instruction |= 0xf;
13520 inst.instruction |= 0xf9000000;
13522 inst.instruction |= 0xf4000000;
13526 /* Overall per-instruction processing. */
13528 /* We need to be able to fix up arbitrary expressions in some statements.
13529 This is so that we can handle symbols that are an arbitrary distance from
13530 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
13531 which returns part of an address in a form which will be valid for
13532 a data instruction. We do this by pushing the expression into a symbol
13533 in the expr_section, and creating a fix for that. */
13536 fix_new_arm (fragS * frag,
13551 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
13555 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
13560 /* Mark whether the fix is to a THUMB instruction, or an ARM
13562 new_fix->tc_fix_data = thumb_mode;
13565 /* Create a frg for an instruction requiring relaxation. */
13567 output_relax_insn (void)
13574 /* The size of the instruction is unknown, so tie the debug info to the
13575 start of the instruction. */
13576 dwarf2_emit_insn (0);
13579 switch (inst.reloc.exp.X_op)
13582 sym = inst.reloc.exp.X_add_symbol;
13583 offset = inst.reloc.exp.X_add_number;
13587 offset = inst.reloc.exp.X_add_number;
13590 sym = make_expr_symbol (&inst.reloc.exp);
13594 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
13595 inst.relax, sym, offset, NULL/*offset, opcode*/);
13596 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
13599 /* Write a 32-bit thumb instruction to buf. */
13601 put_thumb32_insn (char * buf, unsigned long insn)
13603 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
13604 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
13608 output_inst (const char * str)
13614 as_bad ("%s -- `%s'", inst.error, str);
13618 output_relax_insn();
13621 if (inst.size == 0)
13624 to = frag_more (inst.size);
13626 if (thumb_mode && (inst.size > THUMB_SIZE))
13628 assert (inst.size == (2 * THUMB_SIZE));
13629 put_thumb32_insn (to, inst.instruction);
13631 else if (inst.size > INSN_SIZE)
13633 assert (inst.size == (2 * INSN_SIZE));
13634 md_number_to_chars (to, inst.instruction, INSN_SIZE);
13635 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
13638 md_number_to_chars (to, inst.instruction, inst.size);
13640 if (inst.reloc.type != BFD_RELOC_UNUSED)
13641 fix_new_arm (frag_now, to - frag_now->fr_literal,
13642 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
13646 dwarf2_emit_insn (inst.size);
13650 /* Tag values used in struct asm_opcode's tag field. */
13653 OT_unconditional, /* Instruction cannot be conditionalized.
13654 The ARM condition field is still 0xE. */
13655 OT_unconditionalF, /* Instruction cannot be conditionalized
13656 and carries 0xF in its ARM condition field. */
13657 OT_csuffix, /* Instruction takes a conditional suffix. */
13658 OT_csuffixF, /* Some forms of the instruction take a conditional
13659 suffix, others place 0xF where the condition field
13661 OT_cinfix3, /* Instruction takes a conditional infix,
13662 beginning at character index 3. (In
13663 unified mode, it becomes a suffix.) */
13664 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
13665 character index 3, even in unified mode. Used for
13666 legacy instructions where suffix and infix forms
13667 may be ambiguous. */
13668 OT_csuf_or_in3, /* Instruction takes either a conditional
13669 suffix or an infix at character index 3. */
13670 OT_odd_infix_unc, /* This is the unconditional variant of an
13671 instruction that takes a conditional infix
13672 at an unusual position. In unified mode,
13673 this variant will accept a suffix. */
13674 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
13675 are the conditional variants of instructions that
13676 take conditional infixes in unusual positions.
13677 The infix appears at character index
13678 (tag - OT_odd_infix_0). These are not accepted
13679 in unified mode. */
13682 /* Subroutine of md_assemble, responsible for looking up the primary
13683 opcode from the mnemonic the user wrote. STR points to the
13684 beginning of the mnemonic.
13686 This is not simply a hash table lookup, because of conditional
13687 variants. Most instructions have conditional variants, which are
13688 expressed with a _conditional affix_ to the mnemonic. If we were
13689 to encode each conditional variant as a literal string in the opcode
13690 table, it would have approximately 20,000 entries.
13692 Most mnemonics take this affix as a suffix, and in unified syntax,
13693 'most' is upgraded to 'all'. However, in the divided syntax, some
13694 instructions take the affix as an infix, notably the s-variants of
13695 the arithmetic instructions. Of those instructions, all but six
13696 have the infix appear after the third character of the mnemonic.
13698 Accordingly, the algorithm for looking up primary opcodes given
13701 1. Look up the identifier in the opcode table.
13702 If we find a match, go to step U.
13704 2. Look up the last two characters of the identifier in the
13705 conditions table. If we find a match, look up the first N-2
13706 characters of the identifier in the opcode table. If we
13707 find a match, go to step CE.
13709 3. Look up the fourth and fifth characters of the identifier in
13710 the conditions table. If we find a match, extract those
13711 characters from the identifier, and look up the remaining
13712 characters in the opcode table. If we find a match, go
13717 U. Examine the tag field of the opcode structure, in case this is
13718 one of the six instructions with its conditional infix in an
13719 unusual place. If it is, the tag tells us where to find the
13720 infix; look it up in the conditions table and set inst.cond
13721 accordingly. Otherwise, this is an unconditional instruction.
13722 Again set inst.cond accordingly. Return the opcode structure.
13724 CE. Examine the tag field to make sure this is an instruction that
13725 should receive a conditional suffix. If it is not, fail.
13726 Otherwise, set inst.cond from the suffix we already looked up,
13727 and return the opcode structure.
13729 CM. Examine the tag field to make sure this is an instruction that
13730 should receive a conditional infix after the third character.
13731 If it is not, fail. Otherwise, undo the edits to the current
13732 line of input and proceed as for case CE. */
13734 static const struct asm_opcode *
13735 opcode_lookup (char **str)
13739 const struct asm_opcode *opcode;
13740 const struct asm_cond *cond;
13742 bfd_boolean neon_supported;
13744 neon_supported = ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1);
13746 /* Scan up to the end of the mnemonic, which must end in white space,
13747 '.' (in unified mode, or for Neon instructions), or end of string. */
13748 for (base = end = *str; *end != '\0'; end++)
13749 if (*end == ' ' || ((unified_syntax || neon_supported) && *end == '.'))
13755 /* Handle a possible width suffix and/or Neon type suffix. */
13760 /* The .w and .n suffixes are only valid if the unified syntax is in
13762 if (unified_syntax && end[1] == 'w')
13764 else if (unified_syntax && end[1] == 'n')
13769 inst.vectype.elems = 0;
13771 *str = end + offset;
13773 if (end[offset] == '.')
13775 /* See if we have a Neon type suffix (possible in either unified or
13776 non-unified ARM syntax mode). */
13777 if (parse_neon_type (&inst.vectype, str) == FAIL)
13780 else if (end[offset] != '\0' && end[offset] != ' ')
13786 /* Look for unaffixed or special-case affixed mnemonic. */
13787 opcode = hash_find_n (arm_ops_hsh, base, end - base);
13791 if (opcode->tag < OT_odd_infix_0)
13793 inst.cond = COND_ALWAYS;
13797 if (unified_syntax)
13798 as_warn (_("conditional infixes are deprecated in unified syntax"));
13799 affix = base + (opcode->tag - OT_odd_infix_0);
13800 cond = hash_find_n (arm_cond_hsh, affix, 2);
13803 inst.cond = cond->value;
13807 /* Cannot have a conditional suffix on a mnemonic of less than two
13809 if (end - base < 3)
13812 /* Look for suffixed mnemonic. */
13814 cond = hash_find_n (arm_cond_hsh, affix, 2);
13815 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
13816 if (opcode && cond)
13819 switch (opcode->tag)
13821 case OT_cinfix3_legacy:
13822 /* Ignore conditional suffixes matched on infix only mnemonics. */
13826 case OT_odd_infix_unc:
13827 if (!unified_syntax)
13829 /* else fall through */
13833 case OT_csuf_or_in3:
13834 inst.cond = cond->value;
13837 case OT_unconditional:
13838 case OT_unconditionalF:
13841 inst.cond = cond->value;
13845 /* delayed diagnostic */
13846 inst.error = BAD_COND;
13847 inst.cond = COND_ALWAYS;
13856 /* Cannot have a usual-position infix on a mnemonic of less than
13857 six characters (five would be a suffix). */
13858 if (end - base < 6)
13861 /* Look for infixed mnemonic in the usual position. */
13863 cond = hash_find_n (arm_cond_hsh, affix, 2);
13867 memcpy (save, affix, 2);
13868 memmove (affix, affix + 2, (end - affix) - 2);
13869 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
13870 memmove (affix + 2, affix, (end - affix) - 2);
13871 memcpy (affix, save, 2);
13873 if (opcode && (opcode->tag == OT_cinfix3 || opcode->tag == OT_csuf_or_in3
13874 || opcode->tag == OT_cinfix3_legacy))
13877 if (unified_syntax && opcode->tag == OT_cinfix3)
13878 as_warn (_("conditional infixes are deprecated in unified syntax"));
13880 inst.cond = cond->value;
13888 md_assemble (char *str)
13891 const struct asm_opcode * opcode;
13893 /* Align the previous label if needed. */
13894 if (last_label_seen != NULL)
13896 symbol_set_frag (last_label_seen, frag_now);
13897 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
13898 S_SET_SEGMENT (last_label_seen, now_seg);
13901 memset (&inst, '\0', sizeof (inst));
13902 inst.reloc.type = BFD_RELOC_UNUSED;
13904 opcode = opcode_lookup (&p);
13907 /* It wasn't an instruction, but it might be a register alias of
13908 the form alias .req reg, or a Neon .dn/.qn directive. */
13909 if (!create_register_alias (str, p)
13910 && !create_neon_reg_alias (str, p))
13911 as_bad (_("bad instruction `%s'"), str);
13916 /* The value which unconditional instructions should have in place of the
13917 condition field. */
13918 inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1;
13922 arm_feature_set variant;
13924 variant = cpu_variant;
13925 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
13926 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
13927 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
13928 /* Check that this instruction is supported for this CPU. */
13929 if (!opcode->tvariant
13930 || (thumb_mode == 1
13931 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
13933 as_bad (_("selected processor does not support `%s'"), str);
13936 if (inst.cond != COND_ALWAYS && !unified_syntax
13937 && opcode->tencode != do_t_branch)
13939 as_bad (_("Thumb does not support conditional execution"));
13943 /* Check conditional suffixes. */
13944 if (current_it_mask)
13947 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
13948 current_it_mask <<= 1;
13949 current_it_mask &= 0x1f;
13950 /* The BKPT instruction is unconditional even in an IT block. */
13952 && cond != inst.cond && opcode->tencode != do_t_bkpt)
13954 as_bad (_("incorrect condition in IT block"));
13958 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
13960 as_bad (_("thumb conditional instrunction not in IT block"));
13964 mapping_state (MAP_THUMB);
13965 inst.instruction = opcode->tvalue;
13967 if (!parse_operands (p, opcode->operands))
13968 opcode->tencode ();
13970 /* Clear current_it_mask at the end of an IT block. */
13971 if (current_it_mask == 0x10)
13972 current_it_mask = 0;
13974 if (!(inst.error || inst.relax))
13976 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
13977 inst.size = (inst.instruction > 0xffff ? 4 : 2);
13978 if (inst.size_req && inst.size_req != inst.size)
13980 as_bad (_("cannot honor width suffix -- `%s'"), str);
13984 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
13985 *opcode->tvariant);
13986 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
13987 set those bits when Thumb-2 32-bit instuctions are seen. ie.
13988 anything other than bl/blx.
13989 This is overly pessimistic for relaxable instructions. */
13990 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
13992 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
13995 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
13997 /* Check that this instruction is supported for this CPU. */
13998 if (!opcode->avariant ||
13999 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
14001 as_bad (_("selected processor does not support `%s'"), str);
14006 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
14010 mapping_state (MAP_ARM);
14011 inst.instruction = opcode->avalue;
14012 if (opcode->tag == OT_unconditionalF)
14013 inst.instruction |= 0xF << 28;
14015 inst.instruction |= inst.cond << 28;
14016 inst.size = INSN_SIZE;
14017 if (!parse_operands (p, opcode->operands))
14018 opcode->aencode ();
14019 /* Arm mode bx is marked as both v4T and v5 because it's still required
14020 on a hypothetical non-thumb v5 core. */
14021 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
14022 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
14023 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
14025 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
14026 *opcode->avariant);
14030 as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
14037 /* Various frobbings of labels and their addresses. */
14040 arm_start_line_hook (void)
14042 last_label_seen = NULL;
14046 arm_frob_label (symbolS * sym)
14048 last_label_seen = sym;
14050 ARM_SET_THUMB (sym, thumb_mode);
14052 #if defined OBJ_COFF || defined OBJ_ELF
14053 ARM_SET_INTERWORK (sym, support_interwork);
14056 /* Note - do not allow local symbols (.Lxxx) to be labeled
14057 as Thumb functions. This is because these labels, whilst
14058 they exist inside Thumb code, are not the entry points for
14059 possible ARM->Thumb calls. Also, these labels can be used
14060 as part of a computed goto or switch statement. eg gcc
14061 can generate code that looks like this:
14063 ldr r2, [pc, .Laaa]
14073 The first instruction loads the address of the jump table.
14074 The second instruction converts a table index into a byte offset.
14075 The third instruction gets the jump address out of the table.
14076 The fourth instruction performs the jump.
14078 If the address stored at .Laaa is that of a symbol which has the
14079 Thumb_Func bit set, then the linker will arrange for this address
14080 to have the bottom bit set, which in turn would mean that the
14081 address computation performed by the third instruction would end
14082 up with the bottom bit set. Since the ARM is capable of unaligned
14083 word loads, the instruction would then load the incorrect address
14084 out of the jump table, and chaos would ensue. */
14085 if (label_is_thumb_function_name
14086 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
14087 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
14089 /* When the address of a Thumb function is taken the bottom
14090 bit of that address should be set. This will allow
14091 interworking between Arm and Thumb functions to work
14094 THUMB_SET_FUNC (sym, 1);
14096 label_is_thumb_function_name = FALSE;
14100 dwarf2_emit_label (sym);
14105 arm_data_in_code (void)
14107 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
14109 *input_line_pointer = '/';
14110 input_line_pointer += 5;
14111 *input_line_pointer = 0;
14119 arm_canonicalize_symbol_name (char * name)
14123 if (thumb_mode && (len = strlen (name)) > 5
14124 && streq (name + len - 5, "/data"))
14125 *(name + len - 5) = 0;
14130 /* Table of all register names defined by default. The user can
14131 define additional names with .req. Note that all register names
14132 should appear in both upper and lowercase variants. Some registers
14133 also have mixed-case names. */
14135 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
14136 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
14137 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
14138 #define REGSET(p,t) \
14139 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
14140 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
14141 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
14142 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
14143 #define REGSETH(p,t) \
14144 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
14145 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
14146 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
14147 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
14148 #define REGSET2(p,t) \
14149 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
14150 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
14151 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
14152 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
14154 static const struct reg_entry reg_names[] =
14156 /* ARM integer registers. */
14157 REGSET(r, RN), REGSET(R, RN),
14159 /* ATPCS synonyms. */
14160 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
14161 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
14162 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
14164 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
14165 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
14166 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
14168 /* Well-known aliases. */
14169 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
14170 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
14172 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
14173 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
14175 /* Coprocessor numbers. */
14176 REGSET(p, CP), REGSET(P, CP),
14178 /* Coprocessor register numbers. The "cr" variants are for backward
14180 REGSET(c, CN), REGSET(C, CN),
14181 REGSET(cr, CN), REGSET(CR, CN),
14183 /* FPA registers. */
14184 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
14185 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
14187 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
14188 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
14190 /* VFP SP registers. */
14191 REGSET(s,VFS), REGSET(S,VFS),
14192 REGSETH(s,VFS), REGSETH(S,VFS),
14194 /* VFP DP Registers. */
14195 REGSET(d,VFD), REGSET(D,VFD),
14196 /* Extra Neon DP registers. */
14197 REGSETH(d,VFD), REGSETH(D,VFD),
14199 /* Neon QP registers. */
14200 REGSET2(q,NQ), REGSET2(Q,NQ),
14202 /* VFP control registers. */
14203 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
14204 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
14206 /* Maverick DSP coprocessor registers. */
14207 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
14208 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
14210 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
14211 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
14212 REGDEF(dspsc,0,DSPSC),
14214 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
14215 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
14216 REGDEF(DSPSC,0,DSPSC),
14218 /* iWMMXt data registers - p0, c0-15. */
14219 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
14221 /* iWMMXt control registers - p1, c0-3. */
14222 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
14223 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
14224 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
14225 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
14227 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
14228 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
14229 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
14230 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
14231 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
14233 /* XScale accumulator registers. */
14234 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
14240 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
14241 within psr_required_here. */
14242 static const struct asm_psr psrs[] =
14244 /* Backward compatibility notation. Note that "all" is no longer
14245 truly all possible PSR bits. */
14246 {"all", PSR_c | PSR_f},
14250 /* Individual flags. */
14255 /* Combinations of flags. */
14256 {"fs", PSR_f | PSR_s},
14257 {"fx", PSR_f | PSR_x},
14258 {"fc", PSR_f | PSR_c},
14259 {"sf", PSR_s | PSR_f},
14260 {"sx", PSR_s | PSR_x},
14261 {"sc", PSR_s | PSR_c},
14262 {"xf", PSR_x | PSR_f},
14263 {"xs", PSR_x | PSR_s},
14264 {"xc", PSR_x | PSR_c},
14265 {"cf", PSR_c | PSR_f},
14266 {"cs", PSR_c | PSR_s},
14267 {"cx", PSR_c | PSR_x},
14268 {"fsx", PSR_f | PSR_s | PSR_x},
14269 {"fsc", PSR_f | PSR_s | PSR_c},
14270 {"fxs", PSR_f | PSR_x | PSR_s},
14271 {"fxc", PSR_f | PSR_x | PSR_c},
14272 {"fcs", PSR_f | PSR_c | PSR_s},
14273 {"fcx", PSR_f | PSR_c | PSR_x},
14274 {"sfx", PSR_s | PSR_f | PSR_x},
14275 {"sfc", PSR_s | PSR_f | PSR_c},
14276 {"sxf", PSR_s | PSR_x | PSR_f},
14277 {"sxc", PSR_s | PSR_x | PSR_c},
14278 {"scf", PSR_s | PSR_c | PSR_f},
14279 {"scx", PSR_s | PSR_c | PSR_x},
14280 {"xfs", PSR_x | PSR_f | PSR_s},
14281 {"xfc", PSR_x | PSR_f | PSR_c},
14282 {"xsf", PSR_x | PSR_s | PSR_f},
14283 {"xsc", PSR_x | PSR_s | PSR_c},
14284 {"xcf", PSR_x | PSR_c | PSR_f},
14285 {"xcs", PSR_x | PSR_c | PSR_s},
14286 {"cfs", PSR_c | PSR_f | PSR_s},
14287 {"cfx", PSR_c | PSR_f | PSR_x},
14288 {"csf", PSR_c | PSR_s | PSR_f},
14289 {"csx", PSR_c | PSR_s | PSR_x},
14290 {"cxf", PSR_c | PSR_x | PSR_f},
14291 {"cxs", PSR_c | PSR_x | PSR_s},
14292 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
14293 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
14294 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
14295 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
14296 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
14297 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
14298 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
14299 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
14300 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
14301 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
14302 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
14303 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
14304 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
14305 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
14306 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
14307 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
14308 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
14309 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
14310 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
14311 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
14312 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
14313 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
14314 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
14315 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
14318 /* Table of V7M psr names. */
14319 static const struct asm_psr v7m_psrs[] =
14332 {"basepri_max", 18},
14337 /* Table of all shift-in-operand names. */
14338 static const struct asm_shift_name shift_names [] =
14340 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
14341 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
14342 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
14343 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
14344 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
14345 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
14348 /* Table of all explicit relocation names. */
14350 static struct reloc_entry reloc_names[] =
14352 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
14353 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
14354 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
14355 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
14356 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
14357 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
14358 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
14359 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
14360 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
14361 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
14362 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
14366 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
14367 static const struct asm_cond conds[] =
14371 {"cs", 0x2}, {"hs", 0x2},
14372 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
14386 static struct asm_barrier_opt barrier_opt_names[] =
14394 /* Table of ARM-format instructions. */
14396 /* Macros for gluing together operand strings. N.B. In all cases
14397 other than OPS0, the trailing OP_stop comes from default
14398 zero-initialization of the unspecified elements of the array. */
14399 #define OPS0() { OP_stop, }
14400 #define OPS1(a) { OP_##a, }
14401 #define OPS2(a,b) { OP_##a,OP_##b, }
14402 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
14403 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
14404 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
14405 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
14407 /* These macros abstract out the exact format of the mnemonic table and
14408 save some repeated characters. */
14410 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
14411 #define TxCE(mnem, op, top, nops, ops, ae, te) \
14412 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
14413 THUMB_VARIANT, do_##ae, do_##te }
14415 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
14416 a T_MNEM_xyz enumerator. */
14417 #define TCE(mnem, aop, top, nops, ops, ae, te) \
14418 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
14419 #define tCE(mnem, aop, top, nops, ops, ae, te) \
14420 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14422 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
14423 infix after the third character. */
14424 #define TxC3(mnem, op, top, nops, ops, ae, te) \
14425 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
14426 THUMB_VARIANT, do_##ae, do_##te }
14427 #define TC3(mnem, aop, top, nops, ops, ae, te) \
14428 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
14429 #define tC3(mnem, aop, top, nops, ops, ae, te) \
14430 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
14432 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
14433 appear in the condition table. */
14434 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
14435 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14436 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
14438 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
14439 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
14440 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
14441 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
14442 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
14443 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
14444 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
14445 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
14446 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
14447 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
14448 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
14449 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
14450 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
14451 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
14452 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
14453 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
14454 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
14455 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
14456 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
14457 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
14459 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
14460 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
14461 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
14462 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
14464 /* Mnemonic that cannot be conditionalized. The ARM condition-code
14465 field is still 0xE. Many of the Thumb variants can be executed
14466 conditionally, so this is checked separately. */
14467 #define TUE(mnem, op, top, nops, ops, ae, te) \
14468 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
14469 THUMB_VARIANT, do_##ae, do_##te }
14471 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
14472 condition code field. */
14473 #define TUF(mnem, op, top, nops, ops, ae, te) \
14474 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
14475 THUMB_VARIANT, do_##ae, do_##te }
14477 /* ARM-only variants of all the above. */
14478 #define CE(mnem, op, nops, ops, ae) \
14479 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14481 #define C3(mnem, op, nops, ops, ae) \
14482 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14484 /* Legacy mnemonics that always have conditional infix after the third
14486 #define CL(mnem, op, nops, ops, ae) \
14487 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14488 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14490 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
14491 #define cCE(mnem, op, nops, ops, ae) \
14492 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14494 /* Legacy coprocessor instructions where conditional infix and conditional
14495 suffix are ambiguous. For consistency this includes all FPA instructions,
14496 not just the potentially ambiguous ones. */
14497 #define cCL(mnem, op, nops, ops, ae) \
14498 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
14499 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14501 /* Coprocessor, takes either a suffix or a position-3 infix
14502 (for an FPA corner case). */
14503 #define C3E(mnem, op, nops, ops, ae) \
14504 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
14505 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
14507 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
14508 { #m1 #m2 #m3, OPS##nops ops, \
14509 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
14510 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
14512 #define CM(m1, m2, op, nops, ops, ae) \
14513 xCM_(m1, , m2, op, nops, ops, ae), \
14514 xCM_(m1, eq, m2, op, nops, ops, ae), \
14515 xCM_(m1, ne, m2, op, nops, ops, ae), \
14516 xCM_(m1, cs, m2, op, nops, ops, ae), \
14517 xCM_(m1, hs, m2, op, nops, ops, ae), \
14518 xCM_(m1, cc, m2, op, nops, ops, ae), \
14519 xCM_(m1, ul, m2, op, nops, ops, ae), \
14520 xCM_(m1, lo, m2, op, nops, ops, ae), \
14521 xCM_(m1, mi, m2, op, nops, ops, ae), \
14522 xCM_(m1, pl, m2, op, nops, ops, ae), \
14523 xCM_(m1, vs, m2, op, nops, ops, ae), \
14524 xCM_(m1, vc, m2, op, nops, ops, ae), \
14525 xCM_(m1, hi, m2, op, nops, ops, ae), \
14526 xCM_(m1, ls, m2, op, nops, ops, ae), \
14527 xCM_(m1, ge, m2, op, nops, ops, ae), \
14528 xCM_(m1, lt, m2, op, nops, ops, ae), \
14529 xCM_(m1, gt, m2, op, nops, ops, ae), \
14530 xCM_(m1, le, m2, op, nops, ops, ae), \
14531 xCM_(m1, al, m2, op, nops, ops, ae)
14533 #define UE(mnem, op, nops, ops, ae) \
14534 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14536 #define UF(mnem, op, nops, ops, ae) \
14537 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
14539 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
14540 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
14541 use the same encoding function for each. */
14542 #define NUF(mnem, op, nops, ops, enc) \
14543 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
14544 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14546 /* Neon data processing, version which indirects through neon_enc_tab for
14547 the various overloaded versions of opcodes. */
14548 #define nUF(mnem, op, nops, ops, enc) \
14549 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
14550 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14552 /* Neon insn with conditional suffix for the ARM version, non-overloaded
14554 #define NCE_tag(mnem, op, nops, ops, enc, tag) \
14555 { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT, \
14556 THUMB_VARIANT, do_##enc, do_##enc }
14558 #define NCE(mnem, op, nops, ops, enc) \
14559 NCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14561 #define NCEF(mnem, op, nops, ops, enc) \
14562 NCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14564 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
14565 #define nCE_tag(mnem, op, nops, ops, enc, tag) \
14566 { #mnem, OPS##nops ops, tag, N_MNEM_##op, N_MNEM_##op, \
14567 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
14569 #define nCE(mnem, op, nops, ops, enc) \
14570 nCE_tag(mnem, op, nops, ops, enc, OT_csuffix)
14572 #define nCEF(mnem, op, nops, ops, enc) \
14573 nCE_tag(mnem, op, nops, ops, enc, OT_csuffixF)
14577 /* Thumb-only, unconditional. */
14578 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
14580 static const struct asm_opcode insns[] =
14582 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
14583 #define THUMB_VARIANT &arm_ext_v4t
14584 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
14585 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
14586 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
14587 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
14588 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
14589 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
14590 tCE(add, 0800000, add, 3, (RR, oRR, SHG), arit, t_add_sub),
14591 tC3(adds, 0900000, adds, 3, (RR, oRR, SHG), arit, t_add_sub),
14592 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
14593 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
14594 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
14595 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
14596 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
14597 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
14598 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
14599 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
14601 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
14602 for setting PSR flag bits. They are obsolete in V6 and do not
14603 have Thumb equivalents. */
14604 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14605 tC3(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
14606 CL(tstp, 110f000, 2, (RR, SH), cmp),
14607 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14608 tC3(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
14609 CL(cmpp, 150f000, 2, (RR, SH), cmp),
14610 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14611 tC3(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
14612 CL(cmnp, 170f000, 2, (RR, SH), cmp),
14614 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
14615 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
14616 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
14617 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
14619 tCE(ldr, 4100000, ldr, 2, (RR, ADDRGLDR),ldst, t_ldst),
14620 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14621 tCE(str, 4000000, str, 2, (RR, ADDRGLDR),ldst, t_ldst),
14622 tC3(strb, 4400000, strb, 2, (RR, ADDRGLDR),ldst, t_ldst),
14624 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14625 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14626 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14627 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14628 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14629 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14631 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
14632 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
14633 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
14634 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
14637 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
14638 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
14639 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
14641 /* Thumb-compatibility pseudo ops. */
14642 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
14643 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
14644 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
14645 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
14646 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
14647 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
14648 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
14649 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
14650 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
14651 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
14652 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
14653 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
14655 #undef THUMB_VARIANT
14656 #define THUMB_VARIANT &arm_ext_v6
14657 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
14659 /* V1 instructions with no Thumb analogue prior to V6T2. */
14660 #undef THUMB_VARIANT
14661 #define THUMB_VARIANT &arm_ext_v6t2
14662 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
14663 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
14664 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14665 TC3(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
14666 CL(teqp, 130f000, 2, (RR, SH), cmp),
14668 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
14669 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
14670 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
14671 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
14673 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14674 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14676 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14677 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
14679 /* V1 instructions with no Thumb analogue at all. */
14680 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
14681 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
14683 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
14684 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
14685 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
14686 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
14687 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
14688 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
14689 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
14690 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
14693 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
14694 #undef THUMB_VARIANT
14695 #define THUMB_VARIANT &arm_ext_v4t
14696 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14697 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
14699 #undef THUMB_VARIANT
14700 #define THUMB_VARIANT &arm_ext_v6t2
14701 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
14702 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
14704 /* Generic coprocessor instructions. */
14705 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14706 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14707 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14708 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14709 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14710 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14711 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14714 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
14715 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14716 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
14719 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
14720 TCE(mrs, 10f0000, f3ef8000, 2, (APSR_RR, RVC_PSR), mrs, t_mrs),
14721 TCE(msr, 120f000, f3808000, 2, (RVC_PSR, RR_EXi), msr, t_msr),
14724 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
14725 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14726 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14727 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14728 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14729 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14730 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14731 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
14732 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
14735 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
14736 #undef THUMB_VARIANT
14737 #define THUMB_VARIANT &arm_ext_v4t
14738 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14739 tC3(strh, 00000b0, strh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14740 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14741 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14742 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14743 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDRGLDRS), ldstv4, t_ldst),
14746 #define ARM_VARIANT &arm_ext_v4t_5
14747 /* ARM Architecture 4T. */
14748 /* Note: bx (and blx) are required on V5, even if the processor does
14749 not support Thumb. */
14750 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
14753 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
14754 #undef THUMB_VARIANT
14755 #define THUMB_VARIANT &arm_ext_v5t
14756 /* Note: blx has 2 variants; the .value coded here is for
14757 BLX(2). Only this variant has conditional execution. */
14758 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
14759 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
14761 #undef THUMB_VARIANT
14762 #define THUMB_VARIANT &arm_ext_v6t2
14763 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
14764 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14765 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14766 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14767 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDRGLDC), lstc, lstc),
14768 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
14769 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14770 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
14773 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
14774 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14775 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14776 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14777 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14779 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14780 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
14782 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14783 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14784 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14785 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
14787 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14788 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14789 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14790 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14792 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14793 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14795 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14796 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14797 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14798 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
14801 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
14802 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
14803 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
14804 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDRGLDRS), ldrd, t_ldstd),
14806 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14807 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14810 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
14811 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
14814 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
14815 #undef THUMB_VARIANT
14816 #define THUMB_VARIANT &arm_ext_v6
14817 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
14818 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
14819 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14820 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14821 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
14822 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14823 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14824 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14825 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14826 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
14828 #undef THUMB_VARIANT
14829 #define THUMB_VARIANT &arm_ext_v6t2
14830 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
14831 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14832 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
14834 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
14835 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
14837 /* ARM V6 not included in V7M (eg. integer SIMD). */
14838 #undef THUMB_VARIANT
14839 #define THUMB_VARIANT &arm_ext_v6_notm
14840 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
14841 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
14842 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
14843 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14844 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14845 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14846 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14847 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14848 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14849 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14850 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14851 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14852 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14853 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14854 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14855 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14856 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14857 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14858 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14859 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14860 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14861 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14862 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14863 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14864 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14865 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14866 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14867 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14868 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14869 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14870 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14871 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14872 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14873 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14874 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14875 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14876 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14877 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14878 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14879 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
14880 UF(rfeib, 9900a00, 1, (RRw), rfe),
14881 UF(rfeda, 8100a00, 1, (RRw), rfe),
14882 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
14883 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
14884 UF(rfefa, 9900a00, 1, (RRw), rfe),
14885 UF(rfeea, 8100a00, 1, (RRw), rfe),
14886 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
14887 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14888 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14889 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14890 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14891 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14892 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14893 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
14894 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
14895 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
14896 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14897 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14898 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14899 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14900 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14901 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14902 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14903 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
14904 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14905 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14906 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14907 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14908 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14909 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14910 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14911 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14912 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14913 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14914 TUF(srsia, 8cd0500, e980c000, 1, (I31w), srs, srs),
14915 UF(srsib, 9cd0500, 1, (I31w), srs),
14916 UF(srsda, 84d0500, 1, (I31w), srs),
14917 TUF(srsdb, 94d0500, e800c000, 1, (I31w), srs, srs),
14918 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
14919 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
14920 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
14921 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
14922 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
14923 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
14926 #define ARM_VARIANT &arm_ext_v6k
14927 #undef THUMB_VARIANT
14928 #define THUMB_VARIANT &arm_ext_v6k
14929 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
14930 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
14931 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
14932 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
14934 #undef THUMB_VARIANT
14935 #define THUMB_VARIANT &arm_ext_v6_notm
14936 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
14937 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
14939 #undef THUMB_VARIANT
14940 #define THUMB_VARIANT &arm_ext_v6t2
14941 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
14942 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
14943 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
14944 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
14945 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
14948 #define ARM_VARIANT &arm_ext_v6z
14949 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
14952 #define ARM_VARIANT &arm_ext_v6t2
14953 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
14954 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
14955 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
14956 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
14958 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
14959 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
14960 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
14961 TCE(rbit, 6ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
14963 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14964 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14965 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14966 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
14968 UT(cbnz, b900, 2, (RR, EXP), t_czb),
14969 UT(cbz, b100, 2, (RR, EXP), t_czb),
14970 /* ARM does not really have an IT instruction, so always allow it. */
14972 #define ARM_VARIANT &arm_ext_v1
14973 TUE(it, 0, bf08, 1, (COND), it, t_it),
14974 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
14975 TUE(ite, 0, bf04, 1, (COND), it, t_it),
14976 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
14977 TUE(itet, 0, bf06, 1, (COND), it, t_it),
14978 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
14979 TUE(itee, 0, bf02, 1, (COND), it, t_it),
14980 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
14981 TUE(itett, 0, bf07, 1, (COND), it, t_it),
14982 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
14983 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
14984 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
14985 TUE(itete, 0, bf05, 1, (COND), it, t_it),
14986 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
14987 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
14989 /* Thumb2 only instructions. */
14991 #define ARM_VARIANT NULL
14993 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
14994 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
14995 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
14996 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
14998 /* Thumb-2 hardware division instructions (R and M profiles only). */
14999 #undef THUMB_VARIANT
15000 #define THUMB_VARIANT &arm_ext_div
15001 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
15002 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
15004 /* ARM V7 instructions. */
15006 #define ARM_VARIANT &arm_ext_v7
15007 #undef THUMB_VARIANT
15008 #define THUMB_VARIANT &arm_ext_v7
15009 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
15010 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
15011 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
15012 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
15013 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
15016 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
15017 cCE(wfs, e200110, 1, (RR), rd),
15018 cCE(rfs, e300110, 1, (RR), rd),
15019 cCE(wfc, e400110, 1, (RR), rd),
15020 cCE(rfc, e500110, 1, (RR), rd),
15022 cCL(ldfs, c100100, 2, (RF, ADDRGLDC), rd_cpaddr),
15023 cCL(ldfd, c108100, 2, (RF, ADDRGLDC), rd_cpaddr),
15024 cCL(ldfe, c500100, 2, (RF, ADDRGLDC), rd_cpaddr),
15025 cCL(ldfp, c508100, 2, (RF, ADDRGLDC), rd_cpaddr),
15027 cCL(stfs, c000100, 2, (RF, ADDRGLDC), rd_cpaddr),
15028 cCL(stfd, c008100, 2, (RF, ADDRGLDC), rd_cpaddr),
15029 cCL(stfe, c400100, 2, (RF, ADDRGLDC), rd_cpaddr),
15030 cCL(stfp, c408100, 2, (RF, ADDRGLDC), rd_cpaddr),
15032 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
15033 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
15034 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
15035 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
15036 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
15037 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
15038 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
15039 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
15040 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
15041 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
15042 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
15043 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
15045 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
15046 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
15047 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
15048 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
15049 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
15050 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
15051 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
15052 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
15053 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
15054 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
15055 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
15056 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
15058 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
15059 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
15060 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
15061 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
15062 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
15063 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
15064 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
15065 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
15066 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
15067 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
15068 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
15069 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
15071 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
15072 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
15073 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
15074 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
15075 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
15076 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
15077 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
15078 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
15079 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
15080 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
15081 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
15082 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
15084 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
15085 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
15086 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
15087 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
15088 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
15089 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
15090 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
15091 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
15092 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
15093 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
15094 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
15095 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
15097 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
15098 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
15099 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
15100 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
15101 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
15102 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
15103 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
15104 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
15105 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
15106 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
15107 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
15108 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
15110 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
15111 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
15112 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
15113 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
15114 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
15115 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
15116 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
15117 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
15118 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
15119 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
15120 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
15121 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
15123 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
15124 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
15125 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
15126 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
15127 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
15128 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
15129 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
15130 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
15131 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
15132 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
15133 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
15134 cCL(expdz, e788160, 2, (RF, RF_IF), rd_rm),
15136 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
15137 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
15138 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
15139 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
15140 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
15141 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
15142 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
15143 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
15144 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
15145 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
15146 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
15147 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
15149 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
15150 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
15151 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
15152 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
15153 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
15154 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
15155 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
15156 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
15157 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
15158 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
15159 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
15160 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
15162 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
15163 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
15164 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
15165 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
15166 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
15167 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
15168 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
15169 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
15170 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
15171 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
15172 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
15173 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
15175 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
15176 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
15177 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
15178 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
15179 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
15180 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
15181 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
15182 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
15183 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
15184 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
15185 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
15186 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
15188 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
15189 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
15190 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
15191 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
15192 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
15193 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
15194 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
15195 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
15196 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
15197 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
15198 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
15199 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
15201 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
15202 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
15203 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
15204 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
15205 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
15206 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
15207 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
15208 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
15209 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
15210 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
15211 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
15212 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
15214 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
15215 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
15216 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
15217 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
15218 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
15219 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
15220 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
15221 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
15222 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
15223 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
15224 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
15225 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
15227 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
15228 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
15229 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
15230 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
15231 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
15232 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
15233 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
15234 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
15235 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
15236 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
15237 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
15238 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
15240 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
15241 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
15242 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
15243 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
15244 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
15245 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15246 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15247 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15248 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
15249 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
15250 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
15251 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
15253 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
15254 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
15255 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
15256 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
15257 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
15258 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15259 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15260 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15261 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
15262 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
15263 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
15264 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
15266 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
15267 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
15268 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
15269 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
15270 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
15271 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15272 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15273 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15274 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
15275 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
15276 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
15277 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
15279 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
15280 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
15281 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
15282 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
15283 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
15284 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15285 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15286 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15287 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
15288 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
15289 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
15290 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
15292 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
15293 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
15294 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
15295 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
15296 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
15297 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15298 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15299 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15300 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
15301 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
15302 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
15303 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
15305 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
15306 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
15307 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
15308 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
15309 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
15310 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15311 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15312 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15313 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
15314 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
15315 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
15316 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
15318 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
15319 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
15320 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
15321 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
15322 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
15323 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15324 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15325 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15326 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
15327 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
15328 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
15329 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
15331 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
15332 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
15333 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
15334 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
15335 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
15336 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15337 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15338 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15339 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
15340 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
15341 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
15342 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
15344 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
15345 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
15346 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
15347 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
15348 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
15349 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15350 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15351 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15352 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
15353 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
15354 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
15355 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
15357 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
15358 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
15359 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
15360 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
15361 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
15362 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15363 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15364 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15365 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
15366 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
15367 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
15368 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
15370 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15371 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15372 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15373 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15374 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15375 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15376 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15377 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15378 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15379 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15380 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15381 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15383 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15384 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15385 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15386 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15387 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15388 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15389 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15390 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15391 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15392 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15393 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15394 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15396 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
15397 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
15398 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
15399 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
15400 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
15401 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
15402 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
15403 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
15404 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
15405 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
15406 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
15407 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
15409 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
15410 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
15411 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
15412 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
15414 cCL(flts, e000110, 2, (RF, RR), rn_rd),
15415 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
15416 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
15417 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
15418 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
15419 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
15420 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
15421 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
15422 cCL(flte, e080110, 2, (RF, RR), rn_rd),
15423 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
15424 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
15425 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
15427 /* The implementation of the FIX instruction is broken on some
15428 assemblers, in that it accepts a precision specifier as well as a
15429 rounding specifier, despite the fact that this is meaningless.
15430 To be more compatible, we accept it as well, though of course it
15431 does not set any bits. */
15432 cCE(fix, e100110, 2, (RR, RF), rd_rm),
15433 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
15434 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
15435 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
15436 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
15437 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
15438 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
15439 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
15440 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
15441 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
15442 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
15443 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
15444 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
15446 /* Instructions that were new with the real FPA, call them V2. */
15448 #define ARM_VARIANT &fpu_fpa_ext_v2
15449 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15450 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15451 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15452 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15453 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15454 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
15457 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
15458 /* Moves and type conversions. */
15459 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
15460 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
15461 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
15462 cCE(fmstat, ef1fa10, 0, (), noargs),
15463 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
15464 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
15465 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
15466 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15467 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
15468 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
15469 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
15470 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
15472 /* Memory operations. */
15473 cCE(flds, d100a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15474 cCE(fsts, d000a00, 2, (RVS, ADDRGLDC), vfp_sp_ldst),
15475 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15476 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15477 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15478 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15479 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15480 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15481 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15482 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15483 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15484 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
15485 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15486 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
15487 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15488 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
15489 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15490 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
15492 /* Monadic operations. */
15493 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
15494 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
15495 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
15497 /* Dyadic operations. */
15498 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15499 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15500 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15501 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15502 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15503 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15504 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15505 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15506 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
15509 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
15510 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
15511 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
15512 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
15515 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
15516 /* Moves and type conversions. */
15517 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15518 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15519 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15520 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
15521 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
15522 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
15523 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
15524 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
15525 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
15526 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15527 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15528 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
15529 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
15531 /* Memory operations. */
15532 cCE(fldd, d100b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15533 cCE(fstd, d000b00, 2, (RVD, ADDRGLDC), vfp_dp_ldst),
15534 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15535 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15536 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15537 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15538 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15539 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
15540 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15541 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
15543 /* Monadic operations. */
15544 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15545 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15546 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15548 /* Dyadic operations. */
15549 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15550 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15551 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15552 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15553 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15554 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15555 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15556 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15557 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
15560 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
15561 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
15562 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
15563 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
15566 #define ARM_VARIANT &fpu_vfp_ext_v2
15567 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
15568 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
15569 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
15570 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
15572 /* Instructions which may belong to either the Neon or VFP instruction sets.
15573 Individual encoder functions perform additional architecture checks. */
15575 #define ARM_VARIANT &fpu_vfp_ext_v1xd
15576 #undef THUMB_VARIANT
15577 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
15578 /* These mnemonics are unique to VFP. */
15579 NCE(vsqrt, 0, 2, (RVSD, RVSD), vfp_nsyn_sqrt),
15580 NCE(vdiv, 0, 3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
15581 nCE(vnmul, vnmul, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15582 nCE(vnmla, vnmla, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15583 nCE(vnmls, vnmls, 3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
15584 nCE(vcmp, vcmp, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15585 nCE(vcmpe, vcmpe, 2, (RVSD, RVSD_I0), vfp_nsyn_cmp),
15586 NCE(vpush, 0, 1, (VRSDLST), vfp_nsyn_push),
15587 NCE(vpop, 0, 1, (VRSDLST), vfp_nsyn_pop),
15588 NCE(vcvtz, 0, 2, (RVSD, RVSD), vfp_nsyn_cvtz),
15590 /* Mnemonics shared by Neon and VFP. */
15591 nCEF(vmul, vmul, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mul),
15592 nCEF(vmla, vmla, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15593 nCEF(vmls, vmls, 3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
15595 nCEF(vadd, vadd, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15596 nCEF(vsub, vsub, 3, (RNSDQ, oRNSDQ, RNSDQ), neon_addsub_if_i),
15598 NCEF(vabs, 1b10300, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15599 NCEF(vneg, 1b10380, 2, (RNSDQ, RNSDQ), neon_abs_neg),
15601 NCE(vldm, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15602 NCE(vldmia, c900b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15603 NCE(vldmdb, d100b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15604 NCE(vstm, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15605 NCE(vstmia, c800b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15606 NCE(vstmdb, d000b00, 2, (RRw, VRSDLST), neon_ldm_stm),
15607 NCE(vldr, d100b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15608 NCE(vstr, d000b00, 2, (RVSD, ADDRGLDC), neon_ldr_str),
15610 nCEF(vcvt, vcvt, 3, (RNSDQ, RNSDQ, oI32b), neon_cvt),
15612 /* NOTE: All VMOV encoding is special-cased! */
15613 NCE(vmov, 0, 1, (VMOV), neon_mov),
15614 NCE(vmovq, 0, 1, (VMOV), neon_mov),
15616 #undef THUMB_VARIANT
15617 #define THUMB_VARIANT &fpu_neon_ext_v1
15619 #define ARM_VARIANT &fpu_neon_ext_v1
15620 /* Data processing with three registers of the same length. */
15621 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
15622 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
15623 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
15624 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15625 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15626 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15627 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15628 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
15629 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
15630 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
15631 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15632 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15633 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15634 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15635 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15636 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15637 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
15638 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
15639 /* If not immediate, fall back to neon_dyadic_i64_su.
15640 shl_imm should accept I8 I16 I32 I64,
15641 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
15642 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
15643 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
15644 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
15645 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
15646 /* Logic ops, types optional & ignored. */
15647 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
15648 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
15649 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
15650 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
15651 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
15652 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
15653 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
15654 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
15655 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
15656 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
15657 /* Bitfield ops, untyped. */
15658 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15659 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15660 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15661 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15662 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
15663 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
15664 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
15665 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15666 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15667 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15668 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15669 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
15670 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
15671 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
15672 back to neon_dyadic_if_su. */
15673 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15674 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15675 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
15676 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
15677 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15678 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15679 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
15680 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
15681 /* Comparison. Type I8 I16 I32 F32. */
15682 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
15683 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
15684 /* As above, D registers only. */
15685 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15686 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
15687 /* Int and float variants, signedness unimportant. */
15688 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15689 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
15690 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
15691 /* Add/sub take types I8 I16 I32 I64 F32. */
15692 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15693 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
15694 /* vtst takes sizes 8, 16, 32. */
15695 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
15696 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
15697 /* VMUL takes I8 I16 I32 F32 P8. */
15698 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
15699 /* VQD{R}MULH takes S16 S32. */
15700 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15701 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15702 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
15703 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
15704 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15705 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15706 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
15707 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
15708 NUF(vaclt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15709 NUF(vacltq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15710 NUF(vacle, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
15711 NUF(vacleq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
15712 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15713 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15714 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
15715 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
15717 /* Two address, int/float. Types S8 S16 S32 F32. */
15718 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
15719 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
15721 /* Data processing with two registers and a shift amount. */
15722 /* Right shifts, and variants with rounding.
15723 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
15724 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15725 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15726 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
15727 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
15728 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15729 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15730 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
15731 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
15732 /* Shift and insert. Sizes accepted 8 16 32 64. */
15733 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
15734 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
15735 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
15736 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
15737 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
15738 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
15739 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
15740 /* Right shift immediate, saturating & narrowing, with rounding variants.
15741 Types accepted S16 S32 S64 U16 U32 U64. */
15742 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15743 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
15744 /* As above, unsigned. Types accepted S16 S32 S64. */
15745 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15746 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
15747 /* Right shift narrowing. Types accepted I16 I32 I64. */
15748 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15749 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
15750 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
15751 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
15752 /* CVT with optional immediate for fixed-point variant. */
15753 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
15755 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
15756 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
15758 /* Data processing, three registers of different lengths. */
15759 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
15760 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
15761 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
15762 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
15763 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
15764 /* If not scalar, fall back to neon_dyadic_long.
15765 Vector types as above, scalar types S16 S32 U16 U32. */
15766 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15767 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
15768 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
15769 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15770 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
15771 /* Dyadic, narrowing insns. Types I16 I32 I64. */
15772 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15773 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15774 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15775 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
15776 /* Saturating doubling multiplies. Types S16 S32. */
15777 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15778 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15779 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
15780 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
15781 S16 S32 U16 U32. */
15782 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
15784 /* Extract. Size 8. */
15785 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
15786 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
15788 /* Two registers, miscellaneous. */
15789 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
15790 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
15791 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
15792 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
15793 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
15794 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
15795 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
15796 /* Vector replicate. Sizes 8 16 32. */
15797 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
15798 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
15799 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
15800 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
15801 /* VMOVN. Types I16 I32 I64. */
15802 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
15803 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
15804 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
15805 /* VQMOVUN. Types S16 S32 S64. */
15806 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
15807 /* VZIP / VUZP. Sizes 8 16 32. */
15808 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
15809 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
15810 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
15811 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
15812 /* VQABS / VQNEG. Types S8 S16 S32. */
15813 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
15814 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
15815 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
15816 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
15817 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
15818 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
15819 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
15820 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
15821 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
15822 /* Reciprocal estimates. Types U32 F32. */
15823 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
15824 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
15825 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
15826 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
15827 /* VCLS. Types S8 S16 S32. */
15828 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
15829 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
15830 /* VCLZ. Types I8 I16 I32. */
15831 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
15832 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
15833 /* VCNT. Size 8. */
15834 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
15835 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
15836 /* Two address, untyped. */
15837 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
15838 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
15839 /* VTRN. Sizes 8 16 32. */
15840 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
15841 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
15843 /* Table lookup. Size 8. */
15844 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
15845 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
15847 #undef THUMB_VARIANT
15848 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
15850 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
15851 /* Neon element/structure load/store. */
15852 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
15853 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
15854 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
15855 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
15856 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
15857 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
15858 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
15859 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
15861 #undef THUMB_VARIANT
15862 #define THUMB_VARIANT &fpu_vfp_ext_v3
15864 #define ARM_VARIANT &fpu_vfp_ext_v3
15865 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
15866 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
15867 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15868 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15869 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15870 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15871 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15872 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15873 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15874 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15875 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15876 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15877 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15878 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15879 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
15880 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
15881 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
15882 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
15884 #undef THUMB_VARIANT
15886 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
15887 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15888 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15889 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15890 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15891 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15892 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
15893 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
15894 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
15897 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
15898 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
15899 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
15900 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
15901 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
15902 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
15903 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
15904 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
15905 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
15906 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
15907 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15908 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15909 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
15910 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15911 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15912 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
15913 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15914 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15915 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
15916 cCE(tmcr, e000110, 2, (RIWC_RIWG, RR), rn_rd),
15917 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
15918 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15919 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15920 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15921 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15922 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15923 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
15924 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
15925 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
15926 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
15927 cCE(tmrc, e100110, 2, (RR, RIWC_RIWG), rd_rn),
15928 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
15929 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
15930 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
15931 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
15932 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
15933 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
15934 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
15935 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15936 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15937 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15938 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15939 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15940 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15941 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15942 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15943 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15944 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
15945 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15946 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15947 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15948 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15949 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15950 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15951 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15952 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15953 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15954 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15955 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15956 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15957 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15958 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15959 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15960 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15961 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15962 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15963 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15964 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
15965 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
15966 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
15967 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
15968 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15969 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15970 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15971 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15972 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15973 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15974 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15975 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15976 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15977 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15978 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15979 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15980 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15981 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15982 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15983 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15984 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15985 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15986 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
15987 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15988 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15989 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15990 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15991 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15992 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15993 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15994 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15995 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15996 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15997 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
15998 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
15999 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16000 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16001 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16002 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16003 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16004 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16005 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16006 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16007 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16008 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
16009 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16010 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16011 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16012 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16013 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16014 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16015 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16016 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16017 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16018 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16019 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16020 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16021 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16022 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16023 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16024 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16025 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
16026 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
16027 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16028 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
16029 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
16030 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
16031 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16032 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16033 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16034 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16035 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16036 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16037 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16038 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16039 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16040 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
16041 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
16042 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
16043 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
16044 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
16045 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
16046 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16047 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16048 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16049 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
16050 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
16051 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
16052 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
16053 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
16054 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
16055 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16056 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16057 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16058 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16059 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
16062 #define ARM_VARIANT &arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2. */
16063 cCE(torvscb, e13f190, 1, (RR), iwmmxt_tandorc),
16064 cCE(torvsch, e53f190, 1, (RR), iwmmxt_tandorc),
16065 cCE(torvscw, e93f190, 1, (RR), iwmmxt_tandorc),
16066 cCE(wabsb, e2001c0, 2, (RIWR, RIWR), rd_rn),
16067 cCE(wabsh, e6001c0, 2, (RIWR, RIWR), rd_rn),
16068 cCE(wabsw, ea001c0, 2, (RIWR, RIWR), rd_rn),
16069 cCE(wabsdiffb, e1001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16070 cCE(wabsdiffh, e5001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16071 cCE(wabsdiffw, e9001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16072 cCE(waddbhusl, e2001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16073 cCE(waddbhusm, e6001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16074 cCE(waddhc, e600180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16075 cCE(waddwc, ea00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16076 cCE(waddsubhx, ea001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16077 cCE(wavg4, e400000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16078 cCE(wavg4r, e500000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16079 cCE(wmaddsn, ee00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16080 cCE(wmaddsx, eb00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16081 cCE(wmaddun, ec00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16082 cCE(wmaddux, e900100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16083 cCE(wmerge, e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
16084 cCE(wmiabb, e0000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16085 cCE(wmiabt, e1000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16086 cCE(wmiatb, e2000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16087 cCE(wmiatt, e3000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16088 cCE(wmiabbn, e4000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16089 cCE(wmiabtn, e5000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16090 cCE(wmiatbn, e6000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16091 cCE(wmiattn, e7000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16092 cCE(wmiawbb, e800120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16093 cCE(wmiawbt, e900120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16094 cCE(wmiawtb, ea00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16095 cCE(wmiawtt, eb00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16096 cCE(wmiawbbn, ec00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16097 cCE(wmiawbtn, ed00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16098 cCE(wmiawtbn, ee00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16099 cCE(wmiawttn, ef00120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16100 cCE(wmulsmr, ef00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16101 cCE(wmulumr, ed00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16102 cCE(wmulwumr, ec000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16103 cCE(wmulwsmr, ee000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16104 cCE(wmulwum, ed000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16105 cCE(wmulwsm, ef000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16106 cCE(wmulwl, eb000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16107 cCE(wqmiabb, e8000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16108 cCE(wqmiabt, e9000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16109 cCE(wqmiatb, ea000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16110 cCE(wqmiatt, eb000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16111 cCE(wqmiabbn, ec000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16112 cCE(wqmiabtn, ed000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16113 cCE(wqmiatbn, ee000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16114 cCE(wqmiattn, ef000a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16115 cCE(wqmulm, e100080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16116 cCE(wqmulmr, e300080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16117 cCE(wqmulwm, ec000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16118 cCE(wqmulwmr, ee000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16119 cCE(wsubaddhx, ed001c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
16122 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
16123 cCE(cfldrs, c100400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16124 cCE(cfldrd, c500400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16125 cCE(cfldr32, c100500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16126 cCE(cfldr64, c500500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16127 cCE(cfstrs, c000400, 2, (RMF, ADDRGLDC), rd_cpaddr),
16128 cCE(cfstrd, c400400, 2, (RMD, ADDRGLDC), rd_cpaddr),
16129 cCE(cfstr32, c000500, 2, (RMFX, ADDRGLDC), rd_cpaddr),
16130 cCE(cfstr64, c400500, 2, (RMDX, ADDRGLDC), rd_cpaddr),
16131 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
16132 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
16133 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
16134 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
16135 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
16136 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
16137 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
16138 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
16139 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
16140 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
16141 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
16142 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
16143 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
16144 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
16145 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
16146 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
16147 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
16148 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
16149 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
16150 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
16151 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
16152 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
16153 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
16154 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
16155 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
16156 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
16157 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
16158 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
16159 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
16160 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
16161 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
16162 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
16163 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
16164 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
16165 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
16166 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
16167 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
16168 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
16169 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
16170 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
16171 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
16172 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
16173 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
16174 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
16175 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
16176 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
16177 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
16178 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
16179 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
16180 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
16181 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
16182 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
16183 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
16184 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
16185 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
16186 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
16187 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16188 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16189 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16190 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16191 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16192 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
16193 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16194 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
16195 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16196 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
16197 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16198 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
16201 #undef THUMB_VARIANT
16228 /* MD interface: bits in the object file. */
16230 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
16231 for use in the a.out file, and stores them in the array pointed to by buf.
16232 This knows about the endian-ness of the target machine and does
16233 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
16234 2 (short) and 4 (long) Floating numbers are put out as a series of
16235 LITTLENUMS (shorts, here at least). */
16238 md_number_to_chars (char * buf, valueT val, int n)
16240 if (target_big_endian)
16241 number_to_chars_bigendian (buf, val, n);
16243 number_to_chars_littleendian (buf, val, n);
16247 md_chars_to_number (char * buf, int n)
16250 unsigned char * where = (unsigned char *) buf;
16252 if (target_big_endian)
16257 result |= (*where++ & 255);
16265 result |= (where[n] & 255);
16272 /* MD interface: Sections. */
16274 /* Estimate the size of a frag before relaxing. Assume everything fits in
16278 md_estimate_size_before_relax (fragS * fragp,
16279 segT segtype ATTRIBUTE_UNUSED)
16285 /* Convert a machine dependent frag. */
16288 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
16290 unsigned long insn;
16291 unsigned long old_op;
16299 buf = fragp->fr_literal + fragp->fr_fix;
16301 old_op = bfd_get_16(abfd, buf);
16302 if (fragp->fr_symbol) {
16303 exp.X_op = O_symbol;
16304 exp.X_add_symbol = fragp->fr_symbol;
16306 exp.X_op = O_constant;
16308 exp.X_add_number = fragp->fr_offset;
16309 opcode = fragp->fr_subtype;
16312 case T_MNEM_ldr_pc:
16313 case T_MNEM_ldr_pc2:
16314 case T_MNEM_ldr_sp:
16315 case T_MNEM_str_sp:
16322 if (fragp->fr_var == 4)
16324 insn = THUMB_OP32(opcode);
16325 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
16327 insn |= (old_op & 0x700) << 4;
16331 insn |= (old_op & 7) << 12;
16332 insn |= (old_op & 0x38) << 13;
16334 insn |= 0x00000c00;
16335 put_thumb32_insn (buf, insn);
16336 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
16340 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
16342 pc_rel = (opcode == T_MNEM_ldr_pc2);
16345 if (fragp->fr_var == 4)
16347 insn = THUMB_OP32 (opcode);
16348 insn |= (old_op & 0xf0) << 4;
16349 put_thumb32_insn (buf, insn);
16350 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
16354 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16355 exp.X_add_number -= 4;
16363 if (fragp->fr_var == 4)
16365 int r0off = (opcode == T_MNEM_mov
16366 || opcode == T_MNEM_movs) ? 0 : 8;
16367 insn = THUMB_OP32 (opcode);
16368 insn = (insn & 0xe1ffffff) | 0x10000000;
16369 insn |= (old_op & 0x700) << r0off;
16370 put_thumb32_insn (buf, insn);
16371 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
16375 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
16380 if (fragp->fr_var == 4)
16382 insn = THUMB_OP32(opcode);
16383 put_thumb32_insn (buf, insn);
16384 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
16387 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
16391 if (fragp->fr_var == 4)
16393 insn = THUMB_OP32(opcode);
16394 insn |= (old_op & 0xf00) << 14;
16395 put_thumb32_insn (buf, insn);
16396 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
16399 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
16402 case T_MNEM_add_sp:
16403 case T_MNEM_add_pc:
16404 case T_MNEM_inc_sp:
16405 case T_MNEM_dec_sp:
16406 if (fragp->fr_var == 4)
16408 /* ??? Choose between add and addw. */
16409 insn = THUMB_OP32 (opcode);
16410 insn |= (old_op & 0xf0) << 4;
16411 put_thumb32_insn (buf, insn);
16412 if (opcode == T_MNEM_add_pc)
16413 reloc_type = BFD_RELOC_ARM_T32_IMM12;
16415 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
16418 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16426 if (fragp->fr_var == 4)
16428 insn = THUMB_OP32 (opcode);
16429 insn |= (old_op & 0xf0) << 4;
16430 insn |= (old_op & 0xf) << 16;
16431 put_thumb32_insn (buf, insn);
16432 if (insn & (1 << 20))
16433 reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
16435 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
16438 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
16444 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
16446 fixp->fx_file = fragp->fr_file;
16447 fixp->fx_line = fragp->fr_line;
16448 fragp->fr_fix += fragp->fr_var;
16451 /* Return the size of a relaxable immediate operand instruction.
16452 SHIFT and SIZE specify the form of the allowable immediate. */
16454 relax_immediate (fragS *fragp, int size, int shift)
16460 /* ??? Should be able to do better than this. */
16461 if (fragp->fr_symbol)
16464 low = (1 << shift) - 1;
16465 mask = (1 << (shift + size)) - (1 << shift);
16466 offset = fragp->fr_offset;
16467 /* Force misaligned offsets to 32-bit variant. */
16470 if (offset & ~mask)
16475 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
16478 relax_adr (fragS *fragp, asection *sec)
16483 /* Assume worst case for symbols not known to be in the same section. */
16484 if (!S_IS_DEFINED(fragp->fr_symbol)
16485 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16488 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
16489 addr = fragp->fr_address + fragp->fr_fix;
16490 addr = (addr + 4) & ~3;
16491 /* Fix the insn as the 4-byte version if the target address is not
16492 sufficiently aligned. This is prevents an infinite loop when two
16493 instructions have contradictory range/alignment requirements. */
16497 if (val < 0 || val > 1020)
16502 /* Return the size of a relaxable add/sub immediate instruction. */
16504 relax_addsub (fragS *fragp, asection *sec)
16509 buf = fragp->fr_literal + fragp->fr_fix;
16510 op = bfd_get_16(sec->owner, buf);
16511 if ((op & 0xf) == ((op >> 4) & 0xf))
16512 return relax_immediate (fragp, 8, 0);
16514 return relax_immediate (fragp, 3, 0);
16518 /* Return the size of a relaxable branch instruction. BITS is the
16519 size of the offset field in the narrow instruction. */
16522 relax_branch (fragS *fragp, asection *sec, int bits)
16528 /* Assume worst case for symbols not known to be in the same section. */
16529 if (!S_IS_DEFINED(fragp->fr_symbol)
16530 || sec != S_GET_SEGMENT (fragp->fr_symbol))
16533 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
16534 addr = fragp->fr_address + fragp->fr_fix + 4;
16537 /* Offset is a signed value *2 */
16539 if (val >= limit || val < -limit)
16545 /* Relax a machine dependent frag. This returns the amount by which
16546 the current size of the frag should change. */
16549 arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
16554 oldsize = fragp->fr_var;
16555 switch (fragp->fr_subtype)
16557 case T_MNEM_ldr_pc2:
16558 newsize = relax_adr(fragp, sec);
16560 case T_MNEM_ldr_pc:
16561 case T_MNEM_ldr_sp:
16562 case T_MNEM_str_sp:
16563 newsize = relax_immediate(fragp, 8, 2);
16567 newsize = relax_immediate(fragp, 5, 2);
16571 newsize = relax_immediate(fragp, 5, 1);
16575 newsize = relax_immediate(fragp, 5, 0);
16578 newsize = relax_adr(fragp, sec);
16584 newsize = relax_immediate(fragp, 8, 0);
16587 newsize = relax_branch(fragp, sec, 11);
16590 newsize = relax_branch(fragp, sec, 8);
16592 case T_MNEM_add_sp:
16593 case T_MNEM_add_pc:
16594 newsize = relax_immediate (fragp, 8, 2);
16596 case T_MNEM_inc_sp:
16597 case T_MNEM_dec_sp:
16598 newsize = relax_immediate (fragp, 7, 2);
16604 newsize = relax_addsub (fragp, sec);
16611 fragp->fr_var = -newsize;
16612 md_convert_frag (sec->owner, sec, fragp);
16614 return -(newsize + oldsize);
16616 fragp->fr_var = newsize;
16617 return newsize - oldsize;
16620 /* Round up a section size to the appropriate boundary. */
16623 md_section_align (segT segment ATTRIBUTE_UNUSED,
16629 /* Round all sects to multiple of 4. */
16630 return (size + 3) & ~3;
16634 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
16635 of an rs_align_code fragment. */
16638 arm_handle_align (fragS * fragP)
16640 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
16641 static char const thumb_noop[2] = { 0xc0, 0x46 };
16642 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
16643 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
16645 int bytes, fix, noop_size;
16649 if (fragP->fr_type != rs_align_code)
16652 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
16653 p = fragP->fr_literal + fragP->fr_fix;
16656 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
16657 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
16659 if (fragP->tc_frag_data)
16661 if (target_big_endian)
16662 noop = thumb_bigend_noop;
16665 noop_size = sizeof (thumb_noop);
16669 if (target_big_endian)
16670 noop = arm_bigend_noop;
16673 noop_size = sizeof (arm_noop);
16676 if (bytes & (noop_size - 1))
16678 fix = bytes & (noop_size - 1);
16679 memset (p, 0, fix);
16684 while (bytes >= noop_size)
16686 memcpy (p, noop, noop_size);
16688 bytes -= noop_size;
16692 fragP->fr_fix += fix;
16693 fragP->fr_var = noop_size;
16696 /* Called from md_do_align. Used to create an alignment
16697 frag in a code section. */
16700 arm_frag_align_code (int n, int max)
16704 /* We assume that there will never be a requirement
16705 to support alignments greater than 32 bytes. */
16706 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
16707 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
16709 p = frag_var (rs_align_code,
16710 MAX_MEM_FOR_RS_ALIGN_CODE,
16712 (relax_substateT) max,
16719 /* Perform target specific initialisation of a frag. */
16722 arm_init_frag (fragS * fragP)
16724 /* Record whether this frag is in an ARM or a THUMB area. */
16725 fragP->tc_frag_data = thumb_mode;
16729 /* When we change sections we need to issue a new mapping symbol. */
16732 arm_elf_change_section (void)
16735 segment_info_type *seginfo;
16737 /* Link an unlinked unwind index table section to the .text section. */
16738 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
16739 && elf_linked_to_section (now_seg) == NULL)
16740 elf_linked_to_section (now_seg) = text_section;
16742 if (!SEG_NORMAL (now_seg))
16745 flags = bfd_get_section_flags (stdoutput, now_seg);
16747 /* We can ignore sections that only contain debug info. */
16748 if ((flags & SEC_ALLOC) == 0)
16751 seginfo = seg_info (now_seg);
16752 mapstate = seginfo->tc_segment_info_data.mapstate;
16753 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
16757 arm_elf_section_type (const char * str, size_t len)
16759 if (len == 5 && strncmp (str, "exidx", 5) == 0)
16760 return SHT_ARM_EXIDX;
16765 /* Code to deal with unwinding tables. */
16767 static void add_unwind_adjustsp (offsetT);
16769 /* Cenerate and deferred unwind frame offset. */
16772 flush_pending_unwind (void)
16776 offset = unwind.pending_offset;
16777 unwind.pending_offset = 0;
16779 add_unwind_adjustsp (offset);
16782 /* Add an opcode to this list for this function. Two-byte opcodes should
16783 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
16787 add_unwind_opcode (valueT op, int length)
16789 /* Add any deferred stack adjustment. */
16790 if (unwind.pending_offset)
16791 flush_pending_unwind ();
16793 unwind.sp_restored = 0;
16795 if (unwind.opcode_count + length > unwind.opcode_alloc)
16797 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
16798 if (unwind.opcodes)
16799 unwind.opcodes = xrealloc (unwind.opcodes,
16800 unwind.opcode_alloc);
16802 unwind.opcodes = xmalloc (unwind.opcode_alloc);
16807 unwind.opcodes[unwind.opcode_count] = op & 0xff;
16809 unwind.opcode_count++;
16813 /* Add unwind opcodes to adjust the stack pointer. */
16816 add_unwind_adjustsp (offsetT offset)
16820 if (offset > 0x200)
16822 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
16827 /* Long form: 0xb2, uleb128. */
16828 /* This might not fit in a word so add the individual bytes,
16829 remembering the list is built in reverse order. */
16830 o = (valueT) ((offset - 0x204) >> 2);
16832 add_unwind_opcode (0, 1);
16834 /* Calculate the uleb128 encoding of the offset. */
16838 bytes[n] = o & 0x7f;
16844 /* Add the insn. */
16846 add_unwind_opcode (bytes[n - 1], 1);
16847 add_unwind_opcode (0xb2, 1);
16849 else if (offset > 0x100)
16851 /* Two short opcodes. */
16852 add_unwind_opcode (0x3f, 1);
16853 op = (offset - 0x104) >> 2;
16854 add_unwind_opcode (op, 1);
16856 else if (offset > 0)
16858 /* Short opcode. */
16859 op = (offset - 4) >> 2;
16860 add_unwind_opcode (op, 1);
16862 else if (offset < 0)
16865 while (offset > 0x100)
16867 add_unwind_opcode (0x7f, 1);
16870 op = ((offset - 4) >> 2) | 0x40;
16871 add_unwind_opcode (op, 1);
16875 /* Finish the list of unwind opcodes for this function. */
16877 finish_unwind_opcodes (void)
16881 if (unwind.fp_used)
16883 /* Adjust sp as neccessary. */
16884 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
16885 flush_pending_unwind ();
16887 /* After restoring sp from the frame pointer. */
16888 op = 0x90 | unwind.fp_reg;
16889 add_unwind_opcode (op, 1);
16892 flush_pending_unwind ();
16896 /* Start an exception table entry. If idx is nonzero this is an index table
16900 start_unwind_section (const segT text_seg, int idx)
16902 const char * text_name;
16903 const char * prefix;
16904 const char * prefix_once;
16905 const char * group_name;
16909 size_t sec_name_len;
16916 prefix = ELF_STRING_ARM_unwind;
16917 prefix_once = ELF_STRING_ARM_unwind_once;
16918 type = SHT_ARM_EXIDX;
16922 prefix = ELF_STRING_ARM_unwind_info;
16923 prefix_once = ELF_STRING_ARM_unwind_info_once;
16924 type = SHT_PROGBITS;
16927 text_name = segment_name (text_seg);
16928 if (streq (text_name, ".text"))
16931 if (strncmp (text_name, ".gnu.linkonce.t.",
16932 strlen (".gnu.linkonce.t.")) == 0)
16934 prefix = prefix_once;
16935 text_name += strlen (".gnu.linkonce.t.");
16938 prefix_len = strlen (prefix);
16939 text_len = strlen (text_name);
16940 sec_name_len = prefix_len + text_len;
16941 sec_name = xmalloc (sec_name_len + 1);
16942 memcpy (sec_name, prefix, prefix_len);
16943 memcpy (sec_name + prefix_len, text_name, text_len);
16944 sec_name[prefix_len + text_len] = '\0';
16950 /* Handle COMDAT group. */
16951 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
16953 group_name = elf_group_name (text_seg);
16954 if (group_name == NULL)
16956 as_bad ("Group section `%s' has no group signature",
16957 segment_name (text_seg));
16958 ignore_rest_of_line ();
16961 flags |= SHF_GROUP;
16965 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
16967 /* Set the setion link for index tables. */
16969 elf_linked_to_section (now_seg) = text_seg;
16973 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
16974 personality routine data. Returns zero, or the index table value for
16975 and inline entry. */
16978 create_unwind_entry (int have_data)
16983 /* The current word of data. */
16985 /* The number of bytes left in this word. */
16988 finish_unwind_opcodes ();
16990 /* Remember the current text section. */
16991 unwind.saved_seg = now_seg;
16992 unwind.saved_subseg = now_subseg;
16994 start_unwind_section (now_seg, 0);
16996 if (unwind.personality_routine == NULL)
16998 if (unwind.personality_index == -2)
17001 as_bad (_("handerdata in cantunwind frame"));
17002 return 1; /* EXIDX_CANTUNWIND. */
17005 /* Use a default personality routine if none is specified. */
17006 if (unwind.personality_index == -1)
17008 if (unwind.opcode_count > 3)
17009 unwind.personality_index = 1;
17011 unwind.personality_index = 0;
17014 /* Space for the personality routine entry. */
17015 if (unwind.personality_index == 0)
17017 if (unwind.opcode_count > 3)
17018 as_bad (_("too many unwind opcodes for personality routine 0"));
17022 /* All the data is inline in the index table. */
17025 while (unwind.opcode_count > 0)
17027 unwind.opcode_count--;
17028 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17032 /* Pad with "finish" opcodes. */
17034 data = (data << 8) | 0xb0;
17041 /* We get two opcodes "free" in the first word. */
17042 size = unwind.opcode_count - 2;
17045 /* An extra byte is required for the opcode count. */
17046 size = unwind.opcode_count + 1;
17048 size = (size + 3) >> 2;
17050 as_bad (_("too many unwind opcodes"));
17052 frag_align (2, 0, 0);
17053 record_alignment (now_seg, 2);
17054 unwind.table_entry = expr_build_dot ();
17056 /* Allocate the table entry. */
17057 ptr = frag_more ((size << 2) + 4);
17058 where = frag_now_fix () - ((size << 2) + 4);
17060 switch (unwind.personality_index)
17063 /* ??? Should this be a PLT generating relocation? */
17064 /* Custom personality routine. */
17065 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
17066 BFD_RELOC_ARM_PREL31);
17071 /* Set the first byte to the number of additional words. */
17076 /* ABI defined personality routines. */
17078 /* Three opcodes bytes are packed into the first word. */
17085 /* The size and first two opcode bytes go in the first word. */
17086 data = ((0x80 + unwind.personality_index) << 8) | size;
17091 /* Should never happen. */
17095 /* Pack the opcodes into words (MSB first), reversing the list at the same
17097 while (unwind.opcode_count > 0)
17101 md_number_to_chars (ptr, data, 4);
17106 unwind.opcode_count--;
17108 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
17111 /* Finish off the last word. */
17114 /* Pad with "finish" opcodes. */
17116 data = (data << 8) | 0xb0;
17118 md_number_to_chars (ptr, data, 4);
17123 /* Add an empty descriptor if there is no user-specified data. */
17124 ptr = frag_more (4);
17125 md_number_to_chars (ptr, 0, 4);
17131 /* Convert REGNAME to a DWARF-2 register number. */
17134 tc_arm_regname_to_dw2regnum (const char *regname)
17136 int reg = arm_reg_parse ((char **) ®name, REG_TYPE_RN);
17144 /* Initialize the DWARF-2 unwind information for this procedure. */
17147 tc_arm_frame_initial_instructions (void)
17149 cfi_add_CFA_def_cfa (REG_SP, 0);
17151 #endif /* OBJ_ELF */
17154 /* MD interface: Symbol and relocation handling. */
17156 /* Return the address within the segment that a PC-relative fixup is
17157 relative to. For ARM, PC-relative fixups applied to instructions
17158 are generally relative to the location of the fixup plus 8 bytes.
17159 Thumb branches are offset by 4, and Thumb loads relative to PC
17160 require special handling. */
17163 md_pcrel_from_section (fixS * fixP, segT seg)
17165 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
17167 /* If this is pc-relative and we are going to emit a relocation
17168 then we just want to put out any pipeline compensation that the linker
17169 will need. Otherwise we want to use the calculated base. */
17171 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
17172 || arm_force_relocation (fixP)))
17175 switch (fixP->fx_r_type)
17177 /* PC relative addressing on the Thumb is slightly odd as the
17178 bottom two bits of the PC are forced to zero for the
17179 calculation. This happens *after* application of the
17180 pipeline offset. However, Thumb adrl already adjusts for
17181 this, so we need not do it again. */
17182 case BFD_RELOC_ARM_THUMB_ADD:
17185 case BFD_RELOC_ARM_THUMB_OFFSET:
17186 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17187 case BFD_RELOC_ARM_T32_ADD_PC12:
17188 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
17189 return (base + 4) & ~3;
17191 /* Thumb branches are simply offset by +4. */
17192 case BFD_RELOC_THUMB_PCREL_BRANCH7:
17193 case BFD_RELOC_THUMB_PCREL_BRANCH9:
17194 case BFD_RELOC_THUMB_PCREL_BRANCH12:
17195 case BFD_RELOC_THUMB_PCREL_BRANCH20:
17196 case BFD_RELOC_THUMB_PCREL_BRANCH23:
17197 case BFD_RELOC_THUMB_PCREL_BRANCH25:
17198 case BFD_RELOC_THUMB_PCREL_BLX:
17201 /* ARM mode branches are offset by +8. However, the Windows CE
17202 loader expects the relocation not to take this into account. */
17203 case BFD_RELOC_ARM_PCREL_BRANCH:
17204 case BFD_RELOC_ARM_PCREL_CALL:
17205 case BFD_RELOC_ARM_PCREL_JUMP:
17206 case BFD_RELOC_ARM_PCREL_BLX:
17207 case BFD_RELOC_ARM_PLT32:
17214 /* ARM mode loads relative to PC are also offset by +8. Unlike
17215 branches, the Windows CE loader *does* expect the relocation
17216 to take this into account. */
17217 case BFD_RELOC_ARM_OFFSET_IMM:
17218 case BFD_RELOC_ARM_OFFSET_IMM8:
17219 case BFD_RELOC_ARM_HWLITERAL:
17220 case BFD_RELOC_ARM_LITERAL:
17221 case BFD_RELOC_ARM_CP_OFF_IMM:
17225 /* Other PC-relative relocations are un-offset. */
17231 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
17232 Otherwise we have no need to default values of symbols. */
17235 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
17238 if (name[0] == '_' && name[1] == 'G'
17239 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
17243 if (symbol_find (name))
17244 as_bad ("GOT already in the symbol table");
17246 GOT_symbol = symbol_new (name, undefined_section,
17247 (valueT) 0, & zero_address_frag);
17257 /* Subroutine of md_apply_fix. Check to see if an immediate can be
17258 computed as two separate immediate values, added together. We
17259 already know that this value cannot be computed by just one ARM
17262 static unsigned int
17263 validate_immediate_twopart (unsigned int val,
17264 unsigned int * highpart)
17269 for (i = 0; i < 32; i += 2)
17270 if (((a = rotate_left (val, i)) & 0xff) != 0)
17276 * highpart = (a >> 8) | ((i + 24) << 7);
17278 else if (a & 0xff0000)
17280 if (a & 0xff000000)
17282 * highpart = (a >> 16) | ((i + 16) << 7);
17286 assert (a & 0xff000000);
17287 * highpart = (a >> 24) | ((i + 8) << 7);
17290 return (a & 0xff) | (i << 7);
17297 validate_offset_imm (unsigned int val, int hwse)
17299 if ((hwse && val > 255) || val > 4095)
17304 /* Subroutine of md_apply_fix. Do those data_ops which can take a
17305 negative immediate constant by altering the instruction. A bit of
17310 by inverting the second operand, and
17313 by negating the second operand. */
17316 negate_data_op (unsigned long * instruction,
17317 unsigned long value)
17320 unsigned long negated, inverted;
17322 negated = encode_arm_immediate (-value);
17323 inverted = encode_arm_immediate (~value);
17325 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
17328 /* First negates. */
17329 case OPCODE_SUB: /* ADD <-> SUB */
17330 new_inst = OPCODE_ADD;
17335 new_inst = OPCODE_SUB;
17339 case OPCODE_CMP: /* CMP <-> CMN */
17340 new_inst = OPCODE_CMN;
17345 new_inst = OPCODE_CMP;
17349 /* Now Inverted ops. */
17350 case OPCODE_MOV: /* MOV <-> MVN */
17351 new_inst = OPCODE_MVN;
17356 new_inst = OPCODE_MOV;
17360 case OPCODE_AND: /* AND <-> BIC */
17361 new_inst = OPCODE_BIC;
17366 new_inst = OPCODE_AND;
17370 case OPCODE_ADC: /* ADC <-> SBC */
17371 new_inst = OPCODE_SBC;
17376 new_inst = OPCODE_ADC;
17380 /* We cannot do anything. */
17385 if (value == (unsigned) FAIL)
17388 *instruction &= OPCODE_MASK;
17389 *instruction |= new_inst << DATA_OP_SHIFT;
17393 /* Like negate_data_op, but for Thumb-2. */
17395 static unsigned int
17396 thumb32_negate_data_op (offsetT *instruction, unsigned int value)
17400 unsigned int negated, inverted;
17402 negated = encode_thumb32_immediate (-value);
17403 inverted = encode_thumb32_immediate (~value);
17405 rd = (*instruction >> 8) & 0xf;
17406 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
17409 /* ADD <-> SUB. Includes CMP <-> CMN. */
17410 case T2_OPCODE_SUB:
17411 new_inst = T2_OPCODE_ADD;
17415 case T2_OPCODE_ADD:
17416 new_inst = T2_OPCODE_SUB;
17420 /* ORR <-> ORN. Includes MOV <-> MVN. */
17421 case T2_OPCODE_ORR:
17422 new_inst = T2_OPCODE_ORN;
17426 case T2_OPCODE_ORN:
17427 new_inst = T2_OPCODE_ORR;
17431 /* AND <-> BIC. TST has no inverted equivalent. */
17432 case T2_OPCODE_AND:
17433 new_inst = T2_OPCODE_BIC;
17440 case T2_OPCODE_BIC:
17441 new_inst = T2_OPCODE_AND;
17446 case T2_OPCODE_ADC:
17447 new_inst = T2_OPCODE_SBC;
17451 case T2_OPCODE_SBC:
17452 new_inst = T2_OPCODE_ADC;
17456 /* We cannot do anything. */
17461 if (value == (unsigned int)FAIL)
17464 *instruction &= T2_OPCODE_MASK;
17465 *instruction |= new_inst << T2_DATA_OP_SHIFT;
17469 /* Read a 32-bit thumb instruction from buf. */
17470 static unsigned long
17471 get_thumb32_insn (char * buf)
17473 unsigned long insn;
17474 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
17475 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
17481 /* We usually want to set the low bit on the address of thumb function
17482 symbols. In particular .word foo - . should have the low bit set.
17483 Generic code tries to fold the difference of two symbols to
17484 a constant. Prevent this and force a relocation when the first symbols
17485 is a thumb function. */
17487 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
17489 if (op == O_subtract
17490 && l->X_op == O_symbol
17491 && r->X_op == O_symbol
17492 && THUMB_IS_FUNC (l->X_add_symbol))
17494 l->X_op = O_subtract;
17495 l->X_op_symbol = r->X_add_symbol;
17496 l->X_add_number -= r->X_add_number;
17499 /* Process as normal. */
17504 md_apply_fix (fixS * fixP,
17508 offsetT value = * valP;
17510 unsigned int newimm;
17511 unsigned long temp;
17513 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
17515 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
17517 /* Note whether this will delete the relocation. */
17519 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
17522 /* On a 64-bit host, silently truncate 'value' to 32 bits for
17523 consistency with the behavior on 32-bit hosts. Remember value
17525 value &= 0xffffffff;
17526 value ^= 0x80000000;
17527 value -= 0x80000000;
17530 fixP->fx_addnumber = value;
17532 /* Same treatment for fixP->fx_offset. */
17533 fixP->fx_offset &= 0xffffffff;
17534 fixP->fx_offset ^= 0x80000000;
17535 fixP->fx_offset -= 0x80000000;
17537 switch (fixP->fx_r_type)
17539 case BFD_RELOC_NONE:
17540 /* This will need to go in the object file. */
17544 case BFD_RELOC_ARM_IMMEDIATE:
17545 /* We claim that this fixup has been processed here,
17546 even if in fact we generate an error because we do
17547 not have a reloc for it, so tc_gen_reloc will reject it. */
17551 && ! S_IS_DEFINED (fixP->fx_addsy))
17553 as_bad_where (fixP->fx_file, fixP->fx_line,
17554 _("undefined symbol %s used as an immediate value"),
17555 S_GET_NAME (fixP->fx_addsy));
17559 newimm = encode_arm_immediate (value);
17560 temp = md_chars_to_number (buf, INSN_SIZE);
17562 /* If the instruction will fail, see if we can fix things up by
17563 changing the opcode. */
17564 if (newimm == (unsigned int) FAIL
17565 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
17567 as_bad_where (fixP->fx_file, fixP->fx_line,
17568 _("invalid constant (%lx) after fixup"),
17569 (unsigned long) value);
17573 newimm |= (temp & 0xfffff000);
17574 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17577 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
17579 unsigned int highpart = 0;
17580 unsigned int newinsn = 0xe1a00000; /* nop. */
17582 newimm = encode_arm_immediate (value);
17583 temp = md_chars_to_number (buf, INSN_SIZE);
17585 /* If the instruction will fail, see if we can fix things up by
17586 changing the opcode. */
17587 if (newimm == (unsigned int) FAIL
17588 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
17590 /* No ? OK - try using two ADD instructions to generate
17592 newimm = validate_immediate_twopart (value, & highpart);
17594 /* Yes - then make sure that the second instruction is
17596 if (newimm != (unsigned int) FAIL)
17598 /* Still No ? Try using a negated value. */
17599 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
17600 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
17601 /* Otherwise - give up. */
17604 as_bad_where (fixP->fx_file, fixP->fx_line,
17605 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
17610 /* Replace the first operand in the 2nd instruction (which
17611 is the PC) with the destination register. We have
17612 already added in the PC in the first instruction and we
17613 do not want to do it again. */
17614 newinsn &= ~ 0xf0000;
17615 newinsn |= ((newinsn & 0x0f000) << 4);
17618 newimm |= (temp & 0xfffff000);
17619 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
17621 highpart |= (newinsn & 0xfffff000);
17622 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
17626 case BFD_RELOC_ARM_OFFSET_IMM:
17627 if (!fixP->fx_done && seg->use_rela_p)
17630 case BFD_RELOC_ARM_LITERAL:
17636 if (validate_offset_imm (value, 0) == FAIL)
17638 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
17639 as_bad_where (fixP->fx_file, fixP->fx_line,
17640 _("invalid literal constant: pool needs to be closer"));
17642 as_bad_where (fixP->fx_file, fixP->fx_line,
17643 _("bad immediate value for offset (%ld)"),
17648 newval = md_chars_to_number (buf, INSN_SIZE);
17649 newval &= 0xff7ff000;
17650 newval |= value | (sign ? INDEX_UP : 0);
17651 md_number_to_chars (buf, newval, INSN_SIZE);
17654 case BFD_RELOC_ARM_OFFSET_IMM8:
17655 case BFD_RELOC_ARM_HWLITERAL:
17661 if (validate_offset_imm (value, 1) == FAIL)
17663 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
17664 as_bad_where (fixP->fx_file, fixP->fx_line,
17665 _("invalid literal constant: pool needs to be closer"));
17667 as_bad (_("bad immediate value for half-word offset (%ld)"),
17672 newval = md_chars_to_number (buf, INSN_SIZE);
17673 newval &= 0xff7ff0f0;
17674 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
17675 md_number_to_chars (buf, newval, INSN_SIZE);
17678 case BFD_RELOC_ARM_T32_OFFSET_U8:
17679 if (value < 0 || value > 1020 || value % 4 != 0)
17680 as_bad_where (fixP->fx_file, fixP->fx_line,
17681 _("bad immediate value for offset (%ld)"), (long) value);
17684 newval = md_chars_to_number (buf+2, THUMB_SIZE);
17686 md_number_to_chars (buf+2, newval, THUMB_SIZE);
17689 case BFD_RELOC_ARM_T32_OFFSET_IMM:
17690 /* This is a complicated relocation used for all varieties of Thumb32
17691 load/store instruction with immediate offset:
17693 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
17694 *4, optional writeback(W)
17695 (doubleword load/store)
17697 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
17698 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
17699 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
17700 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
17701 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
17703 Uppercase letters indicate bits that are already encoded at
17704 this point. Lowercase letters are our problem. For the
17705 second block of instructions, the secondary opcode nybble
17706 (bits 8..11) is present, and bit 23 is zero, even if this is
17707 a PC-relative operation. */
17708 newval = md_chars_to_number (buf, THUMB_SIZE);
17710 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
17712 if ((newval & 0xf0000000) == 0xe0000000)
17714 /* Doubleword load/store: 8-bit offset, scaled by 4. */
17716 newval |= (1 << 23);
17719 if (value % 4 != 0)
17721 as_bad_where (fixP->fx_file, fixP->fx_line,
17722 _("offset not a multiple of 4"));
17728 as_bad_where (fixP->fx_file, fixP->fx_line,
17729 _("offset out of range"));
17734 else if ((newval & 0x000f0000) == 0x000f0000)
17736 /* PC-relative, 12-bit offset. */
17738 newval |= (1 << 23);
17743 as_bad_where (fixP->fx_file, fixP->fx_line,
17744 _("offset out of range"));
17749 else if ((newval & 0x00000100) == 0x00000100)
17751 /* Writeback: 8-bit, +/- offset. */
17753 newval |= (1 << 9);
17758 as_bad_where (fixP->fx_file, fixP->fx_line,
17759 _("offset out of range"));
17764 else if ((newval & 0x00000f00) == 0x00000e00)
17766 /* T-instruction: positive 8-bit offset. */
17767 if (value < 0 || value > 0xff)
17769 as_bad_where (fixP->fx_file, fixP->fx_line,
17770 _("offset out of range"));
17778 /* Positive 12-bit or negative 8-bit offset. */
17782 newval |= (1 << 23);
17792 as_bad_where (fixP->fx_file, fixP->fx_line,
17793 _("offset out of range"));
17800 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
17801 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
17804 case BFD_RELOC_ARM_SHIFT_IMM:
17805 newval = md_chars_to_number (buf, INSN_SIZE);
17806 if (((unsigned long) value) > 32
17808 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
17810 as_bad_where (fixP->fx_file, fixP->fx_line,
17811 _("shift expression is too large"));
17816 /* Shifts of zero must be done as lsl. */
17818 else if (value == 32)
17820 newval &= 0xfffff07f;
17821 newval |= (value & 0x1f) << 7;
17822 md_number_to_chars (buf, newval, INSN_SIZE);
17825 case BFD_RELOC_ARM_T32_IMMEDIATE:
17826 case BFD_RELOC_ARM_T32_ADD_IMM:
17827 case BFD_RELOC_ARM_T32_IMM12:
17828 case BFD_RELOC_ARM_T32_ADD_PC12:
17829 /* We claim that this fixup has been processed here,
17830 even if in fact we generate an error because we do
17831 not have a reloc for it, so tc_gen_reloc will reject it. */
17835 && ! S_IS_DEFINED (fixP->fx_addsy))
17837 as_bad_where (fixP->fx_file, fixP->fx_line,
17838 _("undefined symbol %s used as an immediate value"),
17839 S_GET_NAME (fixP->fx_addsy));
17843 newval = md_chars_to_number (buf, THUMB_SIZE);
17845 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
17848 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
17849 || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
17851 newimm = encode_thumb32_immediate (value);
17852 if (newimm == (unsigned int) FAIL)
17853 newimm = thumb32_negate_data_op (&newval, value);
17855 if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE
17856 && newimm == (unsigned int) FAIL)
17858 /* Turn add/sum into addw/subw. */
17859 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
17860 newval = (newval & 0xfeffffff) | 0x02000000;
17862 /* 12 bit immediate for addw/subw. */
17866 newval ^= 0x00a00000;
17869 newimm = (unsigned int) FAIL;
17874 if (newimm == (unsigned int)FAIL)
17876 as_bad_where (fixP->fx_file, fixP->fx_line,
17877 _("invalid constant (%lx) after fixup"),
17878 (unsigned long) value);
17882 newval |= (newimm & 0x800) << 15;
17883 newval |= (newimm & 0x700) << 4;
17884 newval |= (newimm & 0x0ff);
17886 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
17887 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
17890 case BFD_RELOC_ARM_SMC:
17891 if (((unsigned long) value) > 0xffff)
17892 as_bad_where (fixP->fx_file, fixP->fx_line,
17893 _("invalid smc expression"));
17894 newval = md_chars_to_number (buf, INSN_SIZE);
17895 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
17896 md_number_to_chars (buf, newval, INSN_SIZE);
17899 case BFD_RELOC_ARM_SWI:
17900 if (fixP->tc_fix_data != 0)
17902 if (((unsigned long) value) > 0xff)
17903 as_bad_where (fixP->fx_file, fixP->fx_line,
17904 _("invalid swi expression"));
17905 newval = md_chars_to_number (buf, THUMB_SIZE);
17907 md_number_to_chars (buf, newval, THUMB_SIZE);
17911 if (((unsigned long) value) > 0x00ffffff)
17912 as_bad_where (fixP->fx_file, fixP->fx_line,
17913 _("invalid swi expression"));
17914 newval = md_chars_to_number (buf, INSN_SIZE);
17916 md_number_to_chars (buf, newval, INSN_SIZE);
17920 case BFD_RELOC_ARM_MULTI:
17921 if (((unsigned long) value) > 0xffff)
17922 as_bad_where (fixP->fx_file, fixP->fx_line,
17923 _("invalid expression in load/store multiple"));
17924 newval = value | md_chars_to_number (buf, INSN_SIZE);
17925 md_number_to_chars (buf, newval, INSN_SIZE);
17929 case BFD_RELOC_ARM_PCREL_CALL:
17930 newval = md_chars_to_number (buf, INSN_SIZE);
17931 if ((newval & 0xf0000000) == 0xf0000000)
17935 goto arm_branch_common;
17937 case BFD_RELOC_ARM_PCREL_JUMP:
17938 case BFD_RELOC_ARM_PLT32:
17940 case BFD_RELOC_ARM_PCREL_BRANCH:
17942 goto arm_branch_common;
17944 case BFD_RELOC_ARM_PCREL_BLX:
17947 /* We are going to store value (shifted right by two) in the
17948 instruction, in a 24 bit, signed field. Bits 26 through 32 either
17949 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
17950 also be be clear. */
17952 as_bad_where (fixP->fx_file, fixP->fx_line,
17953 _("misaligned branch destination"));
17954 if ((value & (offsetT)0xfe000000) != (offsetT)0
17955 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
17956 as_bad_where (fixP->fx_file, fixP->fx_line,
17957 _("branch out of range"));
17959 if (fixP->fx_done || !seg->use_rela_p)
17961 newval = md_chars_to_number (buf, INSN_SIZE);
17962 newval |= (value >> 2) & 0x00ffffff;
17963 /* Set the H bit on BLX instructions. */
17967 newval |= 0x01000000;
17969 newval &= ~0x01000000;
17971 md_number_to_chars (buf, newval, INSN_SIZE);
17975 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CZB */
17976 /* CZB can only branch forward. */
17978 as_bad_where (fixP->fx_file, fixP->fx_line,
17979 _("branch out of range"));
17981 if (fixP->fx_done || !seg->use_rela_p)
17983 newval = md_chars_to_number (buf, THUMB_SIZE);
17984 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
17985 md_number_to_chars (buf, newval, THUMB_SIZE);
17989 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
17990 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
17991 as_bad_where (fixP->fx_file, fixP->fx_line,
17992 _("branch out of range"));
17994 if (fixP->fx_done || !seg->use_rela_p)
17996 newval = md_chars_to_number (buf, THUMB_SIZE);
17997 newval |= (value & 0x1ff) >> 1;
17998 md_number_to_chars (buf, newval, THUMB_SIZE);
18002 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
18003 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
18004 as_bad_where (fixP->fx_file, fixP->fx_line,
18005 _("branch out of range"));
18007 if (fixP->fx_done || !seg->use_rela_p)
18009 newval = md_chars_to_number (buf, THUMB_SIZE);
18010 newval |= (value & 0xfff) >> 1;
18011 md_number_to_chars (buf, newval, THUMB_SIZE);
18015 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18016 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
18017 as_bad_where (fixP->fx_file, fixP->fx_line,
18018 _("conditional branch out of range"));
18020 if (fixP->fx_done || !seg->use_rela_p)
18023 addressT S, J1, J2, lo, hi;
18025 S = (value & 0x00100000) >> 20;
18026 J2 = (value & 0x00080000) >> 19;
18027 J1 = (value & 0x00040000) >> 18;
18028 hi = (value & 0x0003f000) >> 12;
18029 lo = (value & 0x00000ffe) >> 1;
18031 newval = md_chars_to_number (buf, THUMB_SIZE);
18032 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18033 newval |= (S << 10) | hi;
18034 newval2 |= (J1 << 13) | (J2 << 11) | lo;
18035 md_number_to_chars (buf, newval, THUMB_SIZE);
18036 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18040 case BFD_RELOC_THUMB_PCREL_BLX:
18041 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18042 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
18043 as_bad_where (fixP->fx_file, fixP->fx_line,
18044 _("branch out of range"));
18046 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
18047 /* For a BLX instruction, make sure that the relocation is rounded up
18048 to a word boundary. This follows the semantics of the instruction
18049 which specifies that bit 1 of the target address will come from bit
18050 1 of the base address. */
18051 value = (value + 1) & ~ 1;
18053 if (fixP->fx_done || !seg->use_rela_p)
18057 newval = md_chars_to_number (buf, THUMB_SIZE);
18058 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18059 newval |= (value & 0x7fffff) >> 12;
18060 newval2 |= (value & 0xfff) >> 1;
18061 md_number_to_chars (buf, newval, THUMB_SIZE);
18062 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18066 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18067 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
18068 as_bad_where (fixP->fx_file, fixP->fx_line,
18069 _("branch out of range"));
18071 if (fixP->fx_done || !seg->use_rela_p)
18074 addressT S, I1, I2, lo, hi;
18076 S = (value & 0x01000000) >> 24;
18077 I1 = (value & 0x00800000) >> 23;
18078 I2 = (value & 0x00400000) >> 22;
18079 hi = (value & 0x003ff000) >> 12;
18080 lo = (value & 0x00000ffe) >> 1;
18085 newval = md_chars_to_number (buf, THUMB_SIZE);
18086 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
18087 newval |= (S << 10) | hi;
18088 newval2 |= (I1 << 13) | (I2 << 11) | lo;
18089 md_number_to_chars (buf, newval, THUMB_SIZE);
18090 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
18095 if (fixP->fx_done || !seg->use_rela_p)
18096 md_number_to_chars (buf, value, 1);
18100 if (fixP->fx_done || !seg->use_rela_p)
18101 md_number_to_chars (buf, value, 2);
18105 case BFD_RELOC_ARM_TLS_GD32:
18106 case BFD_RELOC_ARM_TLS_LE32:
18107 case BFD_RELOC_ARM_TLS_IE32:
18108 case BFD_RELOC_ARM_TLS_LDM32:
18109 case BFD_RELOC_ARM_TLS_LDO32:
18110 S_SET_THREAD_LOCAL (fixP->fx_addsy);
18113 case BFD_RELOC_ARM_GOT32:
18114 case BFD_RELOC_ARM_GOTOFF:
18115 case BFD_RELOC_ARM_TARGET2:
18116 if (fixP->fx_done || !seg->use_rela_p)
18117 md_number_to_chars (buf, 0, 4);
18121 case BFD_RELOC_RVA:
18123 case BFD_RELOC_ARM_TARGET1:
18124 case BFD_RELOC_ARM_ROSEGREL32:
18125 case BFD_RELOC_ARM_SBREL32:
18126 case BFD_RELOC_32_PCREL:
18127 if (fixP->fx_done || !seg->use_rela_p)
18128 md_number_to_chars (buf, value, 4);
18132 case BFD_RELOC_ARM_PREL31:
18133 if (fixP->fx_done || !seg->use_rela_p)
18135 newval = md_chars_to_number (buf, 4) & 0x80000000;
18136 if ((value ^ (value >> 1)) & 0x40000000)
18138 as_bad_where (fixP->fx_file, fixP->fx_line,
18139 _("rel31 relocation overflow"));
18141 newval |= value & 0x7fffffff;
18142 md_number_to_chars (buf, newval, 4);
18147 case BFD_RELOC_ARM_CP_OFF_IMM:
18148 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
18149 if (value < -1023 || value > 1023 || (value & 3))
18150 as_bad_where (fixP->fx_file, fixP->fx_line,
18151 _("co-processor offset out of range"));
18156 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18157 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18158 newval = md_chars_to_number (buf, INSN_SIZE);
18160 newval = get_thumb32_insn (buf);
18161 newval &= 0xff7fff00;
18162 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
18164 newval &= ~WRITE_BACK;
18165 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
18166 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
18167 md_number_to_chars (buf, newval, INSN_SIZE);
18169 put_thumb32_insn (buf, newval);
18172 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
18173 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
18174 if (value < -255 || value > 255)
18175 as_bad_where (fixP->fx_file, fixP->fx_line,
18176 _("co-processor offset out of range"));
18178 goto cp_off_common;
18180 case BFD_RELOC_ARM_THUMB_OFFSET:
18181 newval = md_chars_to_number (buf, THUMB_SIZE);
18182 /* Exactly what ranges, and where the offset is inserted depends
18183 on the type of instruction, we can establish this from the
18185 switch (newval >> 12)
18187 case 4: /* PC load. */
18188 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
18189 forced to zero for these loads; md_pcrel_from has already
18190 compensated for this. */
18192 as_bad_where (fixP->fx_file, fixP->fx_line,
18193 _("invalid offset, target not word aligned (0x%08lX)"),
18194 (((unsigned long) fixP->fx_frag->fr_address
18195 + (unsigned long) fixP->fx_where) & ~3)
18196 + (unsigned long) value);
18198 if (value & ~0x3fc)
18199 as_bad_where (fixP->fx_file, fixP->fx_line,
18200 _("invalid offset, value too big (0x%08lX)"),
18203 newval |= value >> 2;
18206 case 9: /* SP load/store. */
18207 if (value & ~0x3fc)
18208 as_bad_where (fixP->fx_file, fixP->fx_line,
18209 _("invalid offset, value too big (0x%08lX)"),
18211 newval |= value >> 2;
18214 case 6: /* Word load/store. */
18216 as_bad_where (fixP->fx_file, fixP->fx_line,
18217 _("invalid offset, value too big (0x%08lX)"),
18219 newval |= value << 4; /* 6 - 2. */
18222 case 7: /* Byte load/store. */
18224 as_bad_where (fixP->fx_file, fixP->fx_line,
18225 _("invalid offset, value too big (0x%08lX)"),
18227 newval |= value << 6;
18230 case 8: /* Halfword load/store. */
18232 as_bad_where (fixP->fx_file, fixP->fx_line,
18233 _("invalid offset, value too big (0x%08lX)"),
18235 newval |= value << 5; /* 6 - 1. */
18239 as_bad_where (fixP->fx_file, fixP->fx_line,
18240 "Unable to process relocation for thumb opcode: %lx",
18241 (unsigned long) newval);
18244 md_number_to_chars (buf, newval, THUMB_SIZE);
18247 case BFD_RELOC_ARM_THUMB_ADD:
18248 /* This is a complicated relocation, since we use it for all of
18249 the following immediate relocations:
18253 9bit ADD/SUB SP word-aligned
18254 10bit ADD PC/SP word-aligned
18256 The type of instruction being processed is encoded in the
18263 newval = md_chars_to_number (buf, THUMB_SIZE);
18265 int rd = (newval >> 4) & 0xf;
18266 int rs = newval & 0xf;
18267 int subtract = !!(newval & 0x8000);
18269 /* Check for HI regs, only very restricted cases allowed:
18270 Adjusting SP, and using PC or SP to get an address. */
18271 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
18272 || (rs > 7 && rs != REG_SP && rs != REG_PC))
18273 as_bad_where (fixP->fx_file, fixP->fx_line,
18274 _("invalid Hi register with immediate"));
18276 /* If value is negative, choose the opposite instruction. */
18280 subtract = !subtract;
18282 as_bad_where (fixP->fx_file, fixP->fx_line,
18283 _("immediate value out of range"));
18288 if (value & ~0x1fc)
18289 as_bad_where (fixP->fx_file, fixP->fx_line,
18290 _("invalid immediate for stack address calculation"));
18291 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
18292 newval |= value >> 2;
18294 else if (rs == REG_PC || rs == REG_SP)
18296 if (subtract || value & ~0x3fc)
18297 as_bad_where (fixP->fx_file, fixP->fx_line,
18298 _("invalid immediate for address calculation (value = 0x%08lX)"),
18299 (unsigned long) value);
18300 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
18302 newval |= value >> 2;
18307 as_bad_where (fixP->fx_file, fixP->fx_line,
18308 _("immediate value out of range"));
18309 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
18310 newval |= (rd << 8) | value;
18315 as_bad_where (fixP->fx_file, fixP->fx_line,
18316 _("immediate value out of range"));
18317 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
18318 newval |= rd | (rs << 3) | (value << 6);
18321 md_number_to_chars (buf, newval, THUMB_SIZE);
18324 case BFD_RELOC_ARM_THUMB_IMM:
18325 newval = md_chars_to_number (buf, THUMB_SIZE);
18326 if (value < 0 || value > 255)
18327 as_bad_where (fixP->fx_file, fixP->fx_line,
18328 _("invalid immediate: %ld is too large"),
18331 md_number_to_chars (buf, newval, THUMB_SIZE);
18334 case BFD_RELOC_ARM_THUMB_SHIFT:
18335 /* 5bit shift value (0..32). LSL cannot take 32. */
18336 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
18337 temp = newval & 0xf800;
18338 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
18339 as_bad_where (fixP->fx_file, fixP->fx_line,
18340 _("invalid shift value: %ld"), (long) value);
18341 /* Shifts of zero must be encoded as LSL. */
18343 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
18344 /* Shifts of 32 are encoded as zero. */
18345 else if (value == 32)
18347 newval |= value << 6;
18348 md_number_to_chars (buf, newval, THUMB_SIZE);
18351 case BFD_RELOC_VTABLE_INHERIT:
18352 case BFD_RELOC_VTABLE_ENTRY:
18356 case BFD_RELOC_ARM_MOVW:
18357 case BFD_RELOC_ARM_MOVT:
18358 case BFD_RELOC_ARM_THUMB_MOVW:
18359 case BFD_RELOC_ARM_THUMB_MOVT:
18360 if (fixP->fx_done || !seg->use_rela_p)
18362 /* REL format relocations are limited to a 16-bit addend. */
18363 if (!fixP->fx_done)
18365 if (value < -0x1000 || value > 0xffff)
18366 as_bad_where (fixP->fx_file, fixP->fx_line,
18367 _("offset too big"));
18369 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
18370 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18375 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
18376 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
18378 newval = get_thumb32_insn (buf);
18379 newval &= 0xfbf08f00;
18380 newval |= (value & 0xf000) << 4;
18381 newval |= (value & 0x0800) << 15;
18382 newval |= (value & 0x0700) << 4;
18383 newval |= (value & 0x00ff);
18384 put_thumb32_insn (buf, newval);
18388 newval = md_chars_to_number (buf, 4);
18389 newval &= 0xfff0f000;
18390 newval |= value & 0x0fff;
18391 newval |= (value & 0xf000) << 4;
18392 md_number_to_chars (buf, newval, 4);
18397 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18398 case BFD_RELOC_ARM_ALU_PC_G0:
18399 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18400 case BFD_RELOC_ARM_ALU_PC_G1:
18401 case BFD_RELOC_ARM_ALU_PC_G2:
18402 case BFD_RELOC_ARM_ALU_SB_G0_NC:
18403 case BFD_RELOC_ARM_ALU_SB_G0:
18404 case BFD_RELOC_ARM_ALU_SB_G1_NC:
18405 case BFD_RELOC_ARM_ALU_SB_G1:
18406 case BFD_RELOC_ARM_ALU_SB_G2:
18407 assert (!fixP->fx_done);
18408 if (!seg->use_rela_p)
18411 bfd_vma encoded_addend;
18412 bfd_vma addend_abs = abs (value);
18414 /* Check that the absolute value of the addend can be
18415 expressed as an 8-bit constant plus a rotation. */
18416 encoded_addend = encode_arm_immediate (addend_abs);
18417 if (encoded_addend == (unsigned int) FAIL)
18418 as_bad_where (fixP->fx_file, fixP->fx_line,
18419 _("the offset 0x%08lX is not representable"),
18422 /* Extract the instruction. */
18423 insn = md_chars_to_number (buf, INSN_SIZE);
18425 /* If the addend is positive, use an ADD instruction.
18426 Otherwise use a SUB. Take care not to destroy the S bit. */
18427 insn &= 0xff1fffff;
18433 /* Place the encoded addend into the first 12 bits of the
18435 insn &= 0xfffff000;
18436 insn |= encoded_addend;
18438 /* Update the instruction. */
18439 md_number_to_chars (buf, insn, INSN_SIZE);
18443 case BFD_RELOC_ARM_LDR_PC_G0:
18444 case BFD_RELOC_ARM_LDR_PC_G1:
18445 case BFD_RELOC_ARM_LDR_PC_G2:
18446 case BFD_RELOC_ARM_LDR_SB_G0:
18447 case BFD_RELOC_ARM_LDR_SB_G1:
18448 case BFD_RELOC_ARM_LDR_SB_G2:
18449 assert (!fixP->fx_done);
18450 if (!seg->use_rela_p)
18453 bfd_vma addend_abs = abs (value);
18455 /* Check that the absolute value of the addend can be
18456 encoded in 12 bits. */
18457 if (addend_abs >= 0x1000)
18458 as_bad_where (fixP->fx_file, fixP->fx_line,
18459 _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
18462 /* Extract the instruction. */
18463 insn = md_chars_to_number (buf, INSN_SIZE);
18465 /* If the addend is negative, clear bit 23 of the instruction.
18466 Otherwise set it. */
18468 insn &= ~(1 << 23);
18472 /* Place the absolute value of the addend into the first 12 bits
18473 of the instruction. */
18474 insn &= 0xfffff000;
18475 insn |= addend_abs;
18477 /* Update the instruction. */
18478 md_number_to_chars (buf, insn, INSN_SIZE);
18482 case BFD_RELOC_ARM_LDRS_PC_G0:
18483 case BFD_RELOC_ARM_LDRS_PC_G1:
18484 case BFD_RELOC_ARM_LDRS_PC_G2:
18485 case BFD_RELOC_ARM_LDRS_SB_G0:
18486 case BFD_RELOC_ARM_LDRS_SB_G1:
18487 case BFD_RELOC_ARM_LDRS_SB_G2:
18488 assert (!fixP->fx_done);
18489 if (!seg->use_rela_p)
18492 bfd_vma addend_abs = abs (value);
18494 /* Check that the absolute value of the addend can be
18495 encoded in 8 bits. */
18496 if (addend_abs >= 0x100)
18497 as_bad_where (fixP->fx_file, fixP->fx_line,
18498 _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
18501 /* Extract the instruction. */
18502 insn = md_chars_to_number (buf, INSN_SIZE);
18504 /* If the addend is negative, clear bit 23 of the instruction.
18505 Otherwise set it. */
18507 insn &= ~(1 << 23);
18511 /* Place the first four bits of the absolute value of the addend
18512 into the first 4 bits of the instruction, and the remaining
18513 four into bits 8 .. 11. */
18514 insn &= 0xfffff0f0;
18515 insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
18517 /* Update the instruction. */
18518 md_number_to_chars (buf, insn, INSN_SIZE);
18522 case BFD_RELOC_ARM_LDC_PC_G0:
18523 case BFD_RELOC_ARM_LDC_PC_G1:
18524 case BFD_RELOC_ARM_LDC_PC_G2:
18525 case BFD_RELOC_ARM_LDC_SB_G0:
18526 case BFD_RELOC_ARM_LDC_SB_G1:
18527 case BFD_RELOC_ARM_LDC_SB_G2:
18528 assert (!fixP->fx_done);
18529 if (!seg->use_rela_p)
18532 bfd_vma addend_abs = abs (value);
18534 /* Check that the absolute value of the addend is a multiple of
18535 four and, when divided by four, fits in 8 bits. */
18536 if (addend_abs & 0x3)
18537 as_bad_where (fixP->fx_file, fixP->fx_line,
18538 _("bad offset 0x%08lX (must be word-aligned)"),
18541 if ((addend_abs >> 2) > 0xff)
18542 as_bad_where (fixP->fx_file, fixP->fx_line,
18543 _("bad offset 0x%08lX (must be an 8-bit number of words)"),
18546 /* Extract the instruction. */
18547 insn = md_chars_to_number (buf, INSN_SIZE);
18549 /* If the addend is negative, clear bit 23 of the instruction.
18550 Otherwise set it. */
18552 insn &= ~(1 << 23);
18556 /* Place the addend (divided by four) into the first eight
18557 bits of the instruction. */
18558 insn &= 0xfffffff0;
18559 insn |= addend_abs >> 2;
18561 /* Update the instruction. */
18562 md_number_to_chars (buf, insn, INSN_SIZE);
18566 case BFD_RELOC_UNUSED:
18568 as_bad_where (fixP->fx_file, fixP->fx_line,
18569 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
18573 /* Translate internal representation of relocation info to BFD target
18577 tc_gen_reloc (asection *section, fixS *fixp)
18580 bfd_reloc_code_real_type code;
18582 reloc = xmalloc (sizeof (arelent));
18584 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
18585 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
18586 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
18588 if (fixp->fx_pcrel)
18590 if (section->use_rela_p)
18591 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
18593 fixp->fx_offset = reloc->address;
18595 reloc->addend = fixp->fx_offset;
18597 switch (fixp->fx_r_type)
18600 if (fixp->fx_pcrel)
18602 code = BFD_RELOC_8_PCREL;
18607 if (fixp->fx_pcrel)
18609 code = BFD_RELOC_16_PCREL;
18614 if (fixp->fx_pcrel)
18616 code = BFD_RELOC_32_PCREL;
18620 case BFD_RELOC_ARM_MOVW:
18621 if (fixp->fx_pcrel)
18623 code = BFD_RELOC_ARM_MOVW_PCREL;
18627 case BFD_RELOC_ARM_MOVT:
18628 if (fixp->fx_pcrel)
18630 code = BFD_RELOC_ARM_MOVT_PCREL;
18634 case BFD_RELOC_ARM_THUMB_MOVW:
18635 if (fixp->fx_pcrel)
18637 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
18641 case BFD_RELOC_ARM_THUMB_MOVT:
18642 if (fixp->fx_pcrel)
18644 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
18648 case BFD_RELOC_NONE:
18649 case BFD_RELOC_ARM_PCREL_BRANCH:
18650 case BFD_RELOC_ARM_PCREL_BLX:
18651 case BFD_RELOC_RVA:
18652 case BFD_RELOC_THUMB_PCREL_BRANCH7:
18653 case BFD_RELOC_THUMB_PCREL_BRANCH9:
18654 case BFD_RELOC_THUMB_PCREL_BRANCH12:
18655 case BFD_RELOC_THUMB_PCREL_BRANCH20:
18656 case BFD_RELOC_THUMB_PCREL_BRANCH23:
18657 case BFD_RELOC_THUMB_PCREL_BRANCH25:
18658 case BFD_RELOC_THUMB_PCREL_BLX:
18659 case BFD_RELOC_VTABLE_ENTRY:
18660 case BFD_RELOC_VTABLE_INHERIT:
18661 code = fixp->fx_r_type;
18664 case BFD_RELOC_ARM_LITERAL:
18665 case BFD_RELOC_ARM_HWLITERAL:
18666 /* If this is called then the a literal has
18667 been referenced across a section boundary. */
18668 as_bad_where (fixp->fx_file, fixp->fx_line,
18669 _("literal referenced across section boundary"));
18673 case BFD_RELOC_ARM_GOT32:
18674 case BFD_RELOC_ARM_GOTOFF:
18675 case BFD_RELOC_ARM_PLT32:
18676 case BFD_RELOC_ARM_TARGET1:
18677 case BFD_RELOC_ARM_ROSEGREL32:
18678 case BFD_RELOC_ARM_SBREL32:
18679 case BFD_RELOC_ARM_PREL31:
18680 case BFD_RELOC_ARM_TARGET2:
18681 case BFD_RELOC_ARM_TLS_LE32:
18682 case BFD_RELOC_ARM_TLS_LDO32:
18683 case BFD_RELOC_ARM_PCREL_CALL:
18684 case BFD_RELOC_ARM_PCREL_JUMP:
18685 case BFD_RELOC_ARM_ALU_PC_G0_NC:
18686 case BFD_RELOC_ARM_ALU_PC_G0:
18687 case BFD_RELOC_ARM_ALU_PC_G1_NC:
18688 case BFD_RELOC_ARM_ALU_PC_G1:
18689 case BFD_RELOC_ARM_ALU_PC_G2:
18690 case BFD_RELOC_ARM_LDR_PC_G0:
18691 case BFD_RELOC_ARM_LDR_PC_G1:
18692 case BFD_RELOC_ARM_LDR_PC_G2:
18693 case BFD_RELOC_ARM_LDRS_PC_G0:
18694 case BFD_RELOC_ARM_LDRS_PC_G1:
18695 case BFD_RELOC_ARM_LDRS_PC_G2:
18696 case BFD_RELOC_ARM_LDC_PC_G0:
18697 case BFD_RELOC_ARM_LDC_PC_G1:
18698 case BFD_RELOC_ARM_LDC_PC_G2:
18699 case BFD_RELOC_ARM_ALU_SB_G0_NC:
18700 case BFD_RELOC_ARM_ALU_SB_G0:
18701 case BFD_RELOC_ARM_ALU_SB_G1_NC:
18702 case BFD_RELOC_ARM_ALU_SB_G1:
18703 case BFD_RELOC_ARM_ALU_SB_G2:
18704 case BFD_RELOC_ARM_LDR_SB_G0:
18705 case BFD_RELOC_ARM_LDR_SB_G1:
18706 case BFD_RELOC_ARM_LDR_SB_G2:
18707 case BFD_RELOC_ARM_LDRS_SB_G0:
18708 case BFD_RELOC_ARM_LDRS_SB_G1:
18709 case BFD_RELOC_ARM_LDRS_SB_G2:
18710 case BFD_RELOC_ARM_LDC_SB_G0:
18711 case BFD_RELOC_ARM_LDC_SB_G1:
18712 case BFD_RELOC_ARM_LDC_SB_G2:
18713 code = fixp->fx_r_type;
18716 case BFD_RELOC_ARM_TLS_GD32:
18717 case BFD_RELOC_ARM_TLS_IE32:
18718 case BFD_RELOC_ARM_TLS_LDM32:
18719 /* BFD will include the symbol's address in the addend.
18720 But we don't want that, so subtract it out again here. */
18721 if (!S_IS_COMMON (fixp->fx_addsy))
18722 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
18723 code = fixp->fx_r_type;
18727 case BFD_RELOC_ARM_IMMEDIATE:
18728 as_bad_where (fixp->fx_file, fixp->fx_line,
18729 _("internal relocation (type: IMMEDIATE) not fixed up"));
18732 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
18733 as_bad_where (fixp->fx_file, fixp->fx_line,
18734 _("ADRL used for a symbol not defined in the same file"));
18737 case BFD_RELOC_ARM_OFFSET_IMM:
18738 if (section->use_rela_p)
18740 code = fixp->fx_r_type;
18744 if (fixp->fx_addsy != NULL
18745 && !S_IS_DEFINED (fixp->fx_addsy)
18746 && S_IS_LOCAL (fixp->fx_addsy))
18748 as_bad_where (fixp->fx_file, fixp->fx_line,
18749 _("undefined local label `%s'"),
18750 S_GET_NAME (fixp->fx_addsy));
18754 as_bad_where (fixp->fx_file, fixp->fx_line,
18755 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
18762 switch (fixp->fx_r_type)
18764 case BFD_RELOC_NONE: type = "NONE"; break;
18765 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
18766 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
18767 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
18768 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
18769 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
18770 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
18771 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
18772 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
18773 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
18774 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
18775 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
18776 default: type = _("<unknown>"); break;
18778 as_bad_where (fixp->fx_file, fixp->fx_line,
18779 _("cannot represent %s relocation in this object file format"),
18786 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
18788 && fixp->fx_addsy == GOT_symbol)
18790 code = BFD_RELOC_ARM_GOTPC;
18791 reloc->addend = fixp->fx_offset = reloc->address;
18795 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
18797 if (reloc->howto == NULL)
18799 as_bad_where (fixp->fx_file, fixp->fx_line,
18800 _("cannot represent %s relocation in this object file format"),
18801 bfd_get_reloc_code_name (code));
18805 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
18806 vtable entry to be used in the relocation's section offset. */
18807 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
18808 reloc->address = fixp->fx_offset;
18813 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
18816 cons_fix_new_arm (fragS * frag,
18821 bfd_reloc_code_real_type type;
18825 FIXME: @@ Should look at CPU word size. */
18829 type = BFD_RELOC_8;
18832 type = BFD_RELOC_16;
18836 type = BFD_RELOC_32;
18839 type = BFD_RELOC_64;
18843 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
18846 #if defined OBJ_COFF || defined OBJ_ELF
18848 arm_validate_fix (fixS * fixP)
18850 /* If the destination of the branch is a defined symbol which does not have
18851 the THUMB_FUNC attribute, then we must be calling a function which has
18852 the (interfacearm) attribute. We look for the Thumb entry point to that
18853 function and change the branch to refer to that function instead. */
18854 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
18855 && fixP->fx_addsy != NULL
18856 && S_IS_DEFINED (fixP->fx_addsy)
18857 && ! THUMB_IS_FUNC (fixP->fx_addsy))
18859 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
18865 arm_force_relocation (struct fix * fixp)
18867 #if defined (OBJ_COFF) && defined (TE_PE)
18868 if (fixp->fx_r_type == BFD_RELOC_RVA)
18872 /* Resolve these relocations even if the symbol is extern or weak. */
18873 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
18874 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
18875 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
18876 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
18877 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
18878 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
18879 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
18882 /* Always leave these relocations for the linker. */
18883 if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
18884 && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
18885 || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
18888 return generic_force_reloc (fixp);
18892 /* This is a little hack to help the gas/arm/adrl.s test. It prevents
18893 local labels from being added to the output symbol table when they
18894 are used with the ADRL pseudo op. The ADRL relocation should always
18895 be resolved before the binbary is emitted, so it is safe to say that
18896 it is adjustable. */
18899 arm_fix_adjustable (fixS * fixP)
18901 if (fixP->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE)
18908 /* Relocations against function names must be left unadjusted,
18909 so that the linker can use this information to generate interworking
18910 stubs. The MIPS version of this function
18911 also prevents relocations that are mips-16 specific, but I do not
18912 know why it does this.
18915 There is one other problem that ought to be addressed here, but
18916 which currently is not: Taking the address of a label (rather
18917 than a function) and then later jumping to that address. Such
18918 addresses also ought to have their bottom bit set (assuming that
18919 they reside in Thumb code), but at the moment they will not. */
18922 arm_fix_adjustable (fixS * fixP)
18924 if (fixP->fx_addsy == NULL)
18927 /* Preserve relocations against symbols with function type. */
18928 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
18931 if (THUMB_IS_FUNC (fixP->fx_addsy)
18932 && fixP->fx_subsy == NULL)
18935 /* We need the symbol name for the VTABLE entries. */
18936 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
18937 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
18940 /* Don't allow symbols to be discarded on GOT related relocs. */
18941 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
18942 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
18943 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
18944 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
18945 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
18946 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
18947 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
18948 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
18949 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
18952 /* Similarly for group relocations. */
18953 if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
18954 && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
18955 || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
18962 elf32_arm_target_format (void)
18965 return (target_big_endian
18966 ? "elf32-bigarm-symbian"
18967 : "elf32-littlearm-symbian");
18968 #elif defined (TE_VXWORKS)
18969 return (target_big_endian
18970 ? "elf32-bigarm-vxworks"
18971 : "elf32-littlearm-vxworks");
18973 if (target_big_endian)
18974 return "elf32-bigarm";
18976 return "elf32-littlearm";
18981 armelf_frob_symbol (symbolS * symp,
18984 elf_frob_symbol (symp, puntp);
18988 /* MD interface: Finalization. */
18990 /* A good place to do this, although this was probably not intended
18991 for this kind of use. We need to dump the literal pool before
18992 references are made to a null symbol pointer. */
18997 literal_pool * pool;
18999 for (pool = list_of_pools; pool; pool = pool->next)
19001 /* Put it at the end of the relevent section. */
19002 subseg_set (pool->section, pool->sub_section);
19004 arm_elf_change_section ();
19010 /* Adjust the symbol table. This marks Thumb symbols as distinct from
19014 arm_adjust_symtab (void)
19019 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
19021 if (ARM_IS_THUMB (sym))
19023 if (THUMB_IS_FUNC (sym))
19025 /* Mark the symbol as a Thumb function. */
19026 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
19027 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
19028 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
19030 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
19031 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
19033 as_bad (_("%s: unexpected function type: %d"),
19034 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
19036 else switch (S_GET_STORAGE_CLASS (sym))
19039 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
19042 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
19045 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
19053 if (ARM_IS_INTERWORK (sym))
19054 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
19061 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
19063 if (ARM_IS_THUMB (sym))
19065 elf_symbol_type * elf_sym;
19067 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
19068 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
19070 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
19071 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
19073 /* If it's a .thumb_func, declare it as so,
19074 otherwise tag label as .code 16. */
19075 if (THUMB_IS_FUNC (sym))
19076 elf_sym->internal_elf_sym.st_info =
19077 ELF_ST_INFO (bind, STT_ARM_TFUNC);
19079 elf_sym->internal_elf_sym.st_info =
19080 ELF_ST_INFO (bind, STT_ARM_16BIT);
19087 /* MD interface: Initialization. */
19090 set_constant_flonums (void)
19094 for (i = 0; i < NUM_FLOAT_VALS; i++)
19095 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
19099 /* Auto-select Thumb mode if it's the only available instruction set for the
19100 given architecture. */
19103 autoselect_thumb_from_cpu_variant (void)
19105 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
19106 opcode_select (16);
19115 if ( (arm_ops_hsh = hash_new ()) == NULL
19116 || (arm_cond_hsh = hash_new ()) == NULL
19117 || (arm_shift_hsh = hash_new ()) == NULL
19118 || (arm_psr_hsh = hash_new ()) == NULL
19119 || (arm_v7m_psr_hsh = hash_new ()) == NULL
19120 || (arm_reg_hsh = hash_new ()) == NULL
19121 || (arm_reloc_hsh = hash_new ()) == NULL
19122 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
19123 as_fatal (_("virtual memory exhausted"));
19125 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
19126 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
19127 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
19128 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
19129 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
19130 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
19131 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
19132 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
19133 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
19134 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
19135 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
19136 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
19138 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
19140 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
19141 (PTR) (barrier_opt_names + i));
19143 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
19144 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
19147 set_constant_flonums ();
19149 /* Set the cpu variant based on the command-line options. We prefer
19150 -mcpu= over -march= if both are set (as for GCC); and we prefer
19151 -mfpu= over any other way of setting the floating point unit.
19152 Use of legacy options with new options are faulted. */
19155 if (mcpu_cpu_opt || march_cpu_opt)
19156 as_bad (_("use of old and new-style options to set CPU type"));
19158 mcpu_cpu_opt = legacy_cpu;
19160 else if (!mcpu_cpu_opt)
19161 mcpu_cpu_opt = march_cpu_opt;
19166 as_bad (_("use of old and new-style options to set FPU type"));
19168 mfpu_opt = legacy_fpu;
19170 else if (!mfpu_opt)
19172 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
19173 /* Some environments specify a default FPU. If they don't, infer it
19174 from the processor. */
19176 mfpu_opt = mcpu_fpu_opt;
19178 mfpu_opt = march_fpu_opt;
19180 mfpu_opt = &fpu_default;
19187 mfpu_opt = &fpu_default;
19188 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
19189 mfpu_opt = &fpu_arch_vfp_v2;
19191 mfpu_opt = &fpu_arch_fpa;
19197 mcpu_cpu_opt = &cpu_default;
19198 selected_cpu = cpu_default;
19202 selected_cpu = *mcpu_cpu_opt;
19204 mcpu_cpu_opt = &arm_arch_any;
19207 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
19209 autoselect_thumb_from_cpu_variant ();
19211 arm_arch_used = thumb_arch_used = arm_arch_none;
19213 #if defined OBJ_COFF || defined OBJ_ELF
19215 unsigned int flags = 0;
19217 #if defined OBJ_ELF
19218 flags = meabi_flags;
19220 switch (meabi_flags)
19222 case EF_ARM_EABI_UNKNOWN:
19224 /* Set the flags in the private structure. */
19225 if (uses_apcs_26) flags |= F_APCS26;
19226 if (support_interwork) flags |= F_INTERWORK;
19227 if (uses_apcs_float) flags |= F_APCS_FLOAT;
19228 if (pic_code) flags |= F_PIC;
19229 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
19230 flags |= F_SOFT_FLOAT;
19232 switch (mfloat_abi_opt)
19234 case ARM_FLOAT_ABI_SOFT:
19235 case ARM_FLOAT_ABI_SOFTFP:
19236 flags |= F_SOFT_FLOAT;
19239 case ARM_FLOAT_ABI_HARD:
19240 if (flags & F_SOFT_FLOAT)
19241 as_bad (_("hard-float conflicts with specified fpu"));
19245 /* Using pure-endian doubles (even if soft-float). */
19246 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
19247 flags |= F_VFP_FLOAT;
19249 #if defined OBJ_ELF
19250 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
19251 flags |= EF_ARM_MAVERICK_FLOAT;
19254 case EF_ARM_EABI_VER4:
19255 case EF_ARM_EABI_VER5:
19256 /* No additional flags to set. */
19263 bfd_set_private_flags (stdoutput, flags);
19265 /* We have run out flags in the COFF header to encode the
19266 status of ATPCS support, so instead we create a dummy,
19267 empty, debug section called .arm.atpcs. */
19272 sec = bfd_make_section (stdoutput, ".arm.atpcs");
19276 bfd_set_section_flags
19277 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
19278 bfd_set_section_size (stdoutput, sec, 0);
19279 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
19285 /* Record the CPU type as well. */
19286 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
19287 mach = bfd_mach_arm_iWMMXt2;
19288 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
19289 mach = bfd_mach_arm_iWMMXt;
19290 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
19291 mach = bfd_mach_arm_XScale;
19292 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
19293 mach = bfd_mach_arm_ep9312;
19294 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
19295 mach = bfd_mach_arm_5TE;
19296 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
19298 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19299 mach = bfd_mach_arm_5T;
19301 mach = bfd_mach_arm_5;
19303 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
19305 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
19306 mach = bfd_mach_arm_4T;
19308 mach = bfd_mach_arm_4;
19310 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
19311 mach = bfd_mach_arm_3M;
19312 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
19313 mach = bfd_mach_arm_3;
19314 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
19315 mach = bfd_mach_arm_2a;
19316 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
19317 mach = bfd_mach_arm_2;
19319 mach = bfd_mach_arm_unknown;
19321 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
19324 /* Command line processing. */
19327 Invocation line includes a switch not recognized by the base assembler.
19328 See if it's a processor-specific option.
19330 This routine is somewhat complicated by the need for backwards
19331 compatibility (since older releases of gcc can't be changed).
19332 The new options try to make the interface as compatible as
19335 New options (supported) are:
19337 -mcpu=<cpu name> Assemble for selected processor
19338 -march=<architecture name> Assemble for selected architecture
19339 -mfpu=<fpu architecture> Assemble for selected FPU.
19340 -EB/-mbig-endian Big-endian
19341 -EL/-mlittle-endian Little-endian
19342 -k Generate PIC code
19343 -mthumb Start in Thumb mode
19344 -mthumb-interwork Code supports ARM/Thumb interworking
19346 For now we will also provide support for:
19348 -mapcs-32 32-bit Program counter
19349 -mapcs-26 26-bit Program counter
19350 -macps-float Floats passed in FP registers
19351 -mapcs-reentrant Reentrant code
19353 (sometime these will probably be replaced with -mapcs=<list of options>
19354 and -matpcs=<list of options>)
19356 The remaining options are only supported for back-wards compatibility.
19357 Cpu variants, the arm part is optional:
19358 -m[arm]1 Currently not supported.
19359 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
19360 -m[arm]3 Arm 3 processor
19361 -m[arm]6[xx], Arm 6 processors
19362 -m[arm]7[xx][t][[d]m] Arm 7 processors
19363 -m[arm]8[10] Arm 8 processors
19364 -m[arm]9[20][tdmi] Arm 9 processors
19365 -mstrongarm[110[0]] StrongARM processors
19366 -mxscale XScale processors
19367 -m[arm]v[2345[t[e]]] Arm architectures
19368 -mall All (except the ARM1)
19370 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
19371 -mfpe-old (No float load/store multiples)
19372 -mvfpxd VFP Single precision
19374 -mno-fpu Disable all floating point instructions
19376 The following CPU names are recognized:
19377 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
19378 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
19379 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
19380 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
19381 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
19382 arm10t arm10e, arm1020t, arm1020e, arm10200e,
19383 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
19387 const char * md_shortopts = "m:k";
19389 #ifdef ARM_BI_ENDIAN
19390 #define OPTION_EB (OPTION_MD_BASE + 0)
19391 #define OPTION_EL (OPTION_MD_BASE + 1)
19393 #if TARGET_BYTES_BIG_ENDIAN
19394 #define OPTION_EB (OPTION_MD_BASE + 0)
19396 #define OPTION_EL (OPTION_MD_BASE + 1)
19400 struct option md_longopts[] =
19403 {"EB", no_argument, NULL, OPTION_EB},
19406 {"EL", no_argument, NULL, OPTION_EL},
19408 {NULL, no_argument, NULL, 0}
19411 size_t md_longopts_size = sizeof (md_longopts);
19413 struct arm_option_table
19415 char *option; /* Option name to match. */
19416 char *help; /* Help information. */
19417 int *var; /* Variable to change. */
19418 int value; /* What to change it to. */
19419 char *deprecated; /* If non-null, print this message. */
19422 struct arm_option_table arm_opts[] =
19424 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
19425 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
19426 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
19427 &support_interwork, 1, NULL},
19428 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
19429 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
19430 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
19432 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
19433 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
19434 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
19435 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
19438 /* These are recognized by the assembler, but have no affect on code. */
19439 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
19440 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
19441 {NULL, NULL, NULL, 0, NULL}
19444 struct arm_legacy_option_table
19446 char *option; /* Option name to match. */
19447 const arm_feature_set **var; /* Variable to change. */
19448 const arm_feature_set value; /* What to change it to. */
19449 char *deprecated; /* If non-null, print this message. */
19452 const struct arm_legacy_option_table arm_legacy_opts[] =
19454 /* DON'T add any new processors to this list -- we want the whole list
19455 to go away... Add them to the processors table instead. */
19456 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
19457 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
19458 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
19459 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
19460 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
19461 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
19462 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
19463 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
19464 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
19465 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
19466 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
19467 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
19468 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
19469 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
19470 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
19471 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
19472 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
19473 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
19474 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
19475 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
19476 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
19477 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
19478 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
19479 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
19480 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
19481 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
19482 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
19483 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
19484 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
19485 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
19486 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
19487 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
19488 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
19489 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
19490 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
19491 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
19492 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
19493 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
19494 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
19495 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
19496 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
19497 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
19498 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
19499 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
19500 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
19501 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
19502 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19503 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19504 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19505 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
19506 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
19507 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
19508 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
19509 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
19510 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
19511 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
19512 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
19513 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
19514 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
19515 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
19516 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
19517 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
19518 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
19519 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
19520 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
19521 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
19522 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
19523 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
19524 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
19525 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
19526 N_("use -mcpu=strongarm110")},
19527 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
19528 N_("use -mcpu=strongarm1100")},
19529 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
19530 N_("use -mcpu=strongarm1110")},
19531 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
19532 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
19533 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
19535 /* Architecture variants -- don't add any more to this list either. */
19536 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
19537 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
19538 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
19539 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
19540 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
19541 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
19542 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
19543 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
19544 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
19545 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
19546 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
19547 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
19548 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
19549 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
19550 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
19551 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
19552 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
19553 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
19555 /* Floating point variants -- don't add any more to this list either. */
19556 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
19557 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
19558 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
19559 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
19560 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
19562 {NULL, NULL, ARM_ARCH_NONE, NULL}
19565 struct arm_cpu_option_table
19568 const arm_feature_set value;
19569 /* For some CPUs we assume an FPU unless the user explicitly sets
19571 const arm_feature_set default_fpu;
19572 /* The canonical name of the CPU, or NULL to use NAME converted to upper
19574 const char *canonical_name;
19577 /* This list should, at a minimum, contain all the cpu names
19578 recognized by GCC. */
19579 static const struct arm_cpu_option_table arm_cpus[] =
19581 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
19582 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
19583 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
19584 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
19585 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
19586 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19587 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19588 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19589 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19590 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19591 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19592 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19593 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19594 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19595 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19596 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
19597 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19598 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19599 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19600 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19601 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19602 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19603 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19604 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19605 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19606 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19607 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19608 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
19609 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19610 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19611 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19612 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19613 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19614 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19615 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19616 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19617 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19618 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
19619 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19620 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
19621 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19622 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19623 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19624 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
19625 /* For V5 or later processors we default to using VFP; but the user
19626 should really set the FPU type explicitly. */
19627 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19628 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19629 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
19630 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
19631 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
19632 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19633 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
19634 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19635 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
19636 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
19637 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19638 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19639 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19640 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19641 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19642 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
19643 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
19644 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19645 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
19646 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
19647 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
19648 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
19649 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
19650 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
19651 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
19652 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
19653 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
19654 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
19655 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
19656 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
19657 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
19658 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
19659 | FPU_NEON_EXT_V1),
19661 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
19662 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
19663 /* ??? XSCALE is really an architecture. */
19664 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
19665 /* ??? iwmmxt is not a processor. */
19666 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
19667 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP_V2, NULL},
19668 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
19670 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
19671 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
19674 struct arm_arch_option_table
19677 const arm_feature_set value;
19678 const arm_feature_set default_fpu;
19681 /* This list should, at a minimum, contain all the architecture names
19682 recognized by GCC. */
19683 static const struct arm_arch_option_table arm_archs[] =
19685 {"all", ARM_ANY, FPU_ARCH_FPA},
19686 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
19687 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
19688 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
19689 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
19690 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
19691 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
19692 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
19693 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
19694 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
19695 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
19696 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
19697 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
19698 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
19699 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
19700 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
19701 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
19702 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
19703 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
19704 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
19705 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
19706 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
19707 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
19708 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
19709 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
19710 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
19711 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
19712 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
19713 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
19714 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
19715 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
19716 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
19717 {"iwmmxt2", ARM_ARCH_IWMMXT2,FPU_ARCH_VFP},
19718 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
19721 /* ISA extensions in the co-processor space. */
19722 struct arm_option_cpu_value_table
19725 const arm_feature_set value;
19728 static const struct arm_option_cpu_value_table arm_extensions[] =
19730 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
19731 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
19732 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
19733 {"iwmmxt2", ARM_FEATURE (0, ARM_CEXT_IWMMXT2)},
19734 {NULL, ARM_ARCH_NONE}
19737 /* This list should, at a minimum, contain all the fpu names
19738 recognized by GCC. */
19739 static const struct arm_option_cpu_value_table arm_fpus[] =
19741 {"softfpa", FPU_NONE},
19742 {"fpe", FPU_ARCH_FPE},
19743 {"fpe2", FPU_ARCH_FPE},
19744 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
19745 {"fpa", FPU_ARCH_FPA},
19746 {"fpa10", FPU_ARCH_FPA},
19747 {"fpa11", FPU_ARCH_FPA},
19748 {"arm7500fe", FPU_ARCH_FPA},
19749 {"softvfp", FPU_ARCH_VFP},
19750 {"softvfp+vfp", FPU_ARCH_VFP_V2},
19751 {"vfp", FPU_ARCH_VFP_V2},
19752 {"vfp9", FPU_ARCH_VFP_V2},
19753 {"vfp3", FPU_ARCH_VFP_V3},
19754 {"vfp10", FPU_ARCH_VFP_V2},
19755 {"vfp10-r0", FPU_ARCH_VFP_V1},
19756 {"vfpxd", FPU_ARCH_VFP_V1xD},
19757 {"arm1020t", FPU_ARCH_VFP_V1},
19758 {"arm1020e", FPU_ARCH_VFP_V2},
19759 {"arm1136jfs", FPU_ARCH_VFP_V2},
19760 {"arm1136jf-s", FPU_ARCH_VFP_V2},
19761 {"maverick", FPU_ARCH_MAVERICK},
19762 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
19763 {NULL, ARM_ARCH_NONE}
19766 struct arm_option_value_table
19772 static const struct arm_option_value_table arm_float_abis[] =
19774 {"hard", ARM_FLOAT_ABI_HARD},
19775 {"softfp", ARM_FLOAT_ABI_SOFTFP},
19776 {"soft", ARM_FLOAT_ABI_SOFT},
19781 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
19782 static const struct arm_option_value_table arm_eabis[] =
19784 {"gnu", EF_ARM_EABI_UNKNOWN},
19785 {"4", EF_ARM_EABI_VER4},
19786 {"5", EF_ARM_EABI_VER5},
19791 struct arm_long_option_table
19793 char * option; /* Substring to match. */
19794 char * help; /* Help information. */
19795 int (* func) (char * subopt); /* Function to decode sub-option. */
19796 char * deprecated; /* If non-null, print this message. */
19800 arm_parse_extension (char * str, const arm_feature_set **opt_p)
19802 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
19804 /* Copy the feature set, so that we can modify it. */
19805 *ext_set = **opt_p;
19808 while (str != NULL && *str != 0)
19810 const struct arm_option_cpu_value_table * opt;
19816 as_bad (_("invalid architectural extension"));
19821 ext = strchr (str, '+');
19824 optlen = ext - str;
19826 optlen = strlen (str);
19830 as_bad (_("missing architectural extension"));
19834 for (opt = arm_extensions; opt->name != NULL; opt++)
19835 if (strncmp (opt->name, str, optlen) == 0)
19837 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
19841 if (opt->name == NULL)
19843 as_bad (_("unknown architectural extnsion `%s'"), str);
19854 arm_parse_cpu (char * str)
19856 const struct arm_cpu_option_table * opt;
19857 char * ext = strchr (str, '+');
19861 optlen = ext - str;
19863 optlen = strlen (str);
19867 as_bad (_("missing cpu name `%s'"), str);
19871 for (opt = arm_cpus; opt->name != NULL; opt++)
19872 if (strncmp (opt->name, str, optlen) == 0)
19874 mcpu_cpu_opt = &opt->value;
19875 mcpu_fpu_opt = &opt->default_fpu;
19876 if (opt->canonical_name)
19877 strcpy(selected_cpu_name, opt->canonical_name);
19881 for (i = 0; i < optlen; i++)
19882 selected_cpu_name[i] = TOUPPER (opt->name[i]);
19883 selected_cpu_name[i] = 0;
19887 return arm_parse_extension (ext, &mcpu_cpu_opt);
19892 as_bad (_("unknown cpu `%s'"), str);
19897 arm_parse_arch (char * str)
19899 const struct arm_arch_option_table *opt;
19900 char *ext = strchr (str, '+');
19904 optlen = ext - str;
19906 optlen = strlen (str);
19910 as_bad (_("missing architecture name `%s'"), str);
19914 for (opt = arm_archs; opt->name != NULL; opt++)
19915 if (streq (opt->name, str))
19917 march_cpu_opt = &opt->value;
19918 march_fpu_opt = &opt->default_fpu;
19919 strcpy(selected_cpu_name, opt->name);
19922 return arm_parse_extension (ext, &march_cpu_opt);
19927 as_bad (_("unknown architecture `%s'\n"), str);
19932 arm_parse_fpu (char * str)
19934 const struct arm_option_cpu_value_table * opt;
19936 for (opt = arm_fpus; opt->name != NULL; opt++)
19937 if (streq (opt->name, str))
19939 mfpu_opt = &opt->value;
19943 as_bad (_("unknown floating point format `%s'\n"), str);
19948 arm_parse_float_abi (char * str)
19950 const struct arm_option_value_table * opt;
19952 for (opt = arm_float_abis; opt->name != NULL; opt++)
19953 if (streq (opt->name, str))
19955 mfloat_abi_opt = opt->value;
19959 as_bad (_("unknown floating point abi `%s'\n"), str);
19965 arm_parse_eabi (char * str)
19967 const struct arm_option_value_table *opt;
19969 for (opt = arm_eabis; opt->name != NULL; opt++)
19970 if (streq (opt->name, str))
19972 meabi_flags = opt->value;
19975 as_bad (_("unknown EABI `%s'\n"), str);
19980 struct arm_long_option_table arm_long_opts[] =
19982 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
19983 arm_parse_cpu, NULL},
19984 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
19985 arm_parse_arch, NULL},
19986 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
19987 arm_parse_fpu, NULL},
19988 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
19989 arm_parse_float_abi, NULL},
19991 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
19992 arm_parse_eabi, NULL},
19994 {NULL, NULL, 0, NULL}
19998 md_parse_option (int c, char * arg)
20000 struct arm_option_table *opt;
20001 const struct arm_legacy_option_table *fopt;
20002 struct arm_long_option_table *lopt;
20008 target_big_endian = 1;
20014 target_big_endian = 0;
20019 /* Listing option. Just ignore these, we don't support additional
20024 for (opt = arm_opts; opt->option != NULL; opt++)
20026 if (c == opt->option[0]
20027 && ((arg == NULL && opt->option[1] == 0)
20028 || streq (arg, opt->option + 1)))
20030 #if WARN_DEPRECATED
20031 /* If the option is deprecated, tell the user. */
20032 if (opt->deprecated != NULL)
20033 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
20034 arg ? arg : "", _(opt->deprecated));
20037 if (opt->var != NULL)
20038 *opt->var = opt->value;
20044 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
20046 if (c == fopt->option[0]
20047 && ((arg == NULL && fopt->option[1] == 0)
20048 || streq (arg, fopt->option + 1)))
20050 #if WARN_DEPRECATED
20051 /* If the option is deprecated, tell the user. */
20052 if (fopt->deprecated != NULL)
20053 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
20054 arg ? arg : "", _(fopt->deprecated));
20057 if (fopt->var != NULL)
20058 *fopt->var = &fopt->value;
20064 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20066 /* These options are expected to have an argument. */
20067 if (c == lopt->option[0]
20069 && strncmp (arg, lopt->option + 1,
20070 strlen (lopt->option + 1)) == 0)
20072 #if WARN_DEPRECATED
20073 /* If the option is deprecated, tell the user. */
20074 if (lopt->deprecated != NULL)
20075 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
20076 _(lopt->deprecated));
20079 /* Call the sup-option parser. */
20080 return lopt->func (arg + strlen (lopt->option) - 1);
20091 md_show_usage (FILE * fp)
20093 struct arm_option_table *opt;
20094 struct arm_long_option_table *lopt;
20096 fprintf (fp, _(" ARM-specific assembler options:\n"));
20098 for (opt = arm_opts; opt->option != NULL; opt++)
20099 if (opt->help != NULL)
20100 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
20102 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
20103 if (lopt->help != NULL)
20104 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
20108 -EB assemble code for a big-endian cpu\n"));
20113 -EL assemble code for a little-endian cpu\n"));
20122 arm_feature_set flags;
20123 } cpu_arch_ver_table;
20125 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
20126 least features first. */
20127 static const cpu_arch_ver_table cpu_arch_ver[] =
20132 {4, ARM_ARCH_V5TE},
20133 {5, ARM_ARCH_V5TEJ},
20137 {9, ARM_ARCH_V6T2},
20138 {10, ARM_ARCH_V7A},
20139 {10, ARM_ARCH_V7R},
20140 {10, ARM_ARCH_V7M},
20144 /* Set the public EABI object attributes. */
20146 aeabi_set_public_attributes (void)
20149 arm_feature_set flags;
20150 arm_feature_set tmp;
20151 const cpu_arch_ver_table *p;
20153 /* Choose the architecture based on the capabilities of the requested cpu
20154 (if any) and/or the instructions actually used. */
20155 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
20156 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
20157 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
20158 /*Allow the user to override the reported architecture. */
20161 ARM_CLEAR_FEATURE (flags, flags, arm_arch_any);
20162 ARM_MERGE_FEATURE_SETS (flags, flags, *object_arch);
20167 for (p = cpu_arch_ver; p->val; p++)
20169 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
20172 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
20176 /* Tag_CPU_name. */
20177 if (selected_cpu_name[0])
20181 p = selected_cpu_name;
20182 if (strncmp(p, "armv", 4) == 0)
20187 for (i = 0; p[i]; i++)
20188 p[i] = TOUPPER (p[i]);
20190 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
20192 /* Tag_CPU_arch. */
20193 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
20194 /* Tag_CPU_arch_profile. */
20195 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
20196 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
20197 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
20198 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
20199 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
20200 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
20201 /* Tag_ARM_ISA_use. */
20202 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
20203 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
20204 /* Tag_THUMB_ISA_use. */
20205 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
20206 elf32_arm_add_eabi_attr_int (stdoutput, 9,
20207 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
20208 /* Tag_VFP_arch. */
20209 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
20210 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
20211 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
20212 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
20213 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
20214 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
20215 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
20216 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
20217 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
20218 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
20219 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
20220 /* Tag_WMMX_arch. */
20221 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
20222 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
20223 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
20224 /* Tag_NEON_arch. */
20225 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
20226 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
20227 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
20230 /* Add the .ARM.attributes section. */
20239 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
20242 aeabi_set_public_attributes ();
20243 size = elf32_arm_eabi_attr_size (stdoutput);
20244 s = subseg_new (".ARM.attributes", 0);
20245 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
20246 addr = frag_now_fix ();
20247 p = frag_more (size);
20248 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
20252 /* Parse a .cpu directive. */
20255 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
20257 const struct arm_cpu_option_table *opt;
20261 name = input_line_pointer;
20262 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20263 input_line_pointer++;
20264 saved_char = *input_line_pointer;
20265 *input_line_pointer = 0;
20267 /* Skip the first "all" entry. */
20268 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
20269 if (streq (opt->name, name))
20271 mcpu_cpu_opt = &opt->value;
20272 selected_cpu = opt->value;
20273 if (opt->canonical_name)
20274 strcpy(selected_cpu_name, opt->canonical_name);
20278 for (i = 0; opt->name[i]; i++)
20279 selected_cpu_name[i] = TOUPPER (opt->name[i]);
20280 selected_cpu_name[i] = 0;
20282 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20283 *input_line_pointer = saved_char;
20284 demand_empty_rest_of_line ();
20287 as_bad (_("unknown cpu `%s'"), name);
20288 *input_line_pointer = saved_char;
20289 ignore_rest_of_line ();
20293 /* Parse a .arch directive. */
20296 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
20298 const struct arm_arch_option_table *opt;
20302 name = input_line_pointer;
20303 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20304 input_line_pointer++;
20305 saved_char = *input_line_pointer;
20306 *input_line_pointer = 0;
20308 /* Skip the first "all" entry. */
20309 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20310 if (streq (opt->name, name))
20312 mcpu_cpu_opt = &opt->value;
20313 selected_cpu = opt->value;
20314 strcpy(selected_cpu_name, opt->name);
20315 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20316 *input_line_pointer = saved_char;
20317 demand_empty_rest_of_line ();
20321 as_bad (_("unknown architecture `%s'\n"), name);
20322 *input_line_pointer = saved_char;
20323 ignore_rest_of_line ();
20327 /* Parse a .object_arch directive. */
20330 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
20332 const struct arm_arch_option_table *opt;
20336 name = input_line_pointer;
20337 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20338 input_line_pointer++;
20339 saved_char = *input_line_pointer;
20340 *input_line_pointer = 0;
20342 /* Skip the first "all" entry. */
20343 for (opt = arm_archs + 1; opt->name != NULL; opt++)
20344 if (streq (opt->name, name))
20346 object_arch = &opt->value;
20347 *input_line_pointer = saved_char;
20348 demand_empty_rest_of_line ();
20352 as_bad (_("unknown architecture `%s'\n"), name);
20353 *input_line_pointer = saved_char;
20354 ignore_rest_of_line ();
20358 /* Parse a .fpu directive. */
20361 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
20363 const struct arm_option_cpu_value_table *opt;
20367 name = input_line_pointer;
20368 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
20369 input_line_pointer++;
20370 saved_char = *input_line_pointer;
20371 *input_line_pointer = 0;
20373 for (opt = arm_fpus; opt->name != NULL; opt++)
20374 if (streq (opt->name, name))
20376 mfpu_opt = &opt->value;
20377 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
20378 *input_line_pointer = saved_char;
20379 demand_empty_rest_of_line ();
20383 as_bad (_("unknown floating point format `%s'\n"), name);
20384 *input_line_pointer = saved_char;
20385 ignore_rest_of_line ();
20387 #endif /* OBJ_ELF */