1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
32 #include "safe-ctype.h"
34 /* Need TARGET_CPU. */
41 #include "opcode/arm.h"
45 #include "dwarf2dbg.h"
46 #include "dw2gencfi.h"
49 /* XXX Set this to 1 after the next binutils release. */
50 #define WARN_DEPRECATED 0
53 /* Must be at least the size of the largest unwind opcode (currently two). */
54 #define ARM_OPCODE_CHUNK_SIZE 8
56 /* This structure holds the unwinding state. */
61 symbolS * table_entry;
62 symbolS * personality_routine;
63 int personality_index;
64 /* The segment containing the function. */
67 /* Opcodes generated from this function. */
68 unsigned char * opcodes;
71 /* The number of bytes pushed to the stack. */
73 /* We don't add stack adjustment opcodes immediately so that we can merge
74 multiple adjustments. We can also omit the final adjustment
75 when using a frame pointer. */
76 offsetT pending_offset;
77 /* These two fields are set by both unwind_movsp and unwind_setfp. They
78 hold the reg+offset to use when restoring sp from a frame pointer. */
81 /* Nonzero if an unwind_setfp directive has been seen. */
83 /* Nonzero if the last opcode restores sp from fp_reg. */
84 unsigned sp_restored:1;
87 /* Bit N indicates that an R_ARM_NONE relocation has been output for
88 __aeabi_unwind_cpp_prN already if set. This enables dependencies to be
89 emitted only once per section, to save unnecessary bloat. */
90 static unsigned int marked_pr_dependency = 0;
101 /* Types of processor to assemble for. */
103 #if defined __XSCALE__
104 #define CPU_DEFAULT ARM_ARCH_XSCALE
106 #if defined __thumb__
107 #define CPU_DEFAULT ARM_ARCH_V5T
114 # define FPU_DEFAULT FPU_ARCH_FPA
115 # elif defined (TE_NetBSD)
117 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order. */
119 /* Legacy a.out format. */
120 # define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order. */
122 # elif defined (TE_VXWORKS)
123 # define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order. */
125 /* For backwards compatibility, default to FPA. */
126 # define FPU_DEFAULT FPU_ARCH_FPA
128 #endif /* ifndef FPU_DEFAULT */
130 #define streq(a, b) (strcmp (a, b) == 0)
132 static arm_feature_set cpu_variant;
133 static arm_feature_set arm_arch_used;
134 static arm_feature_set thumb_arch_used;
136 /* Flags stored in private area of BFD structure. */
137 static int uses_apcs_26 = FALSE;
138 static int atpcs = FALSE;
139 static int support_interwork = FALSE;
140 static int uses_apcs_float = FALSE;
141 static int pic_code = FALSE;
143 /* Variables that we set while parsing command-line options. Once all
144 options have been read we re-process these values to set the real
146 static const arm_feature_set *legacy_cpu = NULL;
147 static const arm_feature_set *legacy_fpu = NULL;
149 static const arm_feature_set *mcpu_cpu_opt = NULL;
150 static const arm_feature_set *mcpu_fpu_opt = NULL;
151 static const arm_feature_set *march_cpu_opt = NULL;
152 static const arm_feature_set *march_fpu_opt = NULL;
153 static const arm_feature_set *mfpu_opt = NULL;
155 /* Constants for known architecture features. */
156 static const arm_feature_set fpu_default = FPU_DEFAULT;
157 static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
158 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
159 static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
160 static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
161 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
162 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
163 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
164 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
167 static const arm_feature_set cpu_default = CPU_DEFAULT;
170 static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
171 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
172 static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
173 static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
174 static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
175 static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
176 static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
177 static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
178 static const arm_feature_set arm_ext_v4t_5 =
179 ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
180 static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
181 static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
182 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
183 static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
184 static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
185 static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
186 static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
187 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
188 static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
189 static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
190 static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
191 static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
192 static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
193 static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);
195 static const arm_feature_set arm_arch_any = ARM_ANY;
196 static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
197 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
198 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
200 static const arm_feature_set arm_cext_iwmmxt =
201 ARM_FEATURE (0, ARM_CEXT_IWMMXT);
202 static const arm_feature_set arm_cext_xscale =
203 ARM_FEATURE (0, ARM_CEXT_XSCALE);
204 static const arm_feature_set arm_cext_maverick =
205 ARM_FEATURE (0, ARM_CEXT_MAVERICK);
206 static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
207 static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
208 static const arm_feature_set fpu_vfp_ext_v1xd =
209 ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
210 static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
211 static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
212 static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
213 static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
214 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
215 ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
217 static int mfloat_abi_opt = -1;
218 /* Record user cpu selection for object attributes. */
219 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
220 /* Must be long enough to hold any of the names in arm_cpus. */
221 static char selected_cpu_name[16];
224 static int meabi_flags = EABI_DEFAULT;
226 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
231 /* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
232 symbolS * GOT_symbol;
235 /* 0: assemble for ARM,
236 1: assemble for Thumb,
237 2: assemble for Thumb even though target CPU does not support thumb
239 static int thumb_mode = 0;
241 /* If unified_syntax is true, we are processing the new unified
242 ARM/Thumb syntax. Important differences from the old ARM mode:
244 - Immediate operands do not require a # prefix.
245 - Conditional affixes always appear at the end of the
246 instruction. (For backward compatibility, those instructions
247 that formerly had them in the middle, continue to accept them
249 - The IT instruction may appear, and if it does is validated
250 against subsequent conditional affixes. It does not generate
253 Important differences from the old Thumb mode:
255 - Immediate operands do not require a # prefix.
256 - Most of the V6T2 instructions are only available in unified mode.
257 - The .N and .W suffixes are recognized and honored (it is an error
258 if they cannot be honored).
259 - All instructions set the flags if and only if they have an 's' affix.
260 - Conditional affixes may be used. They are validated against
261 preceding IT instructions. Unlike ARM mode, you cannot use a
262 conditional affix except in the scope of an IT instruction. */
264 static bfd_boolean unified_syntax = FALSE;
279 enum neon_el_type type;
283 #define NEON_MAX_TYPE_ELS 4
287 struct neon_type_el el[NEON_MAX_TYPE_ELS];
294 unsigned long instruction;
298 struct neon_type vectype;
299 /* Set to the opcode if the instruction needs relaxation.
300 Zero if the instruction is not relaxed. */
304 bfd_reloc_code_real_type type;
313 struct neon_type_el vectype;
314 unsigned present : 1; /* Operand present. */
315 unsigned isreg : 1; /* Operand was a register. */
316 unsigned immisreg : 1; /* .imm field is a second register. */
317 unsigned isscalar : 1; /* Operand is a (Neon) scalar. */
318 unsigned immisalign : 1; /* Immediate is an alignment specifier. */
319 /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
320 instructions. This allows us to disambiguate ARM <-> vector insns. */
321 unsigned regisimm : 1; /* 64-bit immediate, reg forms high 32 bits. */
322 unsigned isquad : 1; /* Operand is Neon quad-precision register. */
323 unsigned hasreloc : 1; /* Operand has relocation suffix. */
324 unsigned writeback : 1; /* Operand has trailing ! */
325 unsigned preind : 1; /* Preindexed address. */
326 unsigned postind : 1; /* Postindexed address. */
327 unsigned negative : 1; /* Index register was negated. */
328 unsigned shifted : 1; /* Shift applied to operation. */
329 unsigned shift_kind : 3; /* Shift operation (enum shift_kind). */
333 static struct arm_it inst;
335 #define NUM_FLOAT_VALS 8
337 const char * fp_const[] =
339 "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
342 /* Number of littlenums required to hold an extended precision number. */
343 #define MAX_LITTLENUMS 6
345 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
355 #define CP_T_X 0x00008000
356 #define CP_T_Y 0x00400000
358 #define CONDS_BIT 0x00100000
359 #define LOAD_BIT 0x00100000
361 #define DOUBLE_LOAD_FLAG 0x00000001
365 const char * template;
369 #define COND_ALWAYS 0xE
373 const char *template;
377 struct asm_barrier_opt
379 const char *template;
383 /* The bit that distinguishes CPSR and SPSR. */
384 #define SPSR_BIT (1 << 22)
386 /* The individual PSR flag bits. */
387 #define PSR_c (1 << 16)
388 #define PSR_x (1 << 17)
389 #define PSR_s (1 << 18)
390 #define PSR_f (1 << 19)
395 bfd_reloc_code_real_type reloc;
400 VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
401 VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
406 VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
409 /* Bits for DEFINED field in neon_typed_alias. */
410 #define NTA_HASTYPE 1
411 #define NTA_HASINDEX 2
413 struct neon_typed_alias
415 unsigned char defined;
417 struct neon_type_el eltype;
420 /* ARM register categories. This includes coprocessor numbers and various
421 architecture extensions' registers. */
445 /* Structure for a hash table entry for a register.
446 If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
447 information which states whether a vector type or index is specified (for a
448 register alias created with .dn or .qn). Otherwise NEON should be NULL. */
452 unsigned char number;
454 unsigned char builtin;
455 struct neon_typed_alias *neon;
458 /* Diagnostics used when we don't get a register of the expected type. */
459 const char *const reg_expected_msgs[] =
461 N_("ARM register expected"),
462 N_("bad or missing co-processor number"),
463 N_("co-processor register expected"),
464 N_("FPA register expected"),
465 N_("VFP single precision register expected"),
466 N_("VFP/Neon double precision register expected"),
467 N_("Neon quad precision register expected"),
468 N_("Neon double or quad precision register expected"),
469 N_("VFP system register expected"),
470 N_("Maverick MVF register expected"),
471 N_("Maverick MVD register expected"),
472 N_("Maverick MVFX register expected"),
473 N_("Maverick MVDX register expected"),
474 N_("Maverick MVAX register expected"),
475 N_("Maverick DSPSC register expected"),
476 N_("iWMMXt data register expected"),
477 N_("iWMMXt control register expected"),
478 N_("iWMMXt scalar register expected"),
479 N_("XScale accumulator register expected"),
482 /* Some well known registers that we refer to directly elsewhere. */
487 /* ARM instructions take 4bytes in the object file, Thumb instructions
493 /* Basic string to match. */
494 const char *template;
496 /* Parameters to instruction. */
497 unsigned char operands[8];
499 /* Conditional tag - see opcode_lookup. */
500 unsigned int tag : 4;
502 /* Basic instruction code. */
503 unsigned int avalue : 28;
505 /* Thumb-format instruction code. */
508 /* Which architecture variant provides this instruction. */
509 const arm_feature_set *avariant;
510 const arm_feature_set *tvariant;
512 /* Function to call to encode instruction in ARM format. */
513 void (* aencode) (void);
515 /* Function to call to encode instruction in Thumb format. */
516 void (* tencode) (void);
519 /* Defines for various bits that we will want to toggle. */
520 #define INST_IMMEDIATE 0x02000000
521 #define OFFSET_REG 0x02000000
522 #define HWOFFSET_IMM 0x00400000
523 #define SHIFT_BY_REG 0x00000010
524 #define PRE_INDEX 0x01000000
525 #define INDEX_UP 0x00800000
526 #define WRITE_BACK 0x00200000
527 #define LDM_TYPE_2_OR_3 0x00400000
529 #define LITERAL_MASK 0xf000f000
530 #define OPCODE_MASK 0xfe1fffff
531 #define V4_STR_BIT 0x00000020
533 #define DATA_OP_SHIFT 21
535 #define T2_OPCODE_MASK 0xfe1fffff
536 #define T2_DATA_OP_SHIFT 21
538 /* Codes to distinguish the arithmetic instructions. */
549 #define OPCODE_CMP 10
550 #define OPCODE_CMN 11
551 #define OPCODE_ORR 12
552 #define OPCODE_MOV 13
553 #define OPCODE_BIC 14
554 #define OPCODE_MVN 15
556 #define T2_OPCODE_AND 0
557 #define T2_OPCODE_BIC 1
558 #define T2_OPCODE_ORR 2
559 #define T2_OPCODE_ORN 3
560 #define T2_OPCODE_EOR 4
561 #define T2_OPCODE_ADD 8
562 #define T2_OPCODE_ADC 10
563 #define T2_OPCODE_SBC 11
564 #define T2_OPCODE_SUB 13
565 #define T2_OPCODE_RSB 14
567 #define T_OPCODE_MUL 0x4340
568 #define T_OPCODE_TST 0x4200
569 #define T_OPCODE_CMN 0x42c0
570 #define T_OPCODE_NEG 0x4240
571 #define T_OPCODE_MVN 0x43c0
573 #define T_OPCODE_ADD_R3 0x1800
574 #define T_OPCODE_SUB_R3 0x1a00
575 #define T_OPCODE_ADD_HI 0x4400
576 #define T_OPCODE_ADD_ST 0xb000
577 #define T_OPCODE_SUB_ST 0xb080
578 #define T_OPCODE_ADD_SP 0xa800
579 #define T_OPCODE_ADD_PC 0xa000
580 #define T_OPCODE_ADD_I8 0x3000
581 #define T_OPCODE_SUB_I8 0x3800
582 #define T_OPCODE_ADD_I3 0x1c00
583 #define T_OPCODE_SUB_I3 0x1e00
585 #define T_OPCODE_ASR_R 0x4100
586 #define T_OPCODE_LSL_R 0x4080
587 #define T_OPCODE_LSR_R 0x40c0
588 #define T_OPCODE_ROR_R 0x41c0
589 #define T_OPCODE_ASR_I 0x1000
590 #define T_OPCODE_LSL_I 0x0000
591 #define T_OPCODE_LSR_I 0x0800
593 #define T_OPCODE_MOV_I8 0x2000
594 #define T_OPCODE_CMP_I8 0x2800
595 #define T_OPCODE_CMP_LR 0x4280
596 #define T_OPCODE_MOV_HR 0x4600
597 #define T_OPCODE_CMP_HR 0x4500
599 #define T_OPCODE_LDR_PC 0x4800
600 #define T_OPCODE_LDR_SP 0x9800
601 #define T_OPCODE_STR_SP 0x9000
602 #define T_OPCODE_LDR_IW 0x6800
603 #define T_OPCODE_STR_IW 0x6000
604 #define T_OPCODE_LDR_IH 0x8800
605 #define T_OPCODE_STR_IH 0x8000
606 #define T_OPCODE_LDR_IB 0x7800
607 #define T_OPCODE_STR_IB 0x7000
608 #define T_OPCODE_LDR_RW 0x5800
609 #define T_OPCODE_STR_RW 0x5000
610 #define T_OPCODE_LDR_RH 0x5a00
611 #define T_OPCODE_STR_RH 0x5200
612 #define T_OPCODE_LDR_RB 0x5c00
613 #define T_OPCODE_STR_RB 0x5400
615 #define T_OPCODE_PUSH 0xb400
616 #define T_OPCODE_POP 0xbc00
618 #define T_OPCODE_BRANCH 0xe000
620 #define THUMB_SIZE 2 /* Size of thumb instruction. */
621 #define THUMB_PP_PC_LR 0x0100
622 #define THUMB_LOAD_BIT 0x0800
623 #define THUMB2_LOAD_BIT 0x00100000
625 #define BAD_ARGS _("bad arguments to instruction")
626 #define BAD_PC _("r15 not allowed here")
627 #define BAD_COND _("instruction cannot be conditional")
628 #define BAD_OVERLAP _("registers may not be the same")
629 #define BAD_HIREG _("lo register required")
630 #define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
631 #define BAD_ADDR_MODE _("instruction does not accept this addressing mode");
632 #define BAD_BRANCH _("branch must be last instruction in IT block")
633 #define BAD_NOT_IT _("instruction not allowed in IT block")
635 static struct hash_control *arm_ops_hsh;
636 static struct hash_control *arm_cond_hsh;
637 static struct hash_control *arm_shift_hsh;
638 static struct hash_control *arm_psr_hsh;
639 static struct hash_control *arm_v7m_psr_hsh;
640 static struct hash_control *arm_reg_hsh;
641 static struct hash_control *arm_reloc_hsh;
642 static struct hash_control *arm_barrier_opt_hsh;
644 /* Stuff needed to resolve the label ambiguity
654 symbolS * last_label_seen;
655 static int label_is_thumb_function_name = FALSE;
657 /* Literal pool structure. Held on a per-section
658 and per-sub-section basis. */
660 #define MAX_LITERAL_POOL_SIZE 1024
661 typedef struct literal_pool
663 expressionS literals [MAX_LITERAL_POOL_SIZE];
664 unsigned int next_free_entry;
669 struct literal_pool * next;
672 /* Pointer to a linked list of literal pools. */
673 literal_pool * list_of_pools = NULL;
675 /* State variables for IT block handling. */
676 static bfd_boolean current_it_mask = 0;
677 static int current_cc;
682 /* This array holds the chars that always start a comment. If the
683 pre-processor is disabled, these aren't very useful. */
684 const char comment_chars[] = "@";
686 /* This array holds the chars that only start a comment at the beginning of
687 a line. If the line seems to have the form '# 123 filename'
688 .line and .file directives will appear in the pre-processed output. */
689 /* Note that input_file.c hand checks for '#' at the beginning of the
690 first line of the input file. This is because the compiler outputs
691 #NO_APP at the beginning of its output. */
692 /* Also note that comments like this one will always work. */
693 const char line_comment_chars[] = "#";
695 const char line_separator_chars[] = ";";
697 /* Chars that can be used to separate mant
698 from exp in floating point numbers. */
699 const char EXP_CHARS[] = "eE";
701 /* Chars that mean this number is a floating point constant. */
705 const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
707 /* Prefix characters that indicate the start of an immediate
709 #define is_immediate_prefix(C) ((C) == '#' || (C) == '$')
711 /* Separator character handling. */
713 #define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)
716 skip_past_char (char ** str, char c)
726 #define skip_past_comma(str) skip_past_char (str, ',')
728 /* Arithmetic expressions (possibly involving symbols). */
730 /* Return TRUE if anything in the expression is a bignum. */
733 walk_no_bignums (symbolS * sp)
735 if (symbol_get_value_expression (sp)->X_op == O_big)
738 if (symbol_get_value_expression (sp)->X_add_symbol)
740 return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
741 || (symbol_get_value_expression (sp)->X_op_symbol
742 && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
748 static int in_my_get_expression = 0;
750 /* Third argument to my_get_expression. */
751 #define GE_NO_PREFIX 0
752 #define GE_IMM_PREFIX 1
753 #define GE_OPT_PREFIX 2
754 /* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
755 immediates, as can be used in Neon VMVN and VMOV immediate instructions. */
756 #define GE_OPT_PREFIX_BIG 3
759 my_get_expression (expressionS * ep, char ** str, int prefix_mode)
764 /* In unified syntax, all prefixes are optional. */
766 prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
771 case GE_NO_PREFIX: break;
773 if (!is_immediate_prefix (**str))
775 inst.error = _("immediate expression requires a # prefix");
781 case GE_OPT_PREFIX_BIG:
782 if (is_immediate_prefix (**str))
788 memset (ep, 0, sizeof (expressionS));
790 save_in = input_line_pointer;
791 input_line_pointer = *str;
792 in_my_get_expression = 1;
793 seg = expression (ep);
794 in_my_get_expression = 0;
796 if (ep->X_op == O_illegal)
798 /* We found a bad expression in md_operand(). */
799 *str = input_line_pointer;
800 input_line_pointer = save_in;
801 if (inst.error == NULL)
802 inst.error = _("bad expression");
807 if (seg != absolute_section
808 && seg != text_section
809 && seg != data_section
810 && seg != bss_section
811 && seg != undefined_section)
813 inst.error = _("bad segment");
814 *str = input_line_pointer;
815 input_line_pointer = save_in;
820 /* Get rid of any bignums now, so that we don't generate an error for which
821 we can't establish a line number later on. Big numbers are never valid
822 in instructions, which is where this routine is always called. */
823 if (prefix_mode != GE_OPT_PREFIX_BIG
824 && (ep->X_op == O_big
826 && (walk_no_bignums (ep->X_add_symbol)
828 && walk_no_bignums (ep->X_op_symbol))))))
830 inst.error = _("invalid constant");
831 *str = input_line_pointer;
832 input_line_pointer = save_in;
836 *str = input_line_pointer;
837 input_line_pointer = save_in;
841 /* Turn a string in input_line_pointer into a floating point constant
842 of type TYPE, and store the appropriate bytes in *LITP. The number
843 of LITTLENUMS emitted is stored in *SIZEP. An error message is
844 returned, or NULL on OK.
846 Note that fp constants aren't represent in the normal way on the ARM.
847 In big endian mode, things are as expected. However, in little endian
848 mode fp constants are big-endian word-wise, and little-endian byte-wise
849 within the words. For example, (double) 1.1 in big endian mode is
850 the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
851 the byte sequence 99 99 f1 3f 9a 99 99 99.
853 ??? The format of 12 byte floats is uncertain according to gcc's arm.h. */
856 md_atof (int type, char * litP, int * sizeP)
859 LITTLENUM_TYPE words[MAX_LITTLENUMS];
891 return _("bad call to MD_ATOF()");
894 t = atof_ieee (input_line_pointer, type, words);
896 input_line_pointer = t;
899 if (target_big_endian)
901 for (i = 0; i < prec; i++)
903 md_number_to_chars (litP, (valueT) words[i], 2);
909 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
910 for (i = prec - 1; i >= 0; i--)
912 md_number_to_chars (litP, (valueT) words[i], 2);
916 /* For a 4 byte float the order of elements in `words' is 1 0.
917 For an 8 byte float the order is 1 0 3 2. */
918 for (i = 0; i < prec; i += 2)
920 md_number_to_chars (litP, (valueT) words[i + 1], 2);
921 md_number_to_chars (litP + 2, (valueT) words[i], 2);
929 /* We handle all bad expressions here, so that we can report the faulty
930 instruction in the error message. */
932 md_operand (expressionS * expr)
934 if (in_my_get_expression)
935 expr->X_op = O_illegal;
938 /* Immediate values. */
940 /* Generic immediate-value read function for use in directives.
941 Accepts anything that 'expression' can fold to a constant.
942 *val receives the number. */
945 immediate_for_directive (int *val)
948 exp.X_op = O_illegal;
950 if (is_immediate_prefix (*input_line_pointer))
952 input_line_pointer++;
956 if (exp.X_op != O_constant)
958 as_bad (_("expected #constant"));
959 ignore_rest_of_line ();
962 *val = exp.X_add_number;
967 /* Register parsing. */
969 /* Generic register parser. CCP points to what should be the
970 beginning of a register name. If it is indeed a valid register
971 name, advance CCP over it and return the reg_entry structure;
972 otherwise return NULL. Does not issue diagnostics. */
974 static struct reg_entry *
975 arm_reg_parse_multi (char **ccp)
979 struct reg_entry *reg;
981 #ifdef REGISTER_PREFIX
982 if (*start != REGISTER_PREFIX)
986 #ifdef OPTIONAL_REGISTER_PREFIX
987 if (*start == OPTIONAL_REGISTER_PREFIX)
992 if (!ISALPHA (*p) || !is_name_beginner (*p))
997 while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
999 reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);
1009 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1010 enum arm_reg_type type)
1012 /* Alternative syntaxes are accepted for a few register classes. */
1019 /* Generic coprocessor register names are allowed for these. */
1020 if (reg && reg->type == REG_TYPE_CN)
1025 /* For backward compatibility, a bare number is valid here. */
1027 unsigned long processor = strtoul (start, ccp, 10);
1028 if (*ccp != start && processor <= 15)
1032 case REG_TYPE_MMXWC:
1033 /* WC includes WCG. ??? I'm not sure this is true for all
1034 instructions that take WC registers. */
1035 if (reg && reg->type == REG_TYPE_MMXWCG)
1046 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1047 return value is the register number or FAIL. */
1050 arm_reg_parse (char **ccp, enum arm_reg_type type)
1053 struct reg_entry *reg = arm_reg_parse_multi (ccp);
1056 /* Do not allow a scalar (reg+index) to parse as a register. */
1057 if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1060 if (reg && reg->type == type)
1063 if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1070 /* Parse a Neon type specifier. *STR should point at the leading '.'
1071 character. Does no verification at this stage that the type fits the opcode
1078 Can all be legally parsed by this function.
1080 Fills in neon_type struct pointer with parsed information, and updates STR
1081 to point after the parsed type specifier. Returns SUCCESS if this was a legal
1082 type, FAIL if not. */
1085 parse_neon_type (struct neon_type *type, char **str)
1092 while (type->elems < NEON_MAX_TYPE_ELS)
1094 enum neon_el_type thistype = NT_untyped;
1095 unsigned thissize = -1u;
1102 /* Just a size without an explicit type. */
1106 switch (TOLOWER (*ptr))
1108 case 'i': thistype = NT_integer; break;
1109 case 'f': thistype = NT_float; break;
1110 case 'p': thistype = NT_poly; break;
1111 case 's': thistype = NT_signed; break;
1112 case 'u': thistype = NT_unsigned; break;
1114 as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1120 /* .f is an abbreviation for .f32. */
1121 if (thistype == NT_float && !ISDIGIT (*ptr))
1126 thissize = strtoul (ptr, &ptr, 10);
1128 if (thissize != 8 && thissize != 16 && thissize != 32
1131 as_bad (_("bad size %d in type specifier"), thissize);
1138 type->el[type->elems].type = thistype;
1139 type->el[type->elems].size = thissize;
1144 /* Empty/missing type is not a successful parse. */
1145 if (type->elems == 0)
1153 /* Errors may be set multiple times during parsing or bit encoding
1154 (particularly in the Neon bits), but usually the earliest error which is set
1155 will be the most meaningful. Avoid overwriting it with later (cascading)
1156 errors by calling this function. */
1159 first_error (const char *err)
1165 /* Parse a single type, e.g. ".s32", leading period included. */
1167 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1170 struct neon_type optype;
1174 if (parse_neon_type (&optype, &str) == SUCCESS)
1176 if (optype.elems == 1)
1177 *vectype = optype.el[0];
1180 first_error (_("only one type should be specified for operand"));
1186 first_error (_("vector type expected"));
1198 /* Special meanings for indices (which have a range of 0-7), which will fit into
1201 #define NEON_ALL_LANES 15
1202 #define NEON_INTERLEAVE_LANES 14
1204 /* Parse either a register or a scalar, with an optional type. Return the
1205 register number, and optionally fill in the actual type of the register
1206 when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1207 type/index information in *TYPEINFO. */
1210 parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
1211 enum arm_reg_type *rtype,
1212 struct neon_typed_alias *typeinfo)
1215 struct reg_entry *reg = arm_reg_parse_multi (&str);
1216 struct neon_typed_alias atype;
1217 struct neon_type_el parsetype;
1221 atype.eltype.type = NT_invtype;
1222 atype.eltype.size = -1;
1224 /* Try alternate syntax for some types of register. Note these are mutually
1225 exclusive with the Neon syntax extensions. */
1228 int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
1236 /* Undo polymorphism for Neon D and Q registers. */
1237 if (type == REG_TYPE_NDQ
1238 && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
1241 if (type != reg->type)
1247 if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
1249 if ((atype.defined & NTA_HASTYPE) != 0)
1251 first_error (_("can't redefine type for operand"));
1254 atype.defined |= NTA_HASTYPE;
1255 atype.eltype = parsetype;
1258 if (skip_past_char (&str, '[') == SUCCESS)
1260 if (type != REG_TYPE_VFD)
1262 first_error (_("only D registers may be indexed"));
1266 if ((atype.defined & NTA_HASINDEX) != 0)
1268 first_error (_("can't change index for operand"));
1272 atype.defined |= NTA_HASINDEX;
1274 if (skip_past_char (&str, ']') == SUCCESS)
1275 atype.index = NEON_ALL_LANES;
1280 my_get_expression (&exp, &str, GE_NO_PREFIX);
1282 if (exp.X_op != O_constant)
1284 first_error (_("constant expression required"));
1288 if (skip_past_char (&str, ']') == FAIL)
1291 atype.index = exp.X_add_number;
1306 /* Like arm_reg_parse, but allow allow the following extra features:
1307 - If RTYPE is non-zero, return the (possibly restricted) type of the
1308 register (e.g. Neon double or quad reg when either has been requested).
1309 - If this is a Neon vector type with additional type information, fill
1310 in the struct pointed to by VECTYPE (if non-NULL).
1311 This function will fault on encountering a scalar.
1315 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1316 enum arm_reg_type *rtype, struct neon_type_el *vectype)
1318 struct neon_typed_alias atype;
1320 int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1325 /* Do not allow a scalar (reg+index) to parse as a register. */
1326 if ((atype.defined & NTA_HASINDEX) != 0)
1328 first_error (_("register operand expected, but got scalar"));
1333 *vectype = atype.eltype;
1340 #define NEON_SCALAR_REG(X) ((X) >> 4)
1341 #define NEON_SCALAR_INDEX(X) ((X) & 15)
1343 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1344 have enough information to be able to do a good job bounds-checking. So, we
1345 just do easy checks here, and do further checks later. */
1348 parse_scalar (char **ccp, int elsize, struct neon_type_el *type)
1352 struct neon_typed_alias atype;
1354 reg = parse_typed_reg_or_scalar (&str, REG_TYPE_VFD, NULL, &atype);
1356 if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1359 if (atype.index == NEON_ALL_LANES)
1361 first_error (_("scalar must have an index"));
1364 else if (atype.index >= 64 / elsize)
1366 first_error (_("scalar index out of range"));
1371 *type = atype.eltype;
1375 return reg * 16 + atype.index;
1378 /* Parse an ARM register list. Returns the bitmask, or FAIL. */
1380 parse_reg_list (char ** strp)
1382 char * str = * strp;
1386 /* We come back here if we get ranges concatenated by '+' or '|'. */
1401 if ((reg = arm_reg_parse (&str, REG_TYPE_RN)) == FAIL)
1403 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
1413 first_error (_("bad range in register list"));
1417 for (i = cur_reg + 1; i < reg; i++)
1419 if (range & (1 << i))
1421 (_("Warning: duplicated register (r%d) in register list"),
1429 if (range & (1 << reg))
1430 as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
1432 else if (reg <= cur_reg)
1433 as_tsktsk (_("Warning: register range not in ascending order"));
1438 while (skip_past_comma (&str) != FAIL
1439 || (in_range = 1, *str++ == '-'));
1444 first_error (_("missing `}'"));
1452 if (my_get_expression (&expr, &str, GE_NO_PREFIX))
1455 if (expr.X_op == O_constant)
1457 if (expr.X_add_number
1458 != (expr.X_add_number & 0x0000ffff))
1460 inst.error = _("invalid register mask");
1464 if ((range & expr.X_add_number) != 0)
1466 int regno = range & expr.X_add_number;
1469 regno = (1 << regno) - 1;
1471 (_("Warning: duplicated register (r%d) in register list"),
1475 range |= expr.X_add_number;
1479 if (inst.reloc.type != 0)
1481 inst.error = _("expression too complex");
1485 memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
1486 inst.reloc.type = BFD_RELOC_ARM_MULTI;
1487 inst.reloc.pc_rel = 0;
1491 if (*str == '|' || *str == '+')
1497 while (another_range);
1503 /* Types of registers in a list. */
1512 /* Parse a VFP register list. If the string is invalid return FAIL.
1513 Otherwise return the number of registers, and set PBASE to the first
1514 register. Parses registers of type ETYPE.
1515 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1516 - Q registers can be used to specify pairs of D registers
1517 - { } can be omitted from around a singleton register list
1518 FIXME: This is not implemented, as it would require backtracking in
1521 This could be done (the meaning isn't really ambiguous), but doesn't
1522 fit in well with the current parsing framework.
1523 - 32 D registers may be used (also true for VFPv3).
1524 FIXME: Types are ignored in these register lists, which is probably a
1528 parse_vfp_reg_list (char **str, unsigned int *pbase, enum reg_list_els etype)
1532 enum arm_reg_type regtype = 0;
1536 unsigned long mask = 0;
1541 inst.error = _("expecting {");
1550 regtype = REG_TYPE_VFS;
1555 regtype = REG_TYPE_VFD;
1558 case REGLIST_NEON_D:
1559 regtype = REG_TYPE_NDQ;
1563 if (etype != REGLIST_VFP_S)
1565 /* VFPv3 allows 32 D registers. */
1566 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1570 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1573 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1580 base_reg = max_regs;
1584 int setmask = 1, addregs = 1;
1586 new_base = arm_typed_reg_parse (str, regtype, ®type, NULL);
1588 if (new_base == FAIL)
1590 first_error (_(reg_expected_msgs[regtype]));
1594 if (new_base >= max_regs)
1596 first_error (_("register out of range in list"));
1600 /* Note: a value of 2 * n is returned for the register Q<n>. */
1601 if (regtype == REG_TYPE_NQ)
1607 if (new_base < base_reg)
1608 base_reg = new_base;
1610 if (mask & (setmask << new_base))
1612 first_error (_("invalid register list"));
1616 if ((mask >> new_base) != 0 && ! warned)
1618 as_tsktsk (_("register list not in ascending order"));
1622 mask |= setmask << new_base;
1625 if (**str == '-') /* We have the start of a range expression */
1631 if ((high_range = arm_typed_reg_parse (str, regtype, NULL, NULL))
1634 inst.error = gettext (reg_expected_msgs[regtype]);
1638 if (high_range >= max_regs)
1640 first_error (_("register out of range in list"));
1644 if (regtype == REG_TYPE_NQ)
1645 high_range = high_range + 1;
1647 if (high_range <= new_base)
1649 inst.error = _("register range not in ascending order");
1653 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1655 if (mask & (setmask << new_base))
1657 inst.error = _("invalid register list");
1661 mask |= setmask << new_base;
1666 while (skip_past_comma (str) != FAIL);
1670 /* Sanity check -- should have raised a parse error above. */
1671 if (count == 0 || count > max_regs)
1676 /* Final test -- the registers must be consecutive. */
1678 for (i = 0; i < count; i++)
1680 if ((mask & (1u << i)) == 0)
1682 inst.error = _("non-contiguous register range");
1690 /* True if two alias types are the same. */
1693 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
1701 if (a->defined != b->defined)
1704 if ((a->defined & NTA_HASTYPE) != 0
1705 && (a->eltype.type != b->eltype.type
1706 || a->eltype.size != b->eltype.size))
1709 if ((a->defined & NTA_HASINDEX) != 0
1710 && (a->index != b->index))
1716 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
1717 The base register is put in *PBASE.
1718 The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
1720 The register stride (minus one) is put in bit 4 of the return value.
1721 Bits [6:5] encode the list length (minus one).
1722 The type of the list elements is put in *ELTYPE, if non-NULL. */
1724 #define NEON_LANE(X) ((X) & 0xf)
1725 #define NEON_REG_STRIDE(X) ((((X) >> 4) & 1) + 1)
1726 #define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)
1729 parse_neon_el_struct_list (char **str, unsigned *pbase,
1730 struct neon_type_el *eltype)
1737 int leading_brace = 0;
1738 enum arm_reg_type rtype = REG_TYPE_NDQ;
1740 const char *const incr_error = "register stride must be 1 or 2";
1741 const char *const type_error = "mismatched element/structure types in list";
1742 struct neon_typed_alias firsttype;
1744 if (skip_past_char (&ptr, '{') == SUCCESS)
1749 struct neon_typed_alias atype;
1750 int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);
1754 first_error (_(reg_expected_msgs[rtype]));
1761 if (rtype == REG_TYPE_NQ)
1768 else if (reg_incr == -1)
1770 reg_incr = getreg - base_reg;
1771 if (reg_incr < 1 || reg_incr > 2)
1773 first_error (_(incr_error));
1777 else if (getreg != base_reg + reg_incr * count)
1779 first_error (_(incr_error));
1783 if (!neon_alias_types_same (&atype, &firsttype))
1785 first_error (_(type_error));
1789 /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
1793 struct neon_typed_alias htype;
1794 int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
1796 lane = NEON_INTERLEAVE_LANES;
1797 else if (lane != NEON_INTERLEAVE_LANES)
1799 first_error (_(type_error));
1804 else if (reg_incr != 1)
1806 first_error (_("don't use Rn-Rm syntax with non-unit stride"));
1810 hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
1813 first_error (_(reg_expected_msgs[rtype]));
1816 if (!neon_alias_types_same (&htype, &firsttype))
1818 first_error (_(type_error));
1821 count += hireg + dregs - getreg;
1825 /* If we're using Q registers, we can't use [] or [n] syntax. */
1826 if (rtype == REG_TYPE_NQ)
1832 if ((atype.defined & NTA_HASINDEX) != 0)
1836 else if (lane != atype.index)
1838 first_error (_(type_error));
1842 else if (lane == -1)
1843 lane = NEON_INTERLEAVE_LANES;
1844 else if (lane != NEON_INTERLEAVE_LANES)
1846 first_error (_(type_error));
1851 while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);
1853 /* No lane set by [x]. We must be interleaving structures. */
1855 lane = NEON_INTERLEAVE_LANES;
1858 if (lane == -1 || base_reg == -1 || count < 1 || count > 4
1859 || (count > 1 && reg_incr == -1))
1861 first_error (_("error parsing element/structure list"));
1865 if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
1867 first_error (_("expected }"));
1875 *eltype = firsttype.eltype;
1880 return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1883 /* Parse an explicit relocation suffix on an expression. This is
1884 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1885 arm_reloc_hsh contains no entries, so this function can only
1886 succeed if there is no () after the word. Returns -1 on error,
1887 BFD_RELOC_UNUSED if there wasn't any suffix. */
1889 parse_reloc (char **str)
1891 struct reloc_entry *r;
1895 return BFD_RELOC_UNUSED;
1900 while (*q && *q != ')' && *q != ',')
1905 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1912 /* Directives: register aliases. */
1914 static struct reg_entry *
1915 insert_reg_alias (char *str, int number, int type)
1917 struct reg_entry *new;
1920 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1923 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1925 /* Only warn about a redefinition if it's not defined as the
1927 else if (new->number != number || new->type != type)
1928 as_warn (_("ignoring redefinition of register alias '%s'"), str);
1933 name = xstrdup (str);
1934 new = xmalloc (sizeof (struct reg_entry));
1937 new->number = number;
1939 new->builtin = FALSE;
1942 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1949 insert_neon_reg_alias (char *str, int number, int type,
1950 struct neon_typed_alias *atype)
1952 struct reg_entry *reg = insert_reg_alias (str, number, type);
1956 first_error (_("attempt to redefine typed alias"));
1962 reg->neon = xmalloc (sizeof (struct neon_typed_alias));
1963 *reg->neon = *atype;
1967 /* Look for the .req directive. This is of the form:
1969 new_register_name .req existing_register_name
1971 If we find one, or if it looks sufficiently like one that we want to
1972 handle any error here, return non-zero. Otherwise return zero. */
1975 create_register_alias (char * newname, char *p)
1977 struct reg_entry *old;
1978 char *oldname, *nbuf;
1981 /* The input scrubber ensures that whitespace after the mnemonic is
1982 collapsed to single spaces. */
1984 if (strncmp (oldname, " .req ", 6) != 0)
1988 if (*oldname == '\0')
1991 old = hash_find (arm_reg_hsh, oldname);
1994 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1998 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1999 the desired alias name, and p points to its end. If not, then
2000 the desired alias name is in the global original_case_string. */
2001 #ifdef TC_CASE_SENSITIVE
2004 newname = original_case_string;
2005 nlen = strlen (newname);
2008 nbuf = alloca (nlen + 1);
2009 memcpy (nbuf, newname, nlen);
2012 /* Create aliases under the new name as stated; an all-lowercase
2013 version of the new name; and an all-uppercase version of the new
2015 insert_reg_alias (nbuf, old->number, old->type);
2017 for (p = nbuf; *p; p++)
2020 if (strncmp (nbuf, newname, nlen))
2021 insert_reg_alias (nbuf, old->number, old->type);
2023 for (p = nbuf; *p; p++)
2026 if (strncmp (nbuf, newname, nlen))
2027 insert_reg_alias (nbuf, old->number, old->type);
2032 /* Create a Neon typed/indexed register alias using directives, e.g.:
2037 These typed registers can be used instead of the types specified after the
2038 Neon mnemonic, so long as all operands given have types. Types can also be
2039 specified directly, e.g.:
2040 vadd d0.s32, d1.s32, d2.s32
2044 create_neon_reg_alias (char *newname, char *p)
2046 enum arm_reg_type basetype;
2047 struct reg_entry *basereg;
2048 struct reg_entry mybasereg;
2049 struct neon_type ntype;
2050 struct neon_typed_alias typeinfo;
2051 char *namebuf, *nameend;
2054 typeinfo.defined = 0;
2055 typeinfo.eltype.type = NT_invtype;
2056 typeinfo.eltype.size = -1;
2057 typeinfo.index = -1;
2061 if (strncmp (p, " .dn ", 5) == 0)
2062 basetype = REG_TYPE_VFD;
2063 else if (strncmp (p, " .qn ", 5) == 0)
2064 basetype = REG_TYPE_NQ;
2073 basereg = arm_reg_parse_multi (&p);
2075 if (basereg && basereg->type != basetype)
2077 as_bad (_("bad type for register"));
2081 if (basereg == NULL)
2084 /* Try parsing as an integer. */
2085 my_get_expression (&exp, &p, GE_NO_PREFIX);
2086 if (exp.X_op != O_constant)
2088 as_bad (_("expression must be constant"));
2091 basereg = &mybasereg;
2092 basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
2098 typeinfo = *basereg->neon;
2100 if (parse_neon_type (&ntype, &p) == SUCCESS)
2102 /* We got a type. */
2103 if (typeinfo.defined & NTA_HASTYPE)
2105 as_bad (_("can't redefine the type of a register alias"));
2109 typeinfo.defined |= NTA_HASTYPE;
2110 if (ntype.elems != 1)
2112 as_bad (_("you must specify a single type only"));
2115 typeinfo.eltype = ntype.el[0];
2118 if (skip_past_char (&p, '[') == SUCCESS)
2121 /* We got a scalar index. */
2123 if (typeinfo.defined & NTA_HASINDEX)
2125 as_bad (_("can't redefine the index of a scalar alias"));
2129 my_get_expression (&exp, &p, GE_NO_PREFIX);
2131 if (exp.X_op != O_constant)
2133 as_bad (_("scalar index must be constant"));
2137 typeinfo.defined |= NTA_HASINDEX;
2138 typeinfo.index = exp.X_add_number;
2140 if (skip_past_char (&p, ']') == FAIL)
2142 as_bad (_("expecting ]"));
2147 namelen = nameend - newname;
2148 namebuf = alloca (namelen + 1);
2149 strncpy (namebuf, newname, namelen);
2150 namebuf[namelen] = '\0';
2152 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2153 typeinfo.defined != 0 ? &typeinfo : NULL);
2155 /* Insert name in all uppercase. */
2156 for (p = namebuf; *p; p++)
2159 if (strncmp (namebuf, newname, namelen))
2160 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2161 typeinfo.defined != 0 ? &typeinfo : NULL);
2163 /* Insert name in all lowercase. */
2164 for (p = namebuf; *p; p++)
2167 if (strncmp (namebuf, newname, namelen))
2168 insert_neon_reg_alias (namebuf, basereg->number, basetype,
2169 typeinfo.defined != 0 ? &typeinfo : NULL);
2174 /* Should never be called, as .req goes between the alias and the
2175 register name, not at the beginning of the line. */
2177 s_req (int a ATTRIBUTE_UNUSED)
2179 as_bad (_("invalid syntax for .req directive"));
2183 s_dn (int a ATTRIBUTE_UNUSED)
2185 as_bad (_("invalid syntax for .dn directive"));
2189 s_qn (int a ATTRIBUTE_UNUSED)
2191 as_bad (_("invalid syntax for .qn directive"));
2194 /* The .unreq directive deletes an alias which was previously defined
2195 by .req. For example:
2201 s_unreq (int a ATTRIBUTE_UNUSED)
2206 name = input_line_pointer;
2208 while (*input_line_pointer != 0
2209 && *input_line_pointer != ' '
2210 && *input_line_pointer != '\n')
2211 ++input_line_pointer;
2213 saved_char = *input_line_pointer;
2214 *input_line_pointer = 0;
2217 as_bad (_("invalid syntax for .unreq directive"));
2220 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
2223 as_bad (_("unknown register alias '%s'"), name);
2224 else if (reg->builtin)
2225 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
2229 hash_delete (arm_reg_hsh, name);
2230 free ((char *) reg->name);
2237 *input_line_pointer = saved_char;
2238 demand_empty_rest_of_line ();
2241 /* Directives: Instruction set selection. */
2244 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2245 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2246 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2247 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
2249 static enum mstate mapstate = MAP_UNDEFINED;
2252 mapping_state (enum mstate state)
2255 const char * symname;
2258 if (mapstate == state)
2259 /* The mapping symbol has already been emitted.
2260 There is nothing else to do. */
2269 type = BSF_NO_FLAGS;
2273 type = BSF_NO_FLAGS;
2277 type = BSF_NO_FLAGS;
2285 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
2287 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
2288 symbol_table_insert (symbolP);
2289 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2294 THUMB_SET_FUNC (symbolP, 0);
2295 ARM_SET_THUMB (symbolP, 0);
2296 ARM_SET_INTERWORK (symbolP, support_interwork);
2300 THUMB_SET_FUNC (symbolP, 1);
2301 ARM_SET_THUMB (symbolP, 1);
2302 ARM_SET_INTERWORK (symbolP, support_interwork);
2311 #define mapping_state(x) /* nothing */
2314 /* Find the real, Thumb encoded start of a Thumb function. */
2317 find_real_start (symbolS * symbolP)
2320 const char * name = S_GET_NAME (symbolP);
2321 symbolS * new_target;
2323 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
2324 #define STUB_NAME ".real_start_of"
2329 /* The compiler may generate BL instructions to local labels because
2330 it needs to perform a branch to a far away location. These labels
2331 do not have a corresponding ".real_start_of" label. We check
2332 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
2333 the ".real_start_of" convention for nonlocal branches. */
2334 if (S_IS_LOCAL (symbolP) || name[0] == '.')
2337 real_start = ACONCAT ((STUB_NAME, name, NULL));
2338 new_target = symbol_find (real_start);
2340 if (new_target == NULL)
2342 as_warn ("Failed to find real start of function: %s\n", name);
2343 new_target = symbolP;
2350 opcode_select (int width)
2357 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
2358 as_bad (_("selected processor does not support THUMB opcodes"));
2361 /* No need to force the alignment, since we will have been
2362 coming from ARM mode, which is word-aligned. */
2363 record_alignment (now_seg, 1);
2365 mapping_state (MAP_THUMB);
2371 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
2372 as_bad (_("selected processor does not support ARM opcodes"));
2377 frag_align (2, 0, 0);
2379 record_alignment (now_seg, 1);
2381 mapping_state (MAP_ARM);
2385 as_bad (_("invalid instruction size selected (%d)"), width);
2390 s_arm (int ignore ATTRIBUTE_UNUSED)
2393 demand_empty_rest_of_line ();
2397 s_thumb (int ignore ATTRIBUTE_UNUSED)
2400 demand_empty_rest_of_line ();
2404 s_code (int unused ATTRIBUTE_UNUSED)
2408 temp = get_absolute_expression ();
2413 opcode_select (temp);
2417 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
2422 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
2424 /* If we are not already in thumb mode go into it, EVEN if
2425 the target processor does not support thumb instructions.
2426 This is used by gcc/config/arm/lib1funcs.asm for example
2427 to compile interworking support functions even if the
2428 target processor should not support interworking. */
2432 record_alignment (now_seg, 1);
2435 demand_empty_rest_of_line ();
2439 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
2443 /* The following label is the name/address of the start of a Thumb function.
2444 We need to know this for the interworking support. */
2445 label_is_thumb_function_name = TRUE;
2448 /* Perform a .set directive, but also mark the alias as
2449 being a thumb function. */
2452 s_thumb_set (int equiv)
2454 /* XXX the following is a duplicate of the code for s_set() in read.c
2455 We cannot just call that code as we need to get at the symbol that
2462 /* Especial apologies for the random logic:
2463 This just grew, and could be parsed much more simply!
2465 name = input_line_pointer;
2466 delim = get_symbol_end ();
2467 end_name = input_line_pointer;
2470 if (*input_line_pointer != ',')
2473 as_bad (_("expected comma after name \"%s\""), name);
2475 ignore_rest_of_line ();
2479 input_line_pointer++;
2482 if (name[0] == '.' && name[1] == '\0')
2484 /* XXX - this should not happen to .thumb_set. */
2488 if ((symbolP = symbol_find (name)) == NULL
2489 && (symbolP = md_undefined_symbol (name)) == NULL)
2492 /* When doing symbol listings, play games with dummy fragments living
2493 outside the normal fragment chain to record the file and line info
2495 if (listing & LISTING_SYMBOLS)
2497 extern struct list_info_struct * listing_tail;
2498 fragS * dummy_frag = xmalloc (sizeof (fragS));
2500 memset (dummy_frag, 0, sizeof (fragS));
2501 dummy_frag->fr_type = rs_fill;
2502 dummy_frag->line = listing_tail;
2503 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
2504 dummy_frag->fr_symbol = symbolP;
2508 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2511 /* "set" symbols are local unless otherwise specified. */
2512 SF_SET_LOCAL (symbolP);
2513 #endif /* OBJ_COFF */
2514 } /* Make a new symbol. */
2516 symbol_table_insert (symbolP);
2521 && S_IS_DEFINED (symbolP)
2522 && S_GET_SEGMENT (symbolP) != reg_section)
2523 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
2525 pseudo_set (symbolP);
2527 demand_empty_rest_of_line ();
2529 /* XXX Now we come to the Thumb specific bit of code. */
2531 THUMB_SET_FUNC (symbolP, 1);
2532 ARM_SET_THUMB (symbolP, 1);
2533 #if defined OBJ_ELF || defined OBJ_COFF
2534 ARM_SET_INTERWORK (symbolP, support_interwork);
2538 /* Directives: Mode selection. */
2540 /* .syntax [unified|divided] - choose the new unified syntax
2541 (same for Arm and Thumb encoding, modulo slight differences in what
2542 can be represented) or the old divergent syntax for each mode. */
2544 s_syntax (int unused ATTRIBUTE_UNUSED)
2548 name = input_line_pointer;
2549 delim = get_symbol_end ();
2551 if (!strcasecmp (name, "unified"))
2552 unified_syntax = TRUE;
2553 else if (!strcasecmp (name, "divided"))
2554 unified_syntax = FALSE;
2557 as_bad (_("unrecognized syntax mode \"%s\""), name);
2560 *input_line_pointer = delim;
2561 demand_empty_rest_of_line ();
2564 /* Directives: sectioning and alignment. */
2566 /* Same as s_align_ptwo but align 0 => align 2. */
2569 s_align (int unused ATTRIBUTE_UNUSED)
2573 long max_alignment = 15;
2575 temp = get_absolute_expression ();
2576 if (temp > max_alignment)
2577 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2580 as_bad (_("alignment negative. 0 assumed."));
2584 if (*input_line_pointer == ',')
2586 input_line_pointer++;
2587 temp_fill = get_absolute_expression ();
2595 /* Only make a frag if we HAVE to. */
2596 if (temp && !need_pass_2)
2597 frag_align (temp, (int) temp_fill, 0);
2598 demand_empty_rest_of_line ();
2600 record_alignment (now_seg, temp);
2604 s_bss (int ignore ATTRIBUTE_UNUSED)
2606 /* We don't support putting frags in the BSS segment, we fake it by
2607 marking in_bss, then looking at s_skip for clues. */
2608 subseg_set (bss_section, 0);
2609 demand_empty_rest_of_line ();
2610 mapping_state (MAP_DATA);
2614 s_even (int ignore ATTRIBUTE_UNUSED)
2616 /* Never make frag if expect extra pass. */
2618 frag_align (1, 0, 0);
2620 record_alignment (now_seg, 1);
2622 demand_empty_rest_of_line ();
2625 /* Directives: Literal pools. */
2627 static literal_pool *
2628 find_literal_pool (void)
2630 literal_pool * pool;
2632 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2634 if (pool->section == now_seg
2635 && pool->sub_section == now_subseg)
2642 static literal_pool *
2643 find_or_make_literal_pool (void)
2645 /* Next literal pool ID number. */
2646 static unsigned int latest_pool_num = 1;
2647 literal_pool * pool;
2649 pool = find_literal_pool ();
2653 /* Create a new pool. */
2654 pool = xmalloc (sizeof (* pool));
2658 pool->next_free_entry = 0;
2659 pool->section = now_seg;
2660 pool->sub_section = now_subseg;
2661 pool->next = list_of_pools;
2662 pool->symbol = NULL;
2664 /* Add it to the list. */
2665 list_of_pools = pool;
2668 /* New pools, and emptied pools, will have a NULL symbol. */
2669 if (pool->symbol == NULL)
2671 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2672 (valueT) 0, &zero_address_frag);
2673 pool->id = latest_pool_num ++;
2680 /* Add the literal in the global 'inst'
2681 structure to the relevent literal pool. */
2684 add_to_lit_pool (void)
2686 literal_pool * pool;
2689 pool = find_or_make_literal_pool ();
2691 /* Check if this literal value is already in the pool. */
2692 for (entry = 0; entry < pool->next_free_entry; entry ++)
2694 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2695 && (inst.reloc.exp.X_op == O_constant)
2696 && (pool->literals[entry].X_add_number
2697 == inst.reloc.exp.X_add_number)
2698 && (pool->literals[entry].X_unsigned
2699 == inst.reloc.exp.X_unsigned))
2702 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2703 && (inst.reloc.exp.X_op == O_symbol)
2704 && (pool->literals[entry].X_add_number
2705 == inst.reloc.exp.X_add_number)
2706 && (pool->literals[entry].X_add_symbol
2707 == inst.reloc.exp.X_add_symbol)
2708 && (pool->literals[entry].X_op_symbol
2709 == inst.reloc.exp.X_op_symbol))
2713 /* Do we need to create a new entry? */
2714 if (entry == pool->next_free_entry)
2716 if (entry >= MAX_LITERAL_POOL_SIZE)
2718 inst.error = _("literal pool overflow");
2722 pool->literals[entry] = inst.reloc.exp;
2723 pool->next_free_entry += 1;
2726 inst.reloc.exp.X_op = O_symbol;
2727 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2728 inst.reloc.exp.X_add_symbol = pool->symbol;
2733 /* Can't use symbol_new here, so have to create a symbol and then at
2734 a later date assign it a value. Thats what these functions do. */
2737 symbol_locate (symbolS * symbolP,
2738 const char * name, /* It is copied, the caller can modify. */
2739 segT segment, /* Segment identifier (SEG_<something>). */
2740 valueT valu, /* Symbol value. */
2741 fragS * frag) /* Associated fragment. */
2743 unsigned int name_length;
2744 char * preserved_copy_of_name;
2746 name_length = strlen (name) + 1; /* +1 for \0. */
2747 obstack_grow (¬es, name, name_length);
2748 preserved_copy_of_name = obstack_finish (¬es);
2750 #ifdef tc_canonicalize_symbol_name
2751 preserved_copy_of_name =
2752 tc_canonicalize_symbol_name (preserved_copy_of_name);
2755 S_SET_NAME (symbolP, preserved_copy_of_name);
2757 S_SET_SEGMENT (symbolP, segment);
2758 S_SET_VALUE (symbolP, valu);
2759 symbol_clear_list_pointers (symbolP);
2761 symbol_set_frag (symbolP, frag);
2763 /* Link to end of symbol chain. */
2765 extern int symbol_table_frozen;
2767 if (symbol_table_frozen)
2771 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
2773 obj_symbol_new_hook (symbolP);
2775 #ifdef tc_symbol_new_hook
2776 tc_symbol_new_hook (symbolP);
2780 verify_symbol_chain (symbol_rootP, symbol_lastP);
2781 #endif /* DEBUG_SYMS */
2786 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2789 literal_pool * pool;
2792 pool = find_literal_pool ();
2794 || pool->symbol == NULL
2795 || pool->next_free_entry == 0)
2798 mapping_state (MAP_DATA);
2800 /* Align pool as you have word accesses.
2801 Only make a frag if we have to. */
2803 frag_align (2, 0, 0);
2805 record_alignment (now_seg, 2);
2807 sprintf (sym_name, "$$lit_\002%x", pool->id);
2809 symbol_locate (pool->symbol, sym_name, now_seg,
2810 (valueT) frag_now_fix (), frag_now);
2811 symbol_table_insert (pool->symbol);
2813 ARM_SET_THUMB (pool->symbol, thumb_mode);
2815 #if defined OBJ_COFF || defined OBJ_ELF
2816 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2819 for (entry = 0; entry < pool->next_free_entry; entry ++)
2820 /* First output the expression in the instruction to the pool. */
2821 emit_expr (&(pool->literals[entry]), 4); /* .word */
2823 /* Mark the pool as empty. */
2824 pool->next_free_entry = 0;
2825 pool->symbol = NULL;
2829 /* Forward declarations for functions below, in the MD interface
2831 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2832 static valueT create_unwind_entry (int);
2833 static void start_unwind_section (const segT, int);
2834 static void add_unwind_opcode (valueT, int);
2835 static void flush_pending_unwind (void);
2837 /* Directives: Data. */
2840 s_arm_elf_cons (int nbytes)
2844 #ifdef md_flush_pending_output
2845 md_flush_pending_output ();
2848 if (is_it_end_of_statement ())
2850 demand_empty_rest_of_line ();
2854 #ifdef md_cons_align
2855 md_cons_align (nbytes);
2858 mapping_state (MAP_DATA);
2862 char *base = input_line_pointer;
2866 if (exp.X_op != O_symbol)
2867 emit_expr (&exp, (unsigned int) nbytes);
2870 char *before_reloc = input_line_pointer;
2871 reloc = parse_reloc (&input_line_pointer);
2874 as_bad (_("unrecognized relocation suffix"));
2875 ignore_rest_of_line ();
2878 else if (reloc == BFD_RELOC_UNUSED)
2879 emit_expr (&exp, (unsigned int) nbytes);
2882 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2883 int size = bfd_get_reloc_size (howto);
2885 if (reloc == BFD_RELOC_ARM_PLT32)
2887 as_bad (_("(plt) is only valid on branch targets"));
2888 reloc = BFD_RELOC_UNUSED;
2893 as_bad (_("%s relocations do not fit in %d bytes"),
2894 howto->name, nbytes);
2897 /* We've parsed an expression stopping at O_symbol.
2898 But there may be more expression left now that we
2899 have parsed the relocation marker. Parse it again.
2900 XXX Surely there is a cleaner way to do this. */
2901 char *p = input_line_pointer;
2903 char *save_buf = alloca (input_line_pointer - base);
2904 memcpy (save_buf, base, input_line_pointer - base);
2905 memmove (base + (input_line_pointer - before_reloc),
2906 base, before_reloc - base);
2908 input_line_pointer = base + (input_line_pointer-before_reloc);
2910 memcpy (base, save_buf, p - base);
2912 offset = nbytes - size;
2913 p = frag_more ((int) nbytes);
2914 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
2915 size, &exp, 0, reloc);
2920 while (*input_line_pointer++ == ',');
2922 /* Put terminator back into stream. */
2923 input_line_pointer --;
2924 demand_empty_rest_of_line ();
2928 /* Parse a .rel31 directive. */
2931 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2938 if (*input_line_pointer == '1')
2939 highbit = 0x80000000;
2940 else if (*input_line_pointer != '0')
2941 as_bad (_("expected 0 or 1"));
2943 input_line_pointer++;
2944 if (*input_line_pointer != ',')
2945 as_bad (_("missing comma"));
2946 input_line_pointer++;
2948 #ifdef md_flush_pending_output
2949 md_flush_pending_output ();
2952 #ifdef md_cons_align
2956 mapping_state (MAP_DATA);
2961 md_number_to_chars (p, highbit, 4);
2962 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
2963 BFD_RELOC_ARM_PREL31);
2965 demand_empty_rest_of_line ();
2968 /* Directives: AEABI stack-unwind tables. */
2970 /* Parse an unwind_fnstart directive. Simply records the current location. */
2973 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
2975 demand_empty_rest_of_line ();
2976 /* Mark the start of the function. */
2977 unwind.proc_start = expr_build_dot ();
2979 /* Reset the rest of the unwind info. */
2980 unwind.opcode_count = 0;
2981 unwind.table_entry = NULL;
2982 unwind.personality_routine = NULL;
2983 unwind.personality_index = -1;
2984 unwind.frame_size = 0;
2985 unwind.fp_offset = 0;
2988 unwind.sp_restored = 0;
2992 /* Parse a handlerdata directive. Creates the exception handling table entry
2993 for the function. */
2996 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
2998 demand_empty_rest_of_line ();
2999 if (unwind.table_entry)
3000 as_bad (_("dupicate .handlerdata directive"));
3002 create_unwind_entry (1);
3005 /* Parse an unwind_fnend directive. Generates the index table entry. */
3008 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
3014 demand_empty_rest_of_line ();
3016 /* Add eh table entry. */
3017 if (unwind.table_entry == NULL)
3018 val = create_unwind_entry (0);
3022 /* Add index table entry. This is two words. */
3023 start_unwind_section (unwind.saved_seg, 1);
3024 frag_align (2, 0, 0);
3025 record_alignment (now_seg, 2);
3027 ptr = frag_more (8);
3028 where = frag_now_fix () - 8;
3030 /* Self relative offset of the function start. */
3031 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
3032 BFD_RELOC_ARM_PREL31);
3034 /* Indicate dependency on EHABI-defined personality routines to the
3035 linker, if it hasn't been done already. */
3036 if (unwind.personality_index >= 0 && unwind.personality_index < 3
3037 && !(marked_pr_dependency & (1 << unwind.personality_index)))
3039 static const char *const name[] = {
3040 "__aeabi_unwind_cpp_pr0",
3041 "__aeabi_unwind_cpp_pr1",
3042 "__aeabi_unwind_cpp_pr2"
3044 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
3045 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
3046 marked_pr_dependency |= 1 << unwind.personality_index;
3047 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
3048 = marked_pr_dependency;
3052 /* Inline exception table entry. */
3053 md_number_to_chars (ptr + 4, val, 4);
3055 /* Self relative offset of the table entry. */
3056 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
3057 BFD_RELOC_ARM_PREL31);
3059 /* Restore the original section. */
3060 subseg_set (unwind.saved_seg, unwind.saved_subseg);
3064 /* Parse an unwind_cantunwind directive. */
3067 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
3069 demand_empty_rest_of_line ();
3070 if (unwind.personality_routine || unwind.personality_index != -1)
3071 as_bad (_("personality routine specified for cantunwind frame"));
3073 unwind.personality_index = -2;
3077 /* Parse a personalityindex directive. */
3080 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
3084 if (unwind.personality_routine || unwind.personality_index != -1)
3085 as_bad (_("duplicate .personalityindex directive"));
3089 if (exp.X_op != O_constant
3090 || exp.X_add_number < 0 || exp.X_add_number > 15)
3092 as_bad (_("bad personality routine number"));
3093 ignore_rest_of_line ();
3097 unwind.personality_index = exp.X_add_number;
3099 demand_empty_rest_of_line ();
3103 /* Parse a personality directive. */
3106 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
3110 if (unwind.personality_routine || unwind.personality_index != -1)
3111 as_bad (_("duplicate .personality directive"));
3113 name = input_line_pointer;
3114 c = get_symbol_end ();
3115 p = input_line_pointer;
3116 unwind.personality_routine = symbol_find_or_make (name);
3118 demand_empty_rest_of_line ();
3122 /* Parse a directive saving core registers. */
3125 s_arm_unwind_save_core (void)
3131 range = parse_reg_list (&input_line_pointer);
3134 as_bad (_("expected register list"));
3135 ignore_rest_of_line ();
3139 demand_empty_rest_of_line ();
3141 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
3142 into .unwind_save {..., sp...}. We aren't bothered about the value of
3143 ip because it is clobbered by calls. */
3144 if (unwind.sp_restored && unwind.fp_reg == 12
3145 && (range & 0x3000) == 0x1000)
3147 unwind.opcode_count--;
3148 unwind.sp_restored = 0;
3149 range = (range | 0x2000) & ~0x1000;
3150 unwind.pending_offset = 0;
3156 /* See if we can use the short opcodes. These pop a block of up to 8
3157 registers starting with r4, plus maybe r14. */
3158 for (n = 0; n < 8; n++)
3160 /* Break at the first non-saved register. */
3161 if ((range & (1 << (n + 4))) == 0)
3164 /* See if there are any other bits set. */
3165 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
3167 /* Use the long form. */
3168 op = 0x8000 | ((range >> 4) & 0xfff);
3169 add_unwind_opcode (op, 2);
3173 /* Use the short form. */
3175 op = 0xa8; /* Pop r14. */
3177 op = 0xa0; /* Do not pop r14. */
3179 add_unwind_opcode (op, 1);
3186 op = 0xb100 | (range & 0xf);
3187 add_unwind_opcode (op, 2);
3190 /* Record the number of bytes pushed. */
3191 for (n = 0; n < 16; n++)
3193 if (range & (1 << n))
3194 unwind.frame_size += 4;
3199 /* Parse a directive saving FPA registers. */
3202 s_arm_unwind_save_fpa (int reg)
3208 /* Get Number of registers to transfer. */
3209 if (skip_past_comma (&input_line_pointer) != FAIL)
3212 exp.X_op = O_illegal;
3214 if (exp.X_op != O_constant)
3216 as_bad (_("expected , <constant>"));
3217 ignore_rest_of_line ();
3221 num_regs = exp.X_add_number;
3223 if (num_regs < 1 || num_regs > 4)
3225 as_bad (_("number of registers must be in the range [1:4]"));
3226 ignore_rest_of_line ();
3230 demand_empty_rest_of_line ();
3235 op = 0xb4 | (num_regs - 1);
3236 add_unwind_opcode (op, 1);
3241 op = 0xc800 | (reg << 4) | (num_regs - 1);
3242 add_unwind_opcode (op, 2);
3244 unwind.frame_size += num_regs * 12;
3248 /* Parse a directive saving VFP registers. */
3251 s_arm_unwind_save_vfp (void)
3257 count = parse_vfp_reg_list (&input_line_pointer, ®, REGLIST_VFP_D);
3260 as_bad (_("expected register list"));
3261 ignore_rest_of_line ();
3265 demand_empty_rest_of_line ();
3270 op = 0xb8 | (count - 1);
3271 add_unwind_opcode (op, 1);
3276 op = 0xb300 | (reg << 4) | (count - 1);
3277 add_unwind_opcode (op, 2);
3279 unwind.frame_size += count * 8 + 4;
3283 /* Parse a directive saving iWMMXt data registers. */
3286 s_arm_unwind_save_mmxwr (void)
3294 if (*input_line_pointer == '{')
3295 input_line_pointer++;
3299 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3303 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3308 as_tsktsk (_("register list not in ascending order"));
3311 if (*input_line_pointer == '-')
3313 input_line_pointer++;
3314 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
3317 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
3320 else if (reg >= hi_reg)
3322 as_bad (_("bad register range"));
3325 for (; reg < hi_reg; reg++)
3329 while (skip_past_comma (&input_line_pointer) != FAIL);
3331 if (*input_line_pointer == '}')
3332 input_line_pointer++;
3334 demand_empty_rest_of_line ();
3336 /* Generate any deferred opcodes because we're going to be looking at
3338 flush_pending_unwind ();
3340 for (i = 0; i < 16; i++)
3342 if (mask & (1 << i))
3343 unwind.frame_size += 8;
3346 /* Attempt to combine with a previous opcode. We do this because gcc
3347 likes to output separate unwind directives for a single block of
3349 if (unwind.opcode_count > 0)
3351 i = unwind.opcodes[unwind.opcode_count - 1];
3352 if ((i & 0xf8) == 0xc0)
3355 /* Only merge if the blocks are contiguous. */
3358 if ((mask & 0xfe00) == (1 << 9))
3360 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
3361 unwind.opcode_count--;
3364 else if (i == 6 && unwind.opcode_count >= 2)
3366 i = unwind.opcodes[unwind.opcode_count - 2];
3370 op = 0xffff << (reg - 1);
3372 || ((mask & op) == (1u << (reg - 1))))
3374 op = (1 << (reg + i + 1)) - 1;
3375 op &= ~((1 << reg) - 1);
3377 unwind.opcode_count -= 2;
3384 /* We want to generate opcodes in the order the registers have been
3385 saved, ie. descending order. */
3386 for (reg = 15; reg >= -1; reg--)
3388 /* Save registers in blocks. */
3390 || !(mask & (1 << reg)))
3392 /* We found an unsaved reg. Generate opcodes to save the
3393 preceeding block. */
3399 op = 0xc0 | (hi_reg - 10);
3400 add_unwind_opcode (op, 1);
3405 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
3406 add_unwind_opcode (op, 2);
3415 ignore_rest_of_line ();
3419 s_arm_unwind_save_mmxwcg (void)
3426 if (*input_line_pointer == '{')
3427 input_line_pointer++;
3431 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3435 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3441 as_tsktsk (_("register list not in ascending order"));
3444 if (*input_line_pointer == '-')
3446 input_line_pointer++;
3447 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
3450 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
3453 else if (reg >= hi_reg)
3455 as_bad (_("bad register range"));
3458 for (; reg < hi_reg; reg++)
3462 while (skip_past_comma (&input_line_pointer) != FAIL);
3464 if (*input_line_pointer == '}')
3465 input_line_pointer++;
3467 demand_empty_rest_of_line ();
3469 /* Generate any deferred opcodes because we're going to be looking at
3471 flush_pending_unwind ();
3473 for (reg = 0; reg < 16; reg++)
3475 if (mask & (1 << reg))
3476 unwind.frame_size += 4;
3479 add_unwind_opcode (op, 2);
3482 ignore_rest_of_line ();
3486 /* Parse an unwind_save directive. */
3489 s_arm_unwind_save (int ignored ATTRIBUTE_UNUSED)
3492 struct reg_entry *reg;
3493 bfd_boolean had_brace = FALSE;
3495 /* Figure out what sort of save we have. */
3496 peek = input_line_pointer;
3504 reg = arm_reg_parse_multi (&peek);
3508 as_bad (_("register expected"));
3509 ignore_rest_of_line ();
3518 as_bad (_("FPA .unwind_save does not take a register list"));
3519 ignore_rest_of_line ();
3522 s_arm_unwind_save_fpa (reg->number);
3525 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3526 case REG_TYPE_VFD: s_arm_unwind_save_vfp (); return;
3527 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3528 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3531 as_bad (_(".unwind_save does not support this kind of register"));
3532 ignore_rest_of_line ();
3537 /* Parse an unwind_movsp directive. */
3540 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3545 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3548 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3549 ignore_rest_of_line ();
3552 demand_empty_rest_of_line ();
3554 if (reg == REG_SP || reg == REG_PC)
3556 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
3560 if (unwind.fp_reg != REG_SP)
3561 as_bad (_("unexpected .unwind_movsp directive"));
3563 /* Generate opcode to restore the value. */
3565 add_unwind_opcode (op, 1);
3567 /* Record the information for later. */
3568 unwind.fp_reg = reg;
3569 unwind.fp_offset = unwind.frame_size;
3570 unwind.sp_restored = 1;
3573 /* Parse an unwind_pad directive. */
3576 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3580 if (immediate_for_directive (&offset) == FAIL)
3585 as_bad (_("stack increment must be multiple of 4"));
3586 ignore_rest_of_line ();
3590 /* Don't generate any opcodes, just record the details for later. */
3591 unwind.frame_size += offset;
3592 unwind.pending_offset += offset;
3594 demand_empty_rest_of_line ();
3597 /* Parse an unwind_setfp directive. */
3600 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3606 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3607 if (skip_past_comma (&input_line_pointer) == FAIL)
3610 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
3612 if (fp_reg == FAIL || sp_reg == FAIL)
3614 as_bad (_("expected <reg>, <reg>"));
3615 ignore_rest_of_line ();
3619 /* Optional constant. */
3620 if (skip_past_comma (&input_line_pointer) != FAIL)
3622 if (immediate_for_directive (&offset) == FAIL)
3628 demand_empty_rest_of_line ();
3630 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
3632 as_bad (_("register must be either sp or set by a previous"
3633 "unwind_movsp directive"));
3637 /* Don't generate any opcodes, just record the information for later. */
3638 unwind.fp_reg = fp_reg;
3641 unwind.fp_offset = unwind.frame_size - offset;
3643 unwind.fp_offset -= offset;
3646 /* Parse an unwind_raw directive. */
3649 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3652 /* This is an arbitrary limit. */
3653 unsigned char op[16];
3657 if (exp.X_op == O_constant
3658 && skip_past_comma (&input_line_pointer) != FAIL)
3660 unwind.frame_size += exp.X_add_number;
3664 exp.X_op = O_illegal;
3666 if (exp.X_op != O_constant)
3668 as_bad (_("expected <offset>, <opcode>"));
3669 ignore_rest_of_line ();
3675 /* Parse the opcode. */
3680 as_bad (_("unwind opcode too long"));
3681 ignore_rest_of_line ();
3683 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3685 as_bad (_("invalid unwind opcode"));
3686 ignore_rest_of_line ();
3689 op[count++] = exp.X_add_number;
3691 /* Parse the next byte. */
3692 if (skip_past_comma (&input_line_pointer) == FAIL)
3698 /* Add the opcode bytes in reverse order. */
3700 add_unwind_opcode (op[count], 1);
3702 demand_empty_rest_of_line ();
3706 /* Parse a .eabi_attribute directive. */
3709 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3712 bfd_boolean is_string;
3719 if (exp.X_op != O_constant)
3722 tag = exp.X_add_number;
3723 if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
3728 if (skip_past_comma (&input_line_pointer) == FAIL)
3730 if (tag == 32 || !is_string)
3733 if (exp.X_op != O_constant)
3735 as_bad (_("expected numeric constant"));
3736 ignore_rest_of_line ();
3739 i = exp.X_add_number;
3741 if (tag == Tag_compatibility
3742 && skip_past_comma (&input_line_pointer) == FAIL)
3744 as_bad (_("expected comma"));
3745 ignore_rest_of_line ();
3750 skip_whitespace(input_line_pointer);
3751 if (*input_line_pointer != '"')
3753 input_line_pointer++;
3754 s = input_line_pointer;
3755 while (*input_line_pointer && *input_line_pointer != '"')
3756 input_line_pointer++;
3757 if (*input_line_pointer != '"')
3759 saved_char = *input_line_pointer;
3760 *input_line_pointer = 0;
3768 if (tag == Tag_compatibility)
3769 elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
3771 elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
3773 elf32_arm_add_eabi_attr_int (stdoutput, tag, i);
3777 *input_line_pointer = saved_char;
3778 input_line_pointer++;
3780 demand_empty_rest_of_line ();
3783 as_bad (_("bad string constant"));
3784 ignore_rest_of_line ();
3787 as_bad (_("expected <tag> , <value>"));
3788 ignore_rest_of_line ();
3790 #endif /* OBJ_ELF */
3792 static void s_arm_arch (int);
3793 static void s_arm_cpu (int);
3794 static void s_arm_fpu (int);
3796 /* This table describes all the machine specific pseudo-ops the assembler
3797 has to support. The fields are:
3798 pseudo-op name without dot
3799 function to call to execute this pseudo-op
3800 Integer arg to pass to the function. */
3802 const pseudo_typeS md_pseudo_table[] =
3804 /* Never called because '.req' does not start a line. */
3805 { "req", s_req, 0 },
3806 /* Following two are likewise never called. */
3809 { "unreq", s_unreq, 0 },
3810 { "bss", s_bss, 0 },
3811 { "align", s_align, 0 },
3812 { "arm", s_arm, 0 },
3813 { "thumb", s_thumb, 0 },
3814 { "code", s_code, 0 },
3815 { "force_thumb", s_force_thumb, 0 },
3816 { "thumb_func", s_thumb_func, 0 },
3817 { "thumb_set", s_thumb_set, 0 },
3818 { "even", s_even, 0 },
3819 { "ltorg", s_ltorg, 0 },
3820 { "pool", s_ltorg, 0 },
3821 { "syntax", s_syntax, 0 },
3822 { "cpu", s_arm_cpu, 0 },
3823 { "arch", s_arm_arch, 0 },
3824 { "fpu", s_arm_fpu, 0 },
3826 { "word", s_arm_elf_cons, 4 },
3827 { "long", s_arm_elf_cons, 4 },
3828 { "rel31", s_arm_rel31, 0 },
3829 { "fnstart", s_arm_unwind_fnstart, 0 },
3830 { "fnend", s_arm_unwind_fnend, 0 },
3831 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3832 { "personality", s_arm_unwind_personality, 0 },
3833 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3834 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3835 { "save", s_arm_unwind_save, 0 },
3836 { "movsp", s_arm_unwind_movsp, 0 },
3837 { "pad", s_arm_unwind_pad, 0 },
3838 { "setfp", s_arm_unwind_setfp, 0 },
3839 { "unwind_raw", s_arm_unwind_raw, 0 },
3840 { "eabi_attribute", s_arm_eabi_attribute, 0 },
3844 { "extend", float_cons, 'x' },
3845 { "ldouble", float_cons, 'x' },
3846 { "packed", float_cons, 'p' },
3850 /* Parser functions used exclusively in instruction operands. */
3852 /* Generic immediate-value read function for use in insn parsing.
3853 STR points to the beginning of the immediate (the leading #);
3854 VAL receives the value; if the value is outside [MIN, MAX]
3855 issue an error. PREFIX_OPT is true if the immediate prefix is
3859 parse_immediate (char **str, int *val, int min, int max,
3860 bfd_boolean prefix_opt)
3863 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
3864 if (exp.X_op != O_constant)
3866 inst.error = _("constant expression required");
3870 if (exp.X_add_number < min || exp.X_add_number > max)
3872 inst.error = _("immediate value out of range");
3876 *val = exp.X_add_number;
3880 /* Less-generic immediate-value read function with the possibility of loading a
3881 big (64-bit) immediate, as required by Neon VMOV and VMVN immediate
3882 instructions. Puts the result directly in inst.operands[i]. */
3885 parse_big_immediate (char **str, int i)
3890 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
3892 if (exp.X_op == O_constant)
3893 inst.operands[i].imm = exp.X_add_number;
3894 else if (exp.X_op == O_big
3895 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
3896 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
3898 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
3899 /* Bignums have their least significant bits in
3900 generic_bignum[0]. Make sure we put 32 bits in imm and
3901 32 bits in reg, in a (hopefully) portable way. */
3902 assert (parts != 0);
3903 inst.operands[i].imm = 0;
3904 for (j = 0; j < parts; j++, idx++)
3905 inst.operands[i].imm |= generic_bignum[idx]
3906 << (LITTLENUM_NUMBER_OF_BITS * j);
3907 inst.operands[i].reg = 0;
3908 for (j = 0; j < parts; j++, idx++)
3909 inst.operands[i].reg |= generic_bignum[idx]
3910 << (LITTLENUM_NUMBER_OF_BITS * j);
3911 inst.operands[i].regisimm = 1;
3921 /* Returns the pseudo-register number of an FPA immediate constant,
3922 or FAIL if there isn't a valid constant here. */
3925 parse_fpa_immediate (char ** str)
3927 LITTLENUM_TYPE words[MAX_LITTLENUMS];
3933 /* First try and match exact strings, this is to guarantee
3934 that some formats will work even for cross assembly. */
3936 for (i = 0; fp_const[i]; i++)
3938 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
3942 *str += strlen (fp_const[i]);
3943 if (is_end_of_line[(unsigned char) **str])
3949 /* Just because we didn't get a match doesn't mean that the constant
3950 isn't valid, just that it is in a format that we don't
3951 automatically recognize. Try parsing it with the standard
3952 expression routines. */
3954 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
3956 /* Look for a raw floating point number. */
3957 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
3958 && is_end_of_line[(unsigned char) *save_in])
3960 for (i = 0; i < NUM_FLOAT_VALS; i++)
3962 for (j = 0; j < MAX_LITTLENUMS; j++)
3964 if (words[j] != fp_values[i][j])
3968 if (j == MAX_LITTLENUMS)
3976 /* Try and parse a more complex expression, this will probably fail
3977 unless the code uses a floating point prefix (eg "0f"). */
3978 save_in = input_line_pointer;
3979 input_line_pointer = *str;
3980 if (expression (&exp) == absolute_section
3981 && exp.X_op == O_big
3982 && exp.X_add_number < 0)
3984 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
3986 if (gen_to_words (words, 5, (long) 15) == 0)
3988 for (i = 0; i < NUM_FLOAT_VALS; i++)
3990 for (j = 0; j < MAX_LITTLENUMS; j++)
3992 if (words[j] != fp_values[i][j])
3996 if (j == MAX_LITTLENUMS)
3998 *str = input_line_pointer;
3999 input_line_pointer = save_in;
4006 *str = input_line_pointer;
4007 input_line_pointer = save_in;
4008 inst.error = _("invalid FPA immediate expression");
4012 /* Returns 1 if a number has "quarter-precision" float format
4013 0baBbbbbbc defgh000 00000000 00000000. */
4016 is_quarter_float (unsigned imm)
4018 int bs = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
4019 return (imm & 0x7ffff) == 0 && ((imm & 0x7e000000) ^ bs) == 0;
4022 /* Parse an 8-bit "quarter-precision" floating point number of the form:
4023 0baBbbbbbc defgh000 00000000 00000000.
4024 The minus-zero case needs special handling, since it can't be encoded in the
4025 "quarter-precision" float format, but can nonetheless be loaded as an integer
4029 parse_qfloat_immediate (char **ccp, int *immed)
4032 LITTLENUM_TYPE words[MAX_LITTLENUMS];
4034 skip_past_char (&str, '#');
4036 if ((str = atof_ieee (str, 's', words)) != NULL)
4038 unsigned fpword = 0;
4041 /* Our FP word must be 32 bits (single-precision FP). */
4042 for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
4044 fpword <<= LITTLENUM_NUMBER_OF_BITS;
4048 if (is_quarter_float (fpword) || fpword == 0x80000000)
4061 /* Shift operands. */
4064 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
4067 struct asm_shift_name
4070 enum shift_kind kind;
4073 /* Third argument to parse_shift. */
4074 enum parse_shift_mode
4076 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
4077 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
4078 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
4079 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
4080 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
4083 /* Parse a <shift> specifier on an ARM data processing instruction.
4084 This has three forms:
4086 (LSL|LSR|ASL|ASR|ROR) Rs
4087 (LSL|LSR|ASL|ASR|ROR) #imm
4090 Note that ASL is assimilated to LSL in the instruction encoding, and
4091 RRX to ROR #0 (which cannot be written as such). */
4094 parse_shift (char **str, int i, enum parse_shift_mode mode)
4096 const struct asm_shift_name *shift_name;
4097 enum shift_kind shift;
4102 for (p = *str; ISALPHA (*p); p++)
4107 inst.error = _("shift expression expected");
4111 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
4113 if (shift_name == NULL)
4115 inst.error = _("shift expression expected");
4119 shift = shift_name->kind;
4123 case NO_SHIFT_RESTRICT:
4124 case SHIFT_IMMEDIATE: break;
4126 case SHIFT_LSL_OR_ASR_IMMEDIATE:
4127 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
4129 inst.error = _("'LSL' or 'ASR' required");
4134 case SHIFT_LSL_IMMEDIATE:
4135 if (shift != SHIFT_LSL)
4137 inst.error = _("'LSL' required");
4142 case SHIFT_ASR_IMMEDIATE:
4143 if (shift != SHIFT_ASR)
4145 inst.error = _("'ASR' required");
4153 if (shift != SHIFT_RRX)
4155 /* Whitespace can appear here if the next thing is a bare digit. */
4156 skip_whitespace (p);
4158 if (mode == NO_SHIFT_RESTRICT
4159 && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4161 inst.operands[i].imm = reg;
4162 inst.operands[i].immisreg = 1;
4164 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4167 inst.operands[i].shift_kind = shift;
4168 inst.operands[i].shifted = 1;
4173 /* Parse a <shifter_operand> for an ARM data processing instruction:
4176 #<immediate>, <rotate>
4180 where <shift> is defined by parse_shift above, and <rotate> is a
4181 multiple of 2 between 0 and 30. Validation of immediate operands
4182 is deferred to md_apply_fix. */
4185 parse_shifter_operand (char **str, int i)
4190 if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
4192 inst.operands[i].reg = value;
4193 inst.operands[i].isreg = 1;
4195 /* parse_shift will override this if appropriate */
4196 inst.reloc.exp.X_op = O_constant;
4197 inst.reloc.exp.X_add_number = 0;
4199 if (skip_past_comma (str) == FAIL)
4202 /* Shift operation on register. */
4203 return parse_shift (str, i, NO_SHIFT_RESTRICT);
4206 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
4209 if (skip_past_comma (str) == SUCCESS)
4211 /* #x, y -- ie explicit rotation by Y. */
4212 if (my_get_expression (&expr, str, GE_NO_PREFIX))
4215 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
4217 inst.error = _("constant expression expected");
4221 value = expr.X_add_number;
4222 if (value < 0 || value > 30 || value % 2 != 0)
4224 inst.error = _("invalid rotation");
4227 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
4229 inst.error = _("invalid constant");
4233 /* Convert to decoded value. md_apply_fix will put it back. */
4234 inst.reloc.exp.X_add_number
4235 = (((inst.reloc.exp.X_add_number << (32 - value))
4236 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
4239 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
4240 inst.reloc.pc_rel = 0;
4244 /* Parse all forms of an ARM address expression. Information is written
4245 to inst.operands[i] and/or inst.reloc.
4247 Preindexed addressing (.preind=1):
4249 [Rn, #offset] .reg=Rn .reloc.exp=offset
4250 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4251 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4252 .shift_kind=shift .reloc.exp=shift_imm
4254 These three may have a trailing ! which causes .writeback to be set also.
4256 Postindexed addressing (.postind=1, .writeback=1):
4258 [Rn], #offset .reg=Rn .reloc.exp=offset
4259 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4260 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
4261 .shift_kind=shift .reloc.exp=shift_imm
4263 Unindexed addressing (.preind=0, .postind=0):
4265 [Rn], {option} .reg=Rn .imm=option .immisreg=0
4269 [Rn]{!} shorthand for [Rn,#0]{!}
4270 =immediate .isreg=0 .reloc.exp=immediate
4271 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
4273 It is the caller's responsibility to check for addressing modes not
4274 supported by the instruction, and to set inst.reloc.type. */
4277 parse_address (char **str, int i)
4282 if (skip_past_char (&p, '[') == FAIL)
4284 if (skip_past_char (&p, '=') == FAIL)
4286 /* bare address - translate to PC-relative offset */
4287 inst.reloc.pc_rel = 1;
4288 inst.operands[i].reg = REG_PC;
4289 inst.operands[i].isreg = 1;
4290 inst.operands[i].preind = 1;
4292 /* else a load-constant pseudo op, no special treatment needed here */
4294 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4301 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4303 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4306 inst.operands[i].reg = reg;
4307 inst.operands[i].isreg = 1;
4309 if (skip_past_comma (&p) == SUCCESS)
4311 inst.operands[i].preind = 1;
4314 else if (*p == '-') p++, inst.operands[i].negative = 1;
4316 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4318 inst.operands[i].imm = reg;
4319 inst.operands[i].immisreg = 1;
4321 if (skip_past_comma (&p) == SUCCESS)
4322 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4325 else if (skip_past_char (&p, ':') == SUCCESS)
4327 /* FIXME: '@' should be used here, but it's filtered out by generic
4328 code before we get to see it here. This may be subject to
4331 my_get_expression (&exp, &p, GE_NO_PREFIX);
4332 if (exp.X_op != O_constant)
4334 inst.error = _("alignment must be constant");
4337 inst.operands[i].imm = exp.X_add_number << 8;
4338 inst.operands[i].immisalign = 1;
4339 /* Alignments are not pre-indexes. */
4340 inst.operands[i].preind = 0;
4344 if (inst.operands[i].negative)
4346 inst.operands[i].negative = 0;
4349 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4354 if (skip_past_char (&p, ']') == FAIL)
4356 inst.error = _("']' expected");
4360 if (skip_past_char (&p, '!') == SUCCESS)
4361 inst.operands[i].writeback = 1;
4363 else if (skip_past_comma (&p) == SUCCESS)
4365 if (skip_past_char (&p, '{') == SUCCESS)
4367 /* [Rn], {expr} - unindexed, with option */
4368 if (parse_immediate (&p, &inst.operands[i].imm,
4369 0, 255, TRUE) == FAIL)
4372 if (skip_past_char (&p, '}') == FAIL)
4374 inst.error = _("'}' expected at end of 'option' field");
4377 if (inst.operands[i].preind)
4379 inst.error = _("cannot combine index with option");
4387 inst.operands[i].postind = 1;
4388 inst.operands[i].writeback = 1;
4390 if (inst.operands[i].preind)
4392 inst.error = _("cannot combine pre- and post-indexing");
4397 else if (*p == '-') p++, inst.operands[i].negative = 1;
4399 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
4401 /* We might be using the immediate for alignment already. If we
4402 are, OR the register number into the low-order bits. */
4403 if (inst.operands[i].immisalign)
4404 inst.operands[i].imm |= reg;
4406 inst.operands[i].imm = reg;
4407 inst.operands[i].immisreg = 1;
4409 if (skip_past_comma (&p) == SUCCESS)
4410 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
4415 if (inst.operands[i].negative)
4417 inst.operands[i].negative = 0;
4420 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
4426 /* If at this point neither .preind nor .postind is set, we have a
4427 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
4428 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
4430 inst.operands[i].preind = 1;
4431 inst.reloc.exp.X_op = O_constant;
4432 inst.reloc.exp.X_add_number = 0;
4438 /* Parse an operand for a MOVW or MOVT instruction. */
4440 parse_half (char **str)
4445 skip_past_char (&p, '#');
4446 if (strncasecmp (p, ":lower16:", 9) == 0)
4447 inst.reloc.type = BFD_RELOC_ARM_MOVW;
4448 else if (strncasecmp (p, ":upper16:", 9) == 0)
4449 inst.reloc.type = BFD_RELOC_ARM_MOVT;
4451 if (inst.reloc.type != BFD_RELOC_UNUSED)
4457 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
4460 if (inst.reloc.type == BFD_RELOC_UNUSED)
4462 if (inst.reloc.exp.X_op != O_constant)
4464 inst.error = _("constant expression expected");
4467 if (inst.reloc.exp.X_add_number < 0
4468 || inst.reloc.exp.X_add_number > 0xffff)
4470 inst.error = _("immediate value out of range");
4478 /* Miscellaneous. */
4480 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
4481 or a bitmask suitable to be or-ed into the ARM msr instruction. */
4483 parse_psr (char **str)
4486 unsigned long psr_field;
4487 const struct asm_psr *psr;
4490 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
4491 feature for ease of use and backwards compatibility. */
4493 if (strncasecmp (p, "SPSR", 4) == 0)
4494 psr_field = SPSR_BIT;
4495 else if (strncasecmp (p, "CPSR", 4) == 0)
4502 while (ISALNUM (*p) || *p == '_');
4504 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
4515 /* A suffix follows. */
4521 while (ISALNUM (*p) || *p == '_');
4523 psr = hash_find_n (arm_psr_hsh, start, p - start);
4527 psr_field |= psr->field;
4532 goto error; /* Garbage after "[CS]PSR". */
4534 psr_field |= (PSR_c | PSR_f);
4540 inst.error = _("flag for {c}psr instruction expected");
4544 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
4545 value suitable for splatting into the AIF field of the instruction. */
4548 parse_cps_flags (char **str)
4557 case '\0': case ',':
4560 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
4561 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
4562 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
4565 inst.error = _("unrecognized CPS flag");
4570 if (saw_a_flag == 0)
4572 inst.error = _("missing CPS flags");
4580 /* Parse an endian specifier ("BE" or "LE", case insensitive);
4581 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
4584 parse_endian_specifier (char **str)
4589 if (strncasecmp (s, "BE", 2))
4591 else if (strncasecmp (s, "LE", 2))
4595 inst.error = _("valid endian specifiers are be or le");
4599 if (ISALNUM (s[2]) || s[2] == '_')
4601 inst.error = _("valid endian specifiers are be or le");
4606 return little_endian;
4609 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
4610 value suitable for poking into the rotate field of an sxt or sxta
4611 instruction, or FAIL on error. */
4614 parse_ror (char **str)
4619 if (strncasecmp (s, "ROR", 3) == 0)
4623 inst.error = _("missing rotation field after comma");
4627 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
4632 case 0: *str = s; return 0x0;
4633 case 8: *str = s; return 0x1;
4634 case 16: *str = s; return 0x2;
4635 case 24: *str = s; return 0x3;
4638 inst.error = _("rotation can only be 0, 8, 16, or 24");
4643 /* Parse a conditional code (from conds[] below). The value returned is in the
4644 range 0 .. 14, or FAIL. */
4646 parse_cond (char **str)
4649 const struct asm_cond *c;
4652 while (ISALPHA (*q))
4655 c = hash_find_n (arm_cond_hsh, p, q - p);
4658 inst.error = _("condition required");
4666 /* Parse an option for a barrier instruction. Returns the encoding for the
4669 parse_barrier (char **str)
4672 const struct asm_barrier_opt *o;
4675 while (ISALPHA (*q))
4678 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
4686 /* Parse the operands of a table branch instruction. Similar to a memory
4689 parse_tb (char **str)
4694 if (skip_past_char (&p, '[') == FAIL)
4696 inst.error = _("'[' expected");
4700 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4702 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4705 inst.operands[0].reg = reg;
4707 if (skip_past_comma (&p) == FAIL)
4709 inst.error = _("',' expected");
4713 if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
4715 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4718 inst.operands[0].imm = reg;
4720 if (skip_past_comma (&p) == SUCCESS)
4722 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
4724 if (inst.reloc.exp.X_add_number != 1)
4726 inst.error = _("invalid shift");
4729 inst.operands[0].shifted = 1;
4732 if (skip_past_char (&p, ']') == FAIL)
4734 inst.error = _("']' expected");
4741 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
4742 information on the types the operands can take and how they are encoded.
4743 Note particularly the abuse of ".regisimm" to signify a Neon register.
4744 Up to three operands may be read; this function handles setting the
4745 ".present" field for each operand itself.
4746 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
4747 else returns FAIL. */
4750 parse_neon_mov (char **str, int *which_operand)
4752 int i = *which_operand, val;
4753 enum arm_reg_type rtype;
4755 struct neon_type_el optype;
4757 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
4759 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
4760 inst.operands[i].reg = val;
4761 inst.operands[i].isscalar = 1;
4762 inst.operands[i].vectype = optype;
4763 inst.operands[i++].present = 1;
4765 if (skip_past_comma (&ptr) == FAIL)
4768 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4771 inst.operands[i].reg = val;
4772 inst.operands[i].isreg = 1;
4773 inst.operands[i].present = 1;
4775 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NDQ, &rtype, &optype))
4778 /* Cases 0, 1, 2, 3, 5 (D only). */
4779 if (skip_past_comma (&ptr) == FAIL)
4782 inst.operands[i].reg = val;
4783 inst.operands[i].isreg = 1;
4784 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4785 inst.operands[i].vectype = optype;
4786 inst.operands[i++].present = 1;
4788 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4790 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>. */
4791 inst.operands[i-1].regisimm = 1;
4792 inst.operands[i].reg = val;
4793 inst.operands[i].isreg = 1;
4794 inst.operands[i++].present = 1;
4796 if (rtype == REG_TYPE_NQ)
4798 first_error (_("can't use Neon quad register here"));
4801 if (skip_past_comma (&ptr) == FAIL)
4803 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
4805 inst.operands[i].reg = val;
4806 inst.operands[i].isreg = 1;
4807 inst.operands[i].present = 1;
4809 else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
4811 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
4812 Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm> */
4813 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4816 else if (parse_big_immediate (&ptr, i) == SUCCESS)
4818 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
4819 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
4820 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4823 else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_NDQ, &rtype, &optype))
4826 /* Case 0: VMOV<c><q> <Qd>, <Qm>
4827 Case 1: VMOV<c><q> <Dd>, <Dm> */
4828 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4831 inst.operands[i].reg = val;
4832 inst.operands[i].isreg = 1;
4833 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4834 inst.operands[i].vectype = optype;
4835 inst.operands[i].present = 1;
4839 first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
4843 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4846 inst.operands[i].reg = val;
4847 inst.operands[i].isreg = 1;
4848 inst.operands[i++].present = 1;
4850 if (skip_past_comma (&ptr) == FAIL)
4853 if ((val = parse_scalar (&ptr, 8, &optype)) != FAIL)
4855 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
4856 inst.operands[i].reg = val;
4857 inst.operands[i].isscalar = 1;
4858 inst.operands[i].present = 1;
4859 inst.operands[i].vectype = optype;
4861 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
4863 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
4864 inst.operands[i].reg = val;
4865 inst.operands[i].isreg = 1;
4866 inst.operands[i++].present = 1;
4868 if (skip_past_comma (&ptr) == FAIL)
4871 if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFD, NULL, &optype))
4874 first_error (_(reg_expected_msgs[REG_TYPE_VFD]));
4878 inst.operands[i].reg = val;
4879 inst.operands[i].isreg = 1;
4880 inst.operands[i].regisimm = 1;
4881 inst.operands[i].vectype = optype;
4882 inst.operands[i].present = 1;
4887 first_error (_("parse error"));
4891 /* Successfully parsed the operands. Update args. */
4897 first_error (_("expected comma"));
4901 first_error (_(reg_expected_msgs[REG_TYPE_RN]));
4905 first_error (_("instruction cannot be conditionalized"));
4909 /* Matcher codes for parse_operands. */
4910 enum operand_parse_code
4912 OP_stop, /* end of line */
4914 OP_RR, /* ARM register */
4915 OP_RRnpc, /* ARM register, not r15 */
4916 OP_RRnpcb, /* ARM register, not r15, in square brackets */
4917 OP_RRw, /* ARM register, not r15, optional trailing ! */
4918 OP_RCP, /* Coprocessor number */
4919 OP_RCN, /* Coprocessor register */
4920 OP_RF, /* FPA register */
4921 OP_RVS, /* VFP single precision register */
4922 OP_RVD, /* VFP double precision register (0..15) */
4923 OP_RND, /* Neon double precision register (0..31) */
4924 OP_RNQ, /* Neon quad precision register */
4925 OP_RNDQ, /* Neon double or quad precision register */
4926 OP_RNSC, /* Neon scalar D[X] */
4927 OP_RVC, /* VFP control register */
4928 OP_RMF, /* Maverick F register */
4929 OP_RMD, /* Maverick D register */
4930 OP_RMFX, /* Maverick FX register */
4931 OP_RMDX, /* Maverick DX register */
4932 OP_RMAX, /* Maverick AX register */
4933 OP_RMDS, /* Maverick DSPSC register */
4934 OP_RIWR, /* iWMMXt wR register */
4935 OP_RIWC, /* iWMMXt wC register */
4936 OP_RIWG, /* iWMMXt wCG register */
4937 OP_RXA, /* XScale accumulator register */
4939 OP_REGLST, /* ARM register list */
4940 OP_VRSLST, /* VFP single-precision register list */
4941 OP_VRDLST, /* VFP double-precision register list */
4942 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
4943 OP_NSTRLST, /* Neon element/structure list */
4945 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
4946 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
4947 OP_RR_RNSC, /* ARM reg or Neon scalar. */
4948 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
4949 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
4950 OP_VMOV, /* Neon VMOV operands. */
4951 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
4952 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
4954 OP_I0, /* immediate zero */
4955 OP_I7, /* immediate value 0 .. 7 */
4956 OP_I15, /* 0 .. 15 */
4957 OP_I16, /* 1 .. 16 */
4958 OP_I16z, /* 0 .. 16 */
4959 OP_I31, /* 0 .. 31 */
4960 OP_I31w, /* 0 .. 31, optional trailing ! */
4961 OP_I32, /* 1 .. 32 */
4962 OP_I32z, /* 0 .. 32 */
4963 OP_I63, /* 0 .. 63 */
4964 OP_I63s, /* -64 .. 63 */
4965 OP_I64, /* 1 .. 64 */
4966 OP_I64z, /* 0 .. 64 */
4967 OP_I255, /* 0 .. 255 */
4969 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
4970 OP_I7b, /* 0 .. 7 */
4971 OP_I15b, /* 0 .. 15 */
4972 OP_I31b, /* 0 .. 31 */
4974 OP_SH, /* shifter operand */
4975 OP_ADDR, /* Memory address expression (any mode) */
4976 OP_EXP, /* arbitrary expression */
4977 OP_EXPi, /* same, with optional immediate prefix */
4978 OP_EXPr, /* same, with optional relocation suffix */
4979 OP_HALF, /* 0 .. 65535 or low/high reloc. */
4981 OP_CPSF, /* CPS flags */
4982 OP_ENDI, /* Endianness specifier */
4983 OP_PSR, /* CPSR/SPSR mask for msr */
4984 OP_COND, /* conditional code */
4985 OP_TB, /* Table branch. */
4987 OP_RRnpc_I0, /* ARM register or literal 0 */
4988 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
4989 OP_RR_EXi, /* ARM register or expression with imm prefix */
4990 OP_RF_IF, /* FPA register or immediate */
4991 OP_RIWR_RIWC, /* iWMMXt R or C reg */
4993 /* Optional operands. */
4994 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
4995 OP_oI31b, /* 0 .. 31 */
4996 OP_oI32b, /* 1 .. 32 */
4997 OP_oIffffb, /* 0 .. 65535 */
4998 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
5000 OP_oRR, /* ARM register */
5001 OP_oRRnpc, /* ARM register, not the PC */
5002 OP_oRND, /* Optional Neon double precision register */
5003 OP_oRNQ, /* Optional Neon quad precision register */
5004 OP_oRNDQ, /* Optional Neon double or quad precision register */
5005 OP_oSHll, /* LSL immediate */
5006 OP_oSHar, /* ASR immediate */
5007 OP_oSHllar, /* LSL or ASR immediate */
5008 OP_oROR, /* ROR 0/8/16/24 */
5009 OP_oBARRIER, /* Option argument for a barrier instruction. */
5011 OP_FIRST_OPTIONAL = OP_oI7b
5014 /* Generic instruction operand parser. This does no encoding and no
5015 semantic validation; it merely squirrels values away in the inst
5016 structure. Returns SUCCESS or FAIL depending on whether the
5017 specified grammar matched. */
5019 parse_operands (char *str, const unsigned char *pattern)
5021 unsigned const char *upat = pattern;
5022 char *backtrack_pos = 0;
5023 const char *backtrack_error = 0;
5024 int i, val, backtrack_index = 0;
5025 enum arm_reg_type rtype;
5027 #define po_char_or_fail(chr) do { \
5028 if (skip_past_char (&str, chr) == FAIL) \
5032 #define po_reg_or_fail(regtype) do { \
5033 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5034 &inst.operands[i].vectype); \
5037 first_error (_(reg_expected_msgs[regtype])); \
5040 inst.operands[i].reg = val; \
5041 inst.operands[i].isreg = 1; \
5042 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5045 #define po_reg_or_goto(regtype, label) do { \
5046 val = arm_typed_reg_parse (&str, regtype, &rtype, \
5047 &inst.operands[i].vectype); \
5051 inst.operands[i].reg = val; \
5052 inst.operands[i].isreg = 1; \
5053 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
5056 #define po_imm_or_fail(min, max, popt) do { \
5057 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
5059 inst.operands[i].imm = val; \
5062 #define po_scalar_or_goto(elsz, label) do { \
5063 val = parse_scalar (&str, elsz, &inst.operands[i].vectype); \
5066 inst.operands[i].reg = val; \
5067 inst.operands[i].isscalar = 1; \
5070 #define po_misc_or_fail(expr) do { \
5075 skip_whitespace (str);
5077 for (i = 0; upat[i] != OP_stop; i++)
5079 if (upat[i] >= OP_FIRST_OPTIONAL)
5081 /* Remember where we are in case we need to backtrack. */
5082 assert (!backtrack_pos);
5083 backtrack_pos = str;
5084 backtrack_error = inst.error;
5085 backtrack_index = i;
5089 po_char_or_fail (',');
5097 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
5098 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
5099 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
5100 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
5101 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
5102 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
5104 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
5105 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
5106 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
5107 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
5108 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
5109 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
5110 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
5111 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
5112 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
5113 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
5114 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
5115 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
5117 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
5119 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
5121 /* Neon scalar. Using an element size of 8 means that some invalid
5122 scalars are accepted here, so deal with those in later code. */
5123 case OP_RNSC: po_scalar_or_goto (8, failure); break;
5125 /* WARNING: We can expand to two operands here. This has the potential
5126 to totally confuse the backtracking mechanism! It will be OK at
5127 least as long as we don't try to use optional args as well,
5131 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
5133 skip_past_comma (&str);
5134 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
5137 /* Optional register operand was omitted. Unfortunately, it's in
5138 operands[i-1] and we need it to be in inst.operands[i]. Fix that
5139 here (this is a bit grotty). */
5140 inst.operands[i] = inst.operands[i-1];
5141 inst.operands[i-1].present = 0;
5144 /* Immediate gets verified properly later, so accept any now. */
5145 po_imm_or_fail (INT_MIN, INT_MAX, TRUE);
5151 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
5154 po_imm_or_fail (0, 0, TRUE);
5160 po_scalar_or_goto (8, try_rr);
5163 po_reg_or_fail (REG_TYPE_RN);
5169 po_scalar_or_goto (8, try_ndq);
5172 po_reg_or_fail (REG_TYPE_NDQ);
5178 po_scalar_or_goto (8, try_vfd);
5181 po_reg_or_fail (REG_TYPE_VFD);
5186 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
5187 not careful then bad things might happen. */
5188 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
5193 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
5196 /* There's a possibility of getting a 64-bit immediate here, so
5197 we need special handling. */
5198 if (parse_big_immediate (&str, i) == FAIL)
5200 inst.error = _("immediate value is out of range");
5208 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
5211 po_imm_or_fail (0, 63, TRUE);
5216 po_char_or_fail ('[');
5217 po_reg_or_fail (REG_TYPE_RN);
5218 po_char_or_fail (']');
5222 po_reg_or_fail (REG_TYPE_RN);
5223 if (skip_past_char (&str, '!') == SUCCESS)
5224 inst.operands[i].writeback = 1;
5228 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
5229 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
5230 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
5231 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
5232 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
5233 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
5234 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
5235 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
5236 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
5237 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
5238 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
5239 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
5241 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
5243 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
5244 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
5246 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
5247 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
5248 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
5250 /* Immediate variants */
5252 po_char_or_fail ('{');
5253 po_imm_or_fail (0, 255, TRUE);
5254 po_char_or_fail ('}');
5258 /* The expression parser chokes on a trailing !, so we have
5259 to find it first and zap it. */
5262 while (*s && *s != ',')
5267 inst.operands[i].writeback = 1;
5269 po_imm_or_fail (0, 31, TRUE);
5277 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5282 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5287 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5289 if (inst.reloc.exp.X_op == O_symbol)
5291 val = parse_reloc (&str);
5294 inst.error = _("unrecognized relocation suffix");
5297 else if (val != BFD_RELOC_UNUSED)
5299 inst.operands[i].imm = val;
5300 inst.operands[i].hasreloc = 1;
5305 /* Operand for MOVW or MOVT. */
5307 po_misc_or_fail (parse_half (&str));
5310 /* Register or expression */
5311 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
5312 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
5314 /* Register or immediate */
5315 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
5316 I0: po_imm_or_fail (0, 0, FALSE); break;
5318 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
5320 if (!is_immediate_prefix (*str))
5323 val = parse_fpa_immediate (&str);
5326 /* FPA immediates are encoded as registers 8-15.
5327 parse_fpa_immediate has already applied the offset. */
5328 inst.operands[i].reg = val;
5329 inst.operands[i].isreg = 1;
5332 /* Two kinds of register */
5335 struct reg_entry *rege = arm_reg_parse_multi (&str);
5336 if (rege->type != REG_TYPE_MMXWR
5337 && rege->type != REG_TYPE_MMXWC
5338 && rege->type != REG_TYPE_MMXWCG)
5340 inst.error = _("iWMMXt data or control register expected");
5343 inst.operands[i].reg = rege->number;
5344 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
5349 case OP_CPSF: val = parse_cps_flags (&str); break;
5350 case OP_ENDI: val = parse_endian_specifier (&str); break;
5351 case OP_oROR: val = parse_ror (&str); break;
5352 case OP_PSR: val = parse_psr (&str); break;
5353 case OP_COND: val = parse_cond (&str); break;
5354 case OP_oBARRIER:val = parse_barrier (&str); break;
5357 po_misc_or_fail (parse_tb (&str));
5360 /* Register lists */
5362 val = parse_reg_list (&str);
5365 inst.operands[1].writeback = 1;
5371 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
5375 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
5379 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
5384 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
5385 &inst.operands[i].vectype);
5388 /* Addressing modes */
5390 po_misc_or_fail (parse_address (&str, i));
5394 po_misc_or_fail (parse_shifter_operand (&str, i));
5398 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
5402 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
5406 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
5410 as_fatal ("unhandled operand code %d", upat[i]);
5413 /* Various value-based sanity checks and shared operations. We
5414 do not signal immediate failures for the register constraints;
5415 this allows a syntax error to take precedence. */
5423 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
5424 inst.error = BAD_PC;
5440 inst.operands[i].imm = val;
5447 /* If we get here, this operand was successfully parsed. */
5448 inst.operands[i].present = 1;
5452 inst.error = BAD_ARGS;
5457 /* The parse routine should already have set inst.error, but set a
5458 defaut here just in case. */
5460 inst.error = _("syntax error");
5464 /* Do not backtrack over a trailing optional argument that
5465 absorbed some text. We will only fail again, with the
5466 'garbage following instruction' error message, which is
5467 probably less helpful than the current one. */
5468 if (backtrack_index == i && backtrack_pos != str
5469 && upat[i+1] == OP_stop)
5472 inst.error = _("syntax error");
5476 /* Try again, skipping the optional argument at backtrack_pos. */
5477 str = backtrack_pos;
5478 inst.error = backtrack_error;
5479 inst.operands[backtrack_index].present = 0;
5480 i = backtrack_index;
5484 /* Check that we have parsed all the arguments. */
5485 if (*str != '\0' && !inst.error)
5486 inst.error = _("garbage following instruction");
5488 return inst.error ? FAIL : SUCCESS;
5491 #undef po_char_or_fail
5492 #undef po_reg_or_fail
5493 #undef po_reg_or_goto
5494 #undef po_imm_or_fail
5495 #undef po_scalar_or_fail
5497 /* Shorthand macro for instruction encoding functions issuing errors. */
5498 #define constraint(expr, err) do { \
5506 /* Functions for operand encoding. ARM, then Thumb. */
5508 #define rotate_left(v, n) (v << n | v >> (32 - n))
5510 /* If VAL can be encoded in the immediate field of an ARM instruction,
5511 return the encoded form. Otherwise, return FAIL. */
5514 encode_arm_immediate (unsigned int val)
5518 for (i = 0; i < 32; i += 2)
5519 if ((a = rotate_left (val, i)) <= 0xff)
5520 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
5525 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
5526 return the encoded form. Otherwise, return FAIL. */
5528 encode_thumb32_immediate (unsigned int val)
5535 for (i = 1; i <= 24; i++)
5538 if ((val & ~(0xff << i)) == 0)
5539 return ((val >> i) & 0x7f) | ((32 - i) << 7);
5543 if (val == ((a << 16) | a))
5545 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
5549 if (val == ((a << 16) | a))
5550 return 0x200 | (a >> 8);
5554 /* Encode a VFP SP or DP register number into inst.instruction. */
5557 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
5559 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
5562 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
5565 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
5568 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
5573 first_error (_("D register out of range for selected VFP version"));
5581 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
5585 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
5589 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
5593 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
5597 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
5601 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
5609 /* Encode a <shift> in an ARM-format instruction. The immediate,
5610 if any, is handled by md_apply_fix. */
5612 encode_arm_shift (int i)
5614 if (inst.operands[i].shift_kind == SHIFT_RRX)
5615 inst.instruction |= SHIFT_ROR << 5;
5618 inst.instruction |= inst.operands[i].shift_kind << 5;
5619 if (inst.operands[i].immisreg)
5621 inst.instruction |= SHIFT_BY_REG;
5622 inst.instruction |= inst.operands[i].imm << 8;
5625 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5630 encode_arm_shifter_operand (int i)
5632 if (inst.operands[i].isreg)
5634 inst.instruction |= inst.operands[i].reg;
5635 encode_arm_shift (i);
5638 inst.instruction |= INST_IMMEDIATE;
5641 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
5643 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
5645 assert (inst.operands[i].isreg);
5646 inst.instruction |= inst.operands[i].reg << 16;
5648 if (inst.operands[i].preind)
5652 inst.error = _("instruction does not accept preindexed addressing");
5655 inst.instruction |= PRE_INDEX;
5656 if (inst.operands[i].writeback)
5657 inst.instruction |= WRITE_BACK;
5660 else if (inst.operands[i].postind)
5662 assert (inst.operands[i].writeback);
5664 inst.instruction |= WRITE_BACK;
5666 else /* unindexed - only for coprocessor */
5668 inst.error = _("instruction does not accept unindexed addressing");
5672 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
5673 && (((inst.instruction & 0x000f0000) >> 16)
5674 == ((inst.instruction & 0x0000f000) >> 12)))
5675 as_warn ((inst.instruction & LOAD_BIT)
5676 ? _("destination register same as write-back base")
5677 : _("source register same as write-back base"));
5680 /* inst.operands[i] was set up by parse_address. Encode it into an
5681 ARM-format mode 2 load or store instruction. If is_t is true,
5682 reject forms that cannot be used with a T instruction (i.e. not
5685 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
5687 encode_arm_addr_mode_common (i, is_t);
5689 if (inst.operands[i].immisreg)
5691 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
5692 inst.instruction |= inst.operands[i].imm;
5693 if (!inst.operands[i].negative)
5694 inst.instruction |= INDEX_UP;
5695 if (inst.operands[i].shifted)
5697 if (inst.operands[i].shift_kind == SHIFT_RRX)
5698 inst.instruction |= SHIFT_ROR << 5;
5701 inst.instruction |= inst.operands[i].shift_kind << 5;
5702 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5706 else /* immediate offset in inst.reloc */
5708 if (inst.reloc.type == BFD_RELOC_UNUSED)
5709 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
5713 /* inst.operands[i] was set up by parse_address. Encode it into an
5714 ARM-format mode 3 load or store instruction. Reject forms that
5715 cannot be used with such instructions. If is_t is true, reject
5716 forms that cannot be used with a T instruction (i.e. not
5719 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
5721 if (inst.operands[i].immisreg && inst.operands[i].shifted)
5723 inst.error = _("instruction does not accept scaled register index");
5727 encode_arm_addr_mode_common (i, is_t);
5729 if (inst.operands[i].immisreg)
5731 inst.instruction |= inst.operands[i].imm;
5732 if (!inst.operands[i].negative)
5733 inst.instruction |= INDEX_UP;
5735 else /* immediate offset in inst.reloc */
5737 inst.instruction |= HWOFFSET_IMM;
5738 if (inst.reloc.type == BFD_RELOC_UNUSED)
5739 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
5743 /* inst.operands[i] was set up by parse_address. Encode it into an
5744 ARM-format instruction. Reject all forms which cannot be encoded
5745 into a coprocessor load/store instruction. If wb_ok is false,
5746 reject use of writeback; if unind_ok is false, reject use of
5747 unindexed addressing. If reloc_override is not 0, use it instead
5748 of BFD_ARM_CP_OFF_IMM. */
5751 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
5753 inst.instruction |= inst.operands[i].reg << 16;
5755 assert (!(inst.operands[i].preind && inst.operands[i].postind));
5757 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
5759 assert (!inst.operands[i].writeback);
5762 inst.error = _("instruction does not support unindexed addressing");
5765 inst.instruction |= inst.operands[i].imm;
5766 inst.instruction |= INDEX_UP;
5770 if (inst.operands[i].preind)
5771 inst.instruction |= PRE_INDEX;
5773 if (inst.operands[i].writeback)
5775 if (inst.operands[i].reg == REG_PC)
5777 inst.error = _("pc may not be used with write-back");
5782 inst.error = _("instruction does not support writeback");
5785 inst.instruction |= WRITE_BACK;
5789 inst.reloc.type = reloc_override;
5790 else if (thumb_mode)
5791 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
5793 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
5797 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
5798 Determine whether it can be performed with a move instruction; if
5799 it can, convert inst.instruction to that move instruction and
5800 return 1; if it can't, convert inst.instruction to a literal-pool
5801 load and return 0. If this is not a valid thing to do in the
5802 current context, set inst.error and return 1.
5804 inst.operands[i] describes the destination register. */
5807 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
5812 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
5816 if ((inst.instruction & tbit) == 0)
5818 inst.error = _("invalid pseudo operation");
5821 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
5823 inst.error = _("constant expression expected");
5826 if (inst.reloc.exp.X_op == O_constant)
5830 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
5832 /* This can be done with a mov(1) instruction. */
5833 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
5834 inst.instruction |= inst.reloc.exp.X_add_number;
5840 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
5843 /* This can be done with a mov instruction. */
5844 inst.instruction &= LITERAL_MASK;
5845 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
5846 inst.instruction |= value & 0xfff;
5850 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
5853 /* This can be done with a mvn instruction. */
5854 inst.instruction &= LITERAL_MASK;
5855 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
5856 inst.instruction |= value & 0xfff;
5862 if (add_to_lit_pool () == FAIL)
5864 inst.error = _("literal pool insertion failed");
5867 inst.operands[1].reg = REG_PC;
5868 inst.operands[1].isreg = 1;
5869 inst.operands[1].preind = 1;
5870 inst.reloc.pc_rel = 1;
5871 inst.reloc.type = (thumb_p
5872 ? BFD_RELOC_ARM_THUMB_OFFSET
5874 ? BFD_RELOC_ARM_HWLITERAL
5875 : BFD_RELOC_ARM_LITERAL));
5879 /* Functions for instruction encoding, sorted by subarchitecture.
5880 First some generics; their names are taken from the conventional
5881 bit positions for register arguments in ARM format instructions. */
5891 inst.instruction |= inst.operands[0].reg << 12;
5897 inst.instruction |= inst.operands[0].reg << 12;
5898 inst.instruction |= inst.operands[1].reg;
5904 inst.instruction |= inst.operands[0].reg << 12;
5905 inst.instruction |= inst.operands[1].reg << 16;
5911 inst.instruction |= inst.operands[0].reg << 16;
5912 inst.instruction |= inst.operands[1].reg << 12;
5918 unsigned Rn = inst.operands[2].reg;
5919 /* Enforce restrictions on SWP instruction. */
5920 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
5921 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
5922 _("Rn must not overlap other operands"));
5923 inst.instruction |= inst.operands[0].reg << 12;
5924 inst.instruction |= inst.operands[1].reg;
5925 inst.instruction |= Rn << 16;
5931 inst.instruction |= inst.operands[0].reg << 12;
5932 inst.instruction |= inst.operands[1].reg << 16;
5933 inst.instruction |= inst.operands[2].reg;
5939 inst.instruction |= inst.operands[0].reg;
5940 inst.instruction |= inst.operands[1].reg << 12;
5941 inst.instruction |= inst.operands[2].reg << 16;
5947 inst.instruction |= inst.operands[0].imm;
5953 inst.instruction |= inst.operands[0].reg << 12;
5954 encode_arm_cp_address (1, TRUE, TRUE, 0);
5957 /* ARM instructions, in alphabetical order by function name (except
5958 that wrapper functions appear immediately after the function they
5961 /* This is a pseudo-op of the form "adr rd, label" to be converted
5962 into a relative address of the form "add rd, pc, #label-.-8". */
5967 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
5969 /* Frag hacking will turn this into a sub instruction if the offset turns
5970 out to be negative. */
5971 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5972 inst.reloc.pc_rel = 1;
5973 inst.reloc.exp.X_add_number -= 8;
5976 /* This is a pseudo-op of the form "adrl rd, label" to be converted
5977 into a relative address of the form:
5978 add rd, pc, #low(label-.-8)"
5979 add rd, rd, #high(label-.-8)" */
5984 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
5986 /* Frag hacking will turn this into a sub instruction if the offset turns
5987 out to be negative. */
5988 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
5989 inst.reloc.pc_rel = 1;
5990 inst.size = INSN_SIZE * 2;
5991 inst.reloc.exp.X_add_number -= 8;
5997 if (!inst.operands[1].present)
5998 inst.operands[1].reg = inst.operands[0].reg;
5999 inst.instruction |= inst.operands[0].reg << 12;
6000 inst.instruction |= inst.operands[1].reg << 16;
6001 encode_arm_shifter_operand (2);
6007 if (inst.operands[0].present)
6009 constraint ((inst.instruction & 0xf0) != 0x40
6010 && inst.operands[0].imm != 0xf,
6011 "bad barrier type");
6012 inst.instruction |= inst.operands[0].imm;
6015 inst.instruction |= 0xf;
6021 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
6022 constraint (msb > 32, _("bit-field extends past end of register"));
6023 /* The instruction encoding stores the LSB and MSB,
6024 not the LSB and width. */
6025 inst.instruction |= inst.operands[0].reg << 12;
6026 inst.instruction |= inst.operands[1].imm << 7;
6027 inst.instruction |= (msb - 1) << 16;
6035 /* #0 in second position is alternative syntax for bfc, which is
6036 the same instruction but with REG_PC in the Rm field. */
6037 if (!inst.operands[1].isreg)
6038 inst.operands[1].reg = REG_PC;
6040 msb = inst.operands[2].imm + inst.operands[3].imm;
6041 constraint (msb > 32, _("bit-field extends past end of register"));
6042 /* The instruction encoding stores the LSB and MSB,
6043 not the LSB and width. */
6044 inst.instruction |= inst.operands[0].reg << 12;
6045 inst.instruction |= inst.operands[1].reg;
6046 inst.instruction |= inst.operands[2].imm << 7;
6047 inst.instruction |= (msb - 1) << 16;
6053 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
6054 _("bit-field extends past end of register"));
6055 inst.instruction |= inst.operands[0].reg << 12;
6056 inst.instruction |= inst.operands[1].reg;
6057 inst.instruction |= inst.operands[2].imm << 7;
6058 inst.instruction |= (inst.operands[3].imm - 1) << 16;
6061 /* ARM V5 breakpoint instruction (argument parse)
6062 BKPT <16 bit unsigned immediate>
6063 Instruction is not conditional.
6064 The bit pattern given in insns[] has the COND_ALWAYS condition,
6065 and it is an error if the caller tried to override that. */
6070 /* Top 12 of 16 bits to bits 19:8. */
6071 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
6073 /* Bottom 4 of 16 bits to bits 3:0. */
6074 inst.instruction |= inst.operands[0].imm & 0xf;
6078 encode_branch (int default_reloc)
6080 if (inst.operands[0].hasreloc)
6082 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
6083 _("the only suffix valid here is '(plt)'"));
6084 inst.reloc.type = BFD_RELOC_ARM_PLT32;
6088 inst.reloc.type = default_reloc;
6090 inst.reloc.pc_rel = 1;
6097 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6098 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6101 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6108 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6110 if (inst.cond == COND_ALWAYS)
6111 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6113 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
6117 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
6120 /* ARM V5 branch-link-exchange instruction (argument parse)
6121 BLX <target_addr> ie BLX(1)
6122 BLX{<condition>} <Rm> ie BLX(2)
6123 Unfortunately, there are two different opcodes for this mnemonic.
6124 So, the insns[].value is not used, and the code here zaps values
6125 into inst.instruction.
6126 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
6131 if (inst.operands[0].isreg)
6133 /* Arg is a register; the opcode provided by insns[] is correct.
6134 It is not illegal to do "blx pc", just useless. */
6135 if (inst.operands[0].reg == REG_PC)
6136 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
6138 inst.instruction |= inst.operands[0].reg;
6142 /* Arg is an address; this instruction cannot be executed
6143 conditionally, and the opcode must be adjusted. */
6144 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6145 inst.instruction = 0xfa000000;
6147 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
6148 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
6151 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
6158 if (inst.operands[0].reg == REG_PC)
6159 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
6161 inst.instruction |= inst.operands[0].reg;
6165 /* ARM v5TEJ. Jump to Jazelle code. */
6170 if (inst.operands[0].reg == REG_PC)
6171 as_tsktsk (_("use of r15 in bxj is not really useful"));
6173 inst.instruction |= inst.operands[0].reg;
6176 /* Co-processor data operation:
6177 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
6178 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
6182 inst.instruction |= inst.operands[0].reg << 8;
6183 inst.instruction |= inst.operands[1].imm << 20;
6184 inst.instruction |= inst.operands[2].reg << 12;
6185 inst.instruction |= inst.operands[3].reg << 16;
6186 inst.instruction |= inst.operands[4].reg;
6187 inst.instruction |= inst.operands[5].imm << 5;
6193 inst.instruction |= inst.operands[0].reg << 16;
6194 encode_arm_shifter_operand (1);
6197 /* Transfer between coprocessor and ARM registers.
6198 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
6203 No special properties. */
6208 inst.instruction |= inst.operands[0].reg << 8;
6209 inst.instruction |= inst.operands[1].imm << 21;
6210 inst.instruction |= inst.operands[2].reg << 12;
6211 inst.instruction |= inst.operands[3].reg << 16;
6212 inst.instruction |= inst.operands[4].reg;
6213 inst.instruction |= inst.operands[5].imm << 5;
6216 /* Transfer between coprocessor register and pair of ARM registers.
6217 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
6222 Two XScale instructions are special cases of these:
6224 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
6225 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
6227 Result unpredicatable if Rd or Rn is R15. */
6232 inst.instruction |= inst.operands[0].reg << 8;
6233 inst.instruction |= inst.operands[1].imm << 4;
6234 inst.instruction |= inst.operands[2].reg << 12;
6235 inst.instruction |= inst.operands[3].reg << 16;
6236 inst.instruction |= inst.operands[4].reg;
6242 inst.instruction |= inst.operands[0].imm << 6;
6243 inst.instruction |= inst.operands[1].imm;
6249 inst.instruction |= inst.operands[0].imm;
6255 /* There is no IT instruction in ARM mode. We
6256 process it but do not generate code for it. */
6263 int base_reg = inst.operands[0].reg;
6264 int range = inst.operands[1].imm;
6266 inst.instruction |= base_reg << 16;
6267 inst.instruction |= range;
6269 if (inst.operands[1].writeback)
6270 inst.instruction |= LDM_TYPE_2_OR_3;
6272 if (inst.operands[0].writeback)
6274 inst.instruction |= WRITE_BACK;
6275 /* Check for unpredictable uses of writeback. */
6276 if (inst.instruction & LOAD_BIT)
6278 /* Not allowed in LDM type 2. */
6279 if ((inst.instruction & LDM_TYPE_2_OR_3)
6280 && ((range & (1 << REG_PC)) == 0))
6281 as_warn (_("writeback of base register is UNPREDICTABLE"));
6282 /* Only allowed if base reg not in list for other types. */
6283 else if (range & (1 << base_reg))
6284 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
6288 /* Not allowed for type 2. */
6289 if (inst.instruction & LDM_TYPE_2_OR_3)
6290 as_warn (_("writeback of base register is UNPREDICTABLE"));
6291 /* Only allowed if base reg not in list, or first in list. */
6292 else if ((range & (1 << base_reg))
6293 && (range & ((1 << base_reg) - 1)))
6294 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
6299 /* ARMv5TE load-consecutive (argument parse)
6308 constraint (inst.operands[0].reg % 2 != 0,
6309 _("first destination register must be even"));
6310 constraint (inst.operands[1].present
6311 && inst.operands[1].reg != inst.operands[0].reg + 1,
6312 _("can only load two consecutive registers"));
6313 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6314 constraint (!inst.operands[2].isreg, _("'[' expected"));
6316 if (!inst.operands[1].present)
6317 inst.operands[1].reg = inst.operands[0].reg + 1;
6319 if (inst.instruction & LOAD_BIT)
6321 /* encode_arm_addr_mode_3 will diagnose overlap between the base
6322 register and the first register written; we have to diagnose
6323 overlap between the base and the second register written here. */
6325 if (inst.operands[2].reg == inst.operands[1].reg
6326 && (inst.operands[2].writeback || inst.operands[2].postind))
6327 as_warn (_("base register written back, and overlaps "
6328 "second destination register"));
6330 /* For an index-register load, the index register must not overlap the
6331 destination (even if not write-back). */
6332 else if (inst.operands[2].immisreg
6333 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
6334 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
6335 as_warn (_("index register overlaps destination register"));
6338 inst.instruction |= inst.operands[0].reg << 12;
6339 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
6345 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
6346 || inst.operands[1].postind || inst.operands[1].writeback
6347 || inst.operands[1].immisreg || inst.operands[1].shifted
6348 || inst.operands[1].negative
6349 /* This can arise if the programmer has written
6351 or if they have mistakenly used a register name as the last
6354 It is very difficult to distinguish between these two cases
6355 because "rX" might actually be a label. ie the register
6356 name has been occluded by a symbol of the same name. So we
6357 just generate a general 'bad addressing mode' type error
6358 message and leave it up to the programmer to discover the
6359 true cause and fix their mistake. */
6360 || (inst.operands[1].reg == REG_PC),
6363 constraint (inst.reloc.exp.X_op != O_constant
6364 || inst.reloc.exp.X_add_number != 0,
6365 _("offset must be zero in ARM encoding"));
6367 inst.instruction |= inst.operands[0].reg << 12;
6368 inst.instruction |= inst.operands[1].reg << 16;
6369 inst.reloc.type = BFD_RELOC_UNUSED;
6375 constraint (inst.operands[0].reg % 2 != 0,
6376 _("even register required"));
6377 constraint (inst.operands[1].present
6378 && inst.operands[1].reg != inst.operands[0].reg + 1,
6379 _("can only load two consecutive registers"));
6380 /* If op 1 were present and equal to PC, this function wouldn't
6381 have been called in the first place. */
6382 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
6384 inst.instruction |= inst.operands[0].reg << 12;
6385 inst.instruction |= inst.operands[2].reg << 16;
6391 inst.instruction |= inst.operands[0].reg << 12;
6392 if (!inst.operands[1].isreg)
6393 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
6395 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
6401 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6403 if (inst.operands[1].preind)
6405 constraint (inst.reloc.exp.X_op != O_constant ||
6406 inst.reloc.exp.X_add_number != 0,
6407 _("this instruction requires a post-indexed address"));
6409 inst.operands[1].preind = 0;
6410 inst.operands[1].postind = 1;
6411 inst.operands[1].writeback = 1;
6413 inst.instruction |= inst.operands[0].reg << 12;
6414 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
6417 /* Halfword and signed-byte load/store operations. */
6422 inst.instruction |= inst.operands[0].reg << 12;
6423 if (!inst.operands[1].isreg)
6424 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
6426 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
6432 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
6434 if (inst.operands[1].preind)
6436 constraint (inst.reloc.exp.X_op != O_constant ||
6437 inst.reloc.exp.X_add_number != 0,
6438 _("this instruction requires a post-indexed address"));
6440 inst.operands[1].preind = 0;
6441 inst.operands[1].postind = 1;
6442 inst.operands[1].writeback = 1;
6444 inst.instruction |= inst.operands[0].reg << 12;
6445 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
6448 /* Co-processor register load/store.
6449 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
6453 inst.instruction |= inst.operands[0].reg << 8;
6454 inst.instruction |= inst.operands[1].reg << 12;
6455 encode_arm_cp_address (2, TRUE, TRUE, 0);
6461 /* This restriction does not apply to mls (nor to mla in v6, but
6462 that's hard to detect at present). */
6463 if (inst.operands[0].reg == inst.operands[1].reg
6464 && !(inst.instruction & 0x00400000))
6465 as_tsktsk (_("rd and rm should be different in mla"));
6467 inst.instruction |= inst.operands[0].reg << 16;
6468 inst.instruction |= inst.operands[1].reg;
6469 inst.instruction |= inst.operands[2].reg << 8;
6470 inst.instruction |= inst.operands[3].reg << 12;
6477 inst.instruction |= inst.operands[0].reg << 12;
6478 encode_arm_shifter_operand (1);
6481 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
6488 top = (inst.instruction & 0x00400000) != 0;
6489 constraint (top && inst.reloc.type == BFD_RELOC_ARM_MOVW,
6490 _(":lower16: not allowed this instruction"));
6491 constraint (!top && inst.reloc.type == BFD_RELOC_ARM_MOVT,
6492 _(":upper16: not allowed instruction"));
6493 inst.instruction |= inst.operands[0].reg << 12;
6494 if (inst.reloc.type == BFD_RELOC_UNUSED)
6496 imm = inst.reloc.exp.X_add_number;
6497 /* The value is in two pieces: 0:11, 16:19. */
6498 inst.instruction |= (imm & 0x00000fff);
6499 inst.instruction |= (imm & 0x0000f000) << 4;
6506 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
6507 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
6509 _("'CPSR' or 'SPSR' expected"));
6510 inst.instruction |= inst.operands[0].reg << 12;
6511 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
6514 /* Two possible forms:
6515 "{C|S}PSR_<field>, Rm",
6516 "{C|S}PSR_f, #expression". */
6521 inst.instruction |= inst.operands[0].imm;
6522 if (inst.operands[1].isreg)
6523 inst.instruction |= inst.operands[1].reg;
6526 inst.instruction |= INST_IMMEDIATE;
6527 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
6528 inst.reloc.pc_rel = 0;
6535 if (!inst.operands[2].present)
6536 inst.operands[2].reg = inst.operands[0].reg;
6537 inst.instruction |= inst.operands[0].reg << 16;
6538 inst.instruction |= inst.operands[1].reg;
6539 inst.instruction |= inst.operands[2].reg << 8;
6541 if (inst.operands[0].reg == inst.operands[1].reg)
6542 as_tsktsk (_("rd and rm should be different in mul"));
6545 /* Long Multiply Parser
6546 UMULL RdLo, RdHi, Rm, Rs
6547 SMULL RdLo, RdHi, Rm, Rs
6548 UMLAL RdLo, RdHi, Rm, Rs
6549 SMLAL RdLo, RdHi, Rm, Rs. */
6554 inst.instruction |= inst.operands[0].reg << 12;
6555 inst.instruction |= inst.operands[1].reg << 16;
6556 inst.instruction |= inst.operands[2].reg;
6557 inst.instruction |= inst.operands[3].reg << 8;
6559 /* rdhi, rdlo and rm must all be different. */
6560 if (inst.operands[0].reg == inst.operands[1].reg
6561 || inst.operands[0].reg == inst.operands[2].reg
6562 || inst.operands[1].reg == inst.operands[2].reg)
6563 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
6569 if (inst.operands[0].present)
6571 /* Architectural NOP hints are CPSR sets with no bits selected. */
6572 inst.instruction &= 0xf0000000;
6573 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
6577 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
6578 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
6579 Condition defaults to COND_ALWAYS.
6580 Error if Rd, Rn or Rm are R15. */
6585 inst.instruction |= inst.operands[0].reg << 12;
6586 inst.instruction |= inst.operands[1].reg << 16;
6587 inst.instruction |= inst.operands[2].reg;
6588 if (inst.operands[3].present)
6589 encode_arm_shift (3);
6592 /* ARM V6 PKHTB (Argument Parse). */
6597 if (!inst.operands[3].present)
6599 /* If the shift specifier is omitted, turn the instruction
6600 into pkhbt rd, rm, rn. */
6601 inst.instruction &= 0xfff00010;
6602 inst.instruction |= inst.operands[0].reg << 12;
6603 inst.instruction |= inst.operands[1].reg;
6604 inst.instruction |= inst.operands[2].reg << 16;
6608 inst.instruction |= inst.operands[0].reg << 12;
6609 inst.instruction |= inst.operands[1].reg << 16;
6610 inst.instruction |= inst.operands[2].reg;
6611 encode_arm_shift (3);
6615 /* ARMv5TE: Preload-Cache
6619 Syntactically, like LDR with B=1, W=0, L=1. */
6624 constraint (!inst.operands[0].isreg,
6625 _("'[' expected after PLD mnemonic"));
6626 constraint (inst.operands[0].postind,
6627 _("post-indexed expression used in preload instruction"));
6628 constraint (inst.operands[0].writeback,
6629 _("writeback used in preload instruction"));
6630 constraint (!inst.operands[0].preind,
6631 _("unindexed addressing used in preload instruction"));
6632 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6635 /* ARMv7: PLI <addr_mode> */
6639 constraint (!inst.operands[0].isreg,
6640 _("'[' expected after PLI mnemonic"));
6641 constraint (inst.operands[0].postind,
6642 _("post-indexed expression used in preload instruction"));
6643 constraint (inst.operands[0].writeback,
6644 _("writeback used in preload instruction"));
6645 constraint (!inst.operands[0].preind,
6646 _("unindexed addressing used in preload instruction"));
6647 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6648 inst.instruction &= ~PRE_INDEX;
6654 inst.operands[1] = inst.operands[0];
6655 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
6656 inst.operands[0].isreg = 1;
6657 inst.operands[0].writeback = 1;
6658 inst.operands[0].reg = REG_SP;
6662 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
6663 word at the specified address and the following word
6665 Unconditionally executed.
6666 Error if Rn is R15. */
6671 inst.instruction |= inst.operands[0].reg << 16;
6672 if (inst.operands[0].writeback)
6673 inst.instruction |= WRITE_BACK;
6676 /* ARM V6 ssat (argument parse). */
6681 inst.instruction |= inst.operands[0].reg << 12;
6682 inst.instruction |= (inst.operands[1].imm - 1) << 16;
6683 inst.instruction |= inst.operands[2].reg;
6685 if (inst.operands[3].present)
6686 encode_arm_shift (3);
6689 /* ARM V6 usat (argument parse). */
6694 inst.instruction |= inst.operands[0].reg << 12;
6695 inst.instruction |= inst.operands[1].imm << 16;
6696 inst.instruction |= inst.operands[2].reg;
6698 if (inst.operands[3].present)
6699 encode_arm_shift (3);
6702 /* ARM V6 ssat16 (argument parse). */
6707 inst.instruction |= inst.operands[0].reg << 12;
6708 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
6709 inst.instruction |= inst.operands[2].reg;
6715 inst.instruction |= inst.operands[0].reg << 12;
6716 inst.instruction |= inst.operands[1].imm << 16;
6717 inst.instruction |= inst.operands[2].reg;
6720 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
6721 preserving the other bits.
6723 setend <endian_specifier>, where <endian_specifier> is either
6729 if (inst.operands[0].imm)
6730 inst.instruction |= 0x200;
6736 unsigned int Rm = (inst.operands[1].present
6737 ? inst.operands[1].reg
6738 : inst.operands[0].reg);
6740 inst.instruction |= inst.operands[0].reg << 12;
6741 inst.instruction |= Rm;
6742 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
6744 inst.instruction |= inst.operands[2].reg << 8;
6745 inst.instruction |= SHIFT_BY_REG;
6748 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6754 inst.reloc.type = BFD_RELOC_ARM_SMC;
6755 inst.reloc.pc_rel = 0;
6761 inst.reloc.type = BFD_RELOC_ARM_SWI;
6762 inst.reloc.pc_rel = 0;
6765 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
6766 SMLAxy{cond} Rd,Rm,Rs,Rn
6767 SMLAWy{cond} Rd,Rm,Rs,Rn
6768 Error if any register is R15. */
6773 inst.instruction |= inst.operands[0].reg << 16;
6774 inst.instruction |= inst.operands[1].reg;
6775 inst.instruction |= inst.operands[2].reg << 8;
6776 inst.instruction |= inst.operands[3].reg << 12;
6779 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
6780 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
6781 Error if any register is R15.
6782 Warning if Rdlo == Rdhi. */
6787 inst.instruction |= inst.operands[0].reg << 12;
6788 inst.instruction |= inst.operands[1].reg << 16;
6789 inst.instruction |= inst.operands[2].reg;
6790 inst.instruction |= inst.operands[3].reg << 8;
6792 if (inst.operands[0].reg == inst.operands[1].reg)
6793 as_tsktsk (_("rdhi and rdlo must be different"));
6796 /* ARM V5E (El Segundo) signed-multiply (argument parse)
6797 SMULxy{cond} Rd,Rm,Rs
6798 Error if any register is R15. */
6803 inst.instruction |= inst.operands[0].reg << 16;
6804 inst.instruction |= inst.operands[1].reg;
6805 inst.instruction |= inst.operands[2].reg << 8;
6808 /* ARM V6 srs (argument parse). */
6813 inst.instruction |= inst.operands[0].imm;
6814 if (inst.operands[0].writeback)
6815 inst.instruction |= WRITE_BACK;
6818 /* ARM V6 strex (argument parse). */
6823 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
6824 || inst.operands[2].postind || inst.operands[2].writeback
6825 || inst.operands[2].immisreg || inst.operands[2].shifted
6826 || inst.operands[2].negative
6827 /* See comment in do_ldrex(). */
6828 || (inst.operands[2].reg == REG_PC),
6831 constraint (inst.operands[0].reg == inst.operands[1].reg
6832 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
6834 constraint (inst.reloc.exp.X_op != O_constant
6835 || inst.reloc.exp.X_add_number != 0,
6836 _("offset must be zero in ARM encoding"));
6838 inst.instruction |= inst.operands[0].reg << 12;
6839 inst.instruction |= inst.operands[1].reg;
6840 inst.instruction |= inst.operands[2].reg << 16;
6841 inst.reloc.type = BFD_RELOC_UNUSED;
6847 constraint (inst.operands[1].reg % 2 != 0,
6848 _("even register required"));
6849 constraint (inst.operands[2].present
6850 && inst.operands[2].reg != inst.operands[1].reg + 1,
6851 _("can only store two consecutive registers"));
6852 /* If op 2 were present and equal to PC, this function wouldn't
6853 have been called in the first place. */
6854 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
6856 constraint (inst.operands[0].reg == inst.operands[1].reg
6857 || inst.operands[0].reg == inst.operands[1].reg + 1
6858 || inst.operands[0].reg == inst.operands[3].reg,
6861 inst.instruction |= inst.operands[0].reg << 12;
6862 inst.instruction |= inst.operands[1].reg;
6863 inst.instruction |= inst.operands[3].reg << 16;
6866 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
6867 extends it to 32-bits, and adds the result to a value in another
6868 register. You can specify a rotation by 0, 8, 16, or 24 bits
6869 before extracting the 16-bit value.
6870 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
6871 Condition defaults to COND_ALWAYS.
6872 Error if any register uses R15. */
6877 inst.instruction |= inst.operands[0].reg << 12;
6878 inst.instruction |= inst.operands[1].reg << 16;
6879 inst.instruction |= inst.operands[2].reg;
6880 inst.instruction |= inst.operands[3].imm << 10;
6885 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
6886 Condition defaults to COND_ALWAYS.
6887 Error if any register uses R15. */
6892 inst.instruction |= inst.operands[0].reg << 12;
6893 inst.instruction |= inst.operands[1].reg;
6894 inst.instruction |= inst.operands[2].imm << 10;
6897 /* VFP instructions. In a logical order: SP variant first, monad
6898 before dyad, arithmetic then move then load/store. */
6901 do_vfp_sp_monadic (void)
6903 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6904 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
6908 do_vfp_sp_dyadic (void)
6910 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6911 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
6912 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
6916 do_vfp_sp_compare_z (void)
6918 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6922 do_vfp_dp_sp_cvt (void)
6924 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6925 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
6929 do_vfp_sp_dp_cvt (void)
6931 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6932 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
6936 do_vfp_reg_from_sp (void)
6938 inst.instruction |= inst.operands[0].reg << 12;
6939 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
6943 do_vfp_reg2_from_sp2 (void)
6945 constraint (inst.operands[2].imm != 2,
6946 _("only two consecutive VFP SP registers allowed here"));
6947 inst.instruction |= inst.operands[0].reg << 12;
6948 inst.instruction |= inst.operands[1].reg << 16;
6949 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
6953 do_vfp_sp_from_reg (void)
6955 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
6956 inst.instruction |= inst.operands[1].reg << 12;
6960 do_vfp_sp2_from_reg2 (void)
6962 constraint (inst.operands[0].imm != 2,
6963 _("only two consecutive VFP SP registers allowed here"));
6964 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
6965 inst.instruction |= inst.operands[1].reg << 12;
6966 inst.instruction |= inst.operands[2].reg << 16;
6970 do_vfp_sp_ldst (void)
6972 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6973 encode_arm_cp_address (1, FALSE, TRUE, 0);
6977 do_vfp_dp_ldst (void)
6979 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6980 encode_arm_cp_address (1, FALSE, TRUE, 0);
6985 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
6987 if (inst.operands[0].writeback)
6988 inst.instruction |= WRITE_BACK;
6990 constraint (ldstm_type != VFP_LDSTMIA,
6991 _("this addressing mode requires base-register writeback"));
6992 inst.instruction |= inst.operands[0].reg << 16;
6993 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
6994 inst.instruction |= inst.operands[1].imm;
6998 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
7002 if (inst.operands[0].writeback)
7003 inst.instruction |= WRITE_BACK;
7005 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
7006 _("this addressing mode requires base-register writeback"));
7008 inst.instruction |= inst.operands[0].reg << 16;
7009 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7011 count = inst.operands[1].imm << 1;
7012 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
7015 inst.instruction |= count;
7019 do_vfp_sp_ldstmia (void)
7021 vfp_sp_ldstm (VFP_LDSTMIA);
7025 do_vfp_sp_ldstmdb (void)
7027 vfp_sp_ldstm (VFP_LDSTMDB);
7031 do_vfp_dp_ldstmia (void)
7033 vfp_dp_ldstm (VFP_LDSTMIA);
7037 do_vfp_dp_ldstmdb (void)
7039 vfp_dp_ldstm (VFP_LDSTMDB);
7043 do_vfp_xp_ldstmia (void)
7045 vfp_dp_ldstm (VFP_LDSTMIAX);
7049 do_vfp_xp_ldstmdb (void)
7051 vfp_dp_ldstm (VFP_LDSTMDBX);
7055 do_vfp_dp_rd_rm (void)
7057 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7058 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
7062 do_vfp_dp_rn_rd (void)
7064 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
7065 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7069 do_vfp_dp_rd_rn (void)
7071 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7072 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7076 do_vfp_dp_rd_rn_rm (void)
7078 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7079 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
7080 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
7086 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7090 do_vfp_dp_rm_rd_rn (void)
7092 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
7093 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
7094 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
7097 /* VFPv3 instructions. */
7099 do_vfp_sp_const (void)
7101 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7102 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7103 inst.instruction |= (inst.operands[1].imm >> 4);
7107 do_vfp_dp_const (void)
7109 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7110 inst.instruction |= (inst.operands[1].imm & 15) << 16;
7111 inst.instruction |= (inst.operands[1].imm >> 4);
7115 vfp_conv (int srcsize)
7117 unsigned immbits = srcsize - inst.operands[1].imm;
7118 inst.instruction |= (immbits & 1) << 5;
7119 inst.instruction |= (immbits >> 1);
7123 do_vfp_sp_conv_16 (void)
7125 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7130 do_vfp_dp_conv_16 (void)
7132 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7137 do_vfp_sp_conv_32 (void)
7139 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
7144 do_vfp_dp_conv_32 (void)
7146 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
7151 /* FPA instructions. Also in a logical order. */
7156 inst.instruction |= inst.operands[0].reg << 16;
7157 inst.instruction |= inst.operands[1].reg;
7161 do_fpa_ldmstm (void)
7163 inst.instruction |= inst.operands[0].reg << 12;
7164 switch (inst.operands[1].imm)
7166 case 1: inst.instruction |= CP_T_X; break;
7167 case 2: inst.instruction |= CP_T_Y; break;
7168 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
7173 if (inst.instruction & (PRE_INDEX | INDEX_UP))
7175 /* The instruction specified "ea" or "fd", so we can only accept
7176 [Rn]{!}. The instruction does not really support stacking or
7177 unstacking, so we have to emulate these by setting appropriate
7178 bits and offsets. */
7179 constraint (inst.reloc.exp.X_op != O_constant
7180 || inst.reloc.exp.X_add_number != 0,
7181 _("this instruction does not support indexing"));
7183 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
7184 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
7186 if (!(inst.instruction & INDEX_UP))
7187 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
7189 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
7191 inst.operands[2].preind = 0;
7192 inst.operands[2].postind = 1;
7196 encode_arm_cp_address (2, TRUE, TRUE, 0);
7199 /* iWMMXt instructions: strictly in alphabetical order. */
7202 do_iwmmxt_tandorc (void)
7204 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
7208 do_iwmmxt_textrc (void)
7210 inst.instruction |= inst.operands[0].reg << 12;
7211 inst.instruction |= inst.operands[1].imm;
7215 do_iwmmxt_textrm (void)
7217 inst.instruction |= inst.operands[0].reg << 12;
7218 inst.instruction |= inst.operands[1].reg << 16;
7219 inst.instruction |= inst.operands[2].imm;
7223 do_iwmmxt_tinsr (void)
7225 inst.instruction |= inst.operands[0].reg << 16;
7226 inst.instruction |= inst.operands[1].reg << 12;
7227 inst.instruction |= inst.operands[2].imm;
7231 do_iwmmxt_tmia (void)
7233 inst.instruction |= inst.operands[0].reg << 5;
7234 inst.instruction |= inst.operands[1].reg;
7235 inst.instruction |= inst.operands[2].reg << 12;
7239 do_iwmmxt_waligni (void)
7241 inst.instruction |= inst.operands[0].reg << 12;
7242 inst.instruction |= inst.operands[1].reg << 16;
7243 inst.instruction |= inst.operands[2].reg;
7244 inst.instruction |= inst.operands[3].imm << 20;
7248 do_iwmmxt_wmov (void)
7250 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
7251 inst.instruction |= inst.operands[0].reg << 12;
7252 inst.instruction |= inst.operands[1].reg << 16;
7253 inst.instruction |= inst.operands[1].reg;
7257 do_iwmmxt_wldstbh (void)
7260 inst.instruction |= inst.operands[0].reg << 12;
7262 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
7264 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
7265 encode_arm_cp_address (1, TRUE, FALSE, reloc);
7269 do_iwmmxt_wldstw (void)
7271 /* RIWR_RIWC clears .isreg for a control register. */
7272 if (!inst.operands[0].isreg)
7274 constraint (inst.cond != COND_ALWAYS, BAD_COND);
7275 inst.instruction |= 0xf0000000;
7278 inst.instruction |= inst.operands[0].reg << 12;
7279 encode_arm_cp_address (1, TRUE, TRUE, 0);
7283 do_iwmmxt_wldstd (void)
7285 inst.instruction |= inst.operands[0].reg << 12;
7286 encode_arm_cp_address (1, TRUE, FALSE, 0);
7290 do_iwmmxt_wshufh (void)
7292 inst.instruction |= inst.operands[0].reg << 12;
7293 inst.instruction |= inst.operands[1].reg << 16;
7294 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
7295 inst.instruction |= (inst.operands[2].imm & 0x0f);
7299 do_iwmmxt_wzero (void)
7301 /* WZERO reg is an alias for WANDN reg, reg, reg. */
7302 inst.instruction |= inst.operands[0].reg;
7303 inst.instruction |= inst.operands[0].reg << 12;
7304 inst.instruction |= inst.operands[0].reg << 16;
7307 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
7308 operations first, then control, shift, and load/store. */
7310 /* Insns like "foo X,Y,Z". */
7313 do_mav_triple (void)
7315 inst.instruction |= inst.operands[0].reg << 16;
7316 inst.instruction |= inst.operands[1].reg;
7317 inst.instruction |= inst.operands[2].reg << 12;
7320 /* Insns like "foo W,X,Y,Z".
7321 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
7326 inst.instruction |= inst.operands[0].reg << 5;
7327 inst.instruction |= inst.operands[1].reg << 12;
7328 inst.instruction |= inst.operands[2].reg << 16;
7329 inst.instruction |= inst.operands[3].reg;
7332 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
7336 inst.instruction |= inst.operands[1].reg << 12;
7339 /* Maverick shift immediate instructions.
7340 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
7341 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
7346 int imm = inst.operands[2].imm;
7348 inst.instruction |= inst.operands[0].reg << 12;
7349 inst.instruction |= inst.operands[1].reg << 16;
7351 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
7352 Bits 5-7 of the insn should have bits 4-6 of the immediate.
7353 Bit 4 should be 0. */
7354 imm = (imm & 0xf) | ((imm & 0x70) << 1);
7356 inst.instruction |= imm;
7359 /* XScale instructions. Also sorted arithmetic before move. */
7361 /* Xscale multiply-accumulate (argument parse)
7364 MIAxycc acc0,Rm,Rs. */
7369 inst.instruction |= inst.operands[1].reg;
7370 inst.instruction |= inst.operands[2].reg << 12;
7373 /* Xscale move-accumulator-register (argument parse)
7375 MARcc acc0,RdLo,RdHi. */
7380 inst.instruction |= inst.operands[1].reg << 12;
7381 inst.instruction |= inst.operands[2].reg << 16;
7384 /* Xscale move-register-accumulator (argument parse)
7386 MRAcc RdLo,RdHi,acc0. */
7391 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
7392 inst.instruction |= inst.operands[0].reg << 12;
7393 inst.instruction |= inst.operands[1].reg << 16;
7396 /* Encoding functions relevant only to Thumb. */
7398 /* inst.operands[i] is a shifted-register operand; encode
7399 it into inst.instruction in the format used by Thumb32. */
7402 encode_thumb32_shifted_operand (int i)
7404 unsigned int value = inst.reloc.exp.X_add_number;
7405 unsigned int shift = inst.operands[i].shift_kind;
7407 constraint (inst.operands[i].immisreg,
7408 _("shift by register not allowed in thumb mode"));
7409 inst.instruction |= inst.operands[i].reg;
7410 if (shift == SHIFT_RRX)
7411 inst.instruction |= SHIFT_ROR << 4;
7414 constraint (inst.reloc.exp.X_op != O_constant,
7415 _("expression too complex"));
7417 constraint (value > 32
7418 || (value == 32 && (shift == SHIFT_LSL
7419 || shift == SHIFT_ROR)),
7420 _("shift expression is too large"));
7424 else if (value == 32)
7427 inst.instruction |= shift << 4;
7428 inst.instruction |= (value & 0x1c) << 10;
7429 inst.instruction |= (value & 0x03) << 6;
7434 /* inst.operands[i] was set up by parse_address. Encode it into a
7435 Thumb32 format load or store instruction. Reject forms that cannot
7436 be used with such instructions. If is_t is true, reject forms that
7437 cannot be used with a T instruction; if is_d is true, reject forms
7438 that cannot be used with a D instruction. */
7441 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
7443 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
7445 constraint (!inst.operands[i].isreg,
7446 _("Instruction does not support =N addresses"));
7448 inst.instruction |= inst.operands[i].reg << 16;
7449 if (inst.operands[i].immisreg)
7451 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
7452 constraint (is_t || is_d, _("cannot use register index with this instruction"));
7453 constraint (inst.operands[i].negative,
7454 _("Thumb does not support negative register indexing"));
7455 constraint (inst.operands[i].postind,
7456 _("Thumb does not support register post-indexing"));
7457 constraint (inst.operands[i].writeback,
7458 _("Thumb does not support register indexing with writeback"));
7459 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
7460 _("Thumb supports only LSL in shifted register indexing"));
7462 inst.instruction |= inst.operands[i].imm;
7463 if (inst.operands[i].shifted)
7465 constraint (inst.reloc.exp.X_op != O_constant,
7466 _("expression too complex"));
7467 constraint (inst.reloc.exp.X_add_number < 0
7468 || inst.reloc.exp.X_add_number > 3,
7469 _("shift out of range"));
7470 inst.instruction |= inst.reloc.exp.X_add_number << 4;
7472 inst.reloc.type = BFD_RELOC_UNUSED;
7474 else if (inst.operands[i].preind)
7476 constraint (is_pc && inst.operands[i].writeback,
7477 _("cannot use writeback with PC-relative addressing"));
7478 constraint (is_t && inst.operands[i].writeback,
7479 _("cannot use writeback with this instruction"));
7483 inst.instruction |= 0x01000000;
7484 if (inst.operands[i].writeback)
7485 inst.instruction |= 0x00200000;
7489 inst.instruction |= 0x00000c00;
7490 if (inst.operands[i].writeback)
7491 inst.instruction |= 0x00000100;
7493 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
7495 else if (inst.operands[i].postind)
7497 assert (inst.operands[i].writeback);
7498 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
7499 constraint (is_t, _("cannot use post-indexing with this instruction"));
7502 inst.instruction |= 0x00200000;
7504 inst.instruction |= 0x00000900;
7505 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
7507 else /* unindexed - only for coprocessor */
7508 inst.error = _("instruction does not accept unindexed addressing");
7511 /* Table of Thumb instructions which exist in both 16- and 32-bit
7512 encodings (the latter only in post-V6T2 cores). The index is the
7513 value used in the insns table below. When there is more than one
7514 possible 16-bit encoding for the instruction, this table always
7516 Also contains several pseudo-instructions used during relaxation. */
7517 #define T16_32_TAB \
7518 X(adc, 4140, eb400000), \
7519 X(adcs, 4140, eb500000), \
7520 X(add, 1c00, eb000000), \
7521 X(adds, 1c00, eb100000), \
7522 X(addi, 0000, f1000000), \
7523 X(addis, 0000, f1100000), \
7524 X(add_pc,000f, f20f0000), \
7525 X(add_sp,000d, f10d0000), \
7526 X(adr, 000f, f20f0000), \
7527 X(and, 4000, ea000000), \
7528 X(ands, 4000, ea100000), \
7529 X(asr, 1000, fa40f000), \
7530 X(asrs, 1000, fa50f000), \
7531 X(b, e000, f000b000), \
7532 X(bcond, d000, f0008000), \
7533 X(bic, 4380, ea200000), \
7534 X(bics, 4380, ea300000), \
7535 X(cmn, 42c0, eb100f00), \
7536 X(cmp, 2800, ebb00f00), \
7537 X(cpsie, b660, f3af8400), \
7538 X(cpsid, b670, f3af8600), \
7539 X(cpy, 4600, ea4f0000), \
7540 X(dec_sp,80dd, f1bd0d00), \
7541 X(eor, 4040, ea800000), \
7542 X(eors, 4040, ea900000), \
7543 X(inc_sp,00dd, f10d0d00), \
7544 X(ldmia, c800, e8900000), \
7545 X(ldr, 6800, f8500000), \
7546 X(ldrb, 7800, f8100000), \
7547 X(ldrh, 8800, f8300000), \
7548 X(ldrsb, 5600, f9100000), \
7549 X(ldrsh, 5e00, f9300000), \
7550 X(ldr_pc,4800, f85f0000), \
7551 X(ldr_pc2,4800, f85f0000), \
7552 X(ldr_sp,9800, f85d0000), \
7553 X(lsl, 0000, fa00f000), \
7554 X(lsls, 0000, fa10f000), \
7555 X(lsr, 0800, fa20f000), \
7556 X(lsrs, 0800, fa30f000), \
7557 X(mov, 2000, ea4f0000), \
7558 X(movs, 2000, ea5f0000), \
7559 X(mul, 4340, fb00f000), \
7560 X(muls, 4340, ffffffff), /* no 32b muls */ \
7561 X(mvn, 43c0, ea6f0000), \
7562 X(mvns, 43c0, ea7f0000), \
7563 X(neg, 4240, f1c00000), /* rsb #0 */ \
7564 X(negs, 4240, f1d00000), /* rsbs #0 */ \
7565 X(orr, 4300, ea400000), \
7566 X(orrs, 4300, ea500000), \
7567 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
7568 X(push, b400, e92d0000), /* stmdb sp!,... */ \
7569 X(rev, ba00, fa90f080), \
7570 X(rev16, ba40, fa90f090), \
7571 X(revsh, bac0, fa90f0b0), \
7572 X(ror, 41c0, fa60f000), \
7573 X(rors, 41c0, fa70f000), \
7574 X(sbc, 4180, eb600000), \
7575 X(sbcs, 4180, eb700000), \
7576 X(stmia, c000, e8800000), \
7577 X(str, 6000, f8400000), \
7578 X(strb, 7000, f8000000), \
7579 X(strh, 8000, f8200000), \
7580 X(str_sp,9000, f84d0000), \
7581 X(sub, 1e00, eba00000), \
7582 X(subs, 1e00, ebb00000), \
7583 X(subi, 8000, f1a00000), \
7584 X(subis, 8000, f1b00000), \
7585 X(sxtb, b240, fa4ff080), \
7586 X(sxth, b200, fa0ff080), \
7587 X(tst, 4200, ea100f00), \
7588 X(uxtb, b2c0, fa5ff080), \
7589 X(uxth, b280, fa1ff080), \
7590 X(nop, bf00, f3af8000), \
7591 X(yield, bf10, f3af8001), \
7592 X(wfe, bf20, f3af8002), \
7593 X(wfi, bf30, f3af8003), \
7594 X(sev, bf40, f3af9004), /* typo, 8004? */
7596 /* To catch errors in encoding functions, the codes are all offset by
7597 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
7598 as 16-bit instructions. */
7599 #define X(a,b,c) T_MNEM_##a
7600 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
7603 #define X(a,b,c) 0x##b
7604 static const unsigned short thumb_op16[] = { T16_32_TAB };
7605 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
7608 #define X(a,b,c) 0x##c
7609 static const unsigned int thumb_op32[] = { T16_32_TAB };
7610 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
7611 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
7615 /* Thumb instruction encoders, in alphabetical order. */
7619 do_t_add_sub_w (void)
7623 Rd = inst.operands[0].reg;
7624 Rn = inst.operands[1].reg;
7626 constraint (Rd == 15, _("PC not allowed as destination"));
7627 inst.instruction |= (Rn << 16) | (Rd << 8);
7628 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
7631 /* Parse an add or subtract instruction. We get here with inst.instruction
7632 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
7639 Rd = inst.operands[0].reg;
7640 Rs = (inst.operands[1].present
7641 ? inst.operands[1].reg /* Rd, Rs, foo */
7642 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7650 flags = (inst.instruction == T_MNEM_adds
7651 || inst.instruction == T_MNEM_subs);
7653 narrow = (current_it_mask == 0);
7655 narrow = (current_it_mask != 0);
7656 if (!inst.operands[2].isreg)
7659 if (inst.size_req != 4)
7663 add = (inst.instruction == T_MNEM_add
7664 || inst.instruction == T_MNEM_adds);
7665 /* Attempt to use a narrow opcode, with relaxation if
7667 if (Rd == REG_SP && Rs == REG_SP && !flags)
7668 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
7669 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
7670 opcode = T_MNEM_add_sp;
7671 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
7672 opcode = T_MNEM_add_pc;
7673 else if (Rd <= 7 && Rs <= 7 && narrow)
7676 opcode = add ? T_MNEM_addis : T_MNEM_subis;
7678 opcode = add ? T_MNEM_addi : T_MNEM_subi;
7682 inst.instruction = THUMB_OP16(opcode);
7683 inst.instruction |= (Rd << 4) | Rs;
7684 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7685 if (inst.size_req != 2)
7686 inst.relax = opcode;
7689 constraint (inst.size_req == 2, BAD_HIREG);
7691 if (inst.size_req == 4
7692 || (inst.size_req != 2 && !opcode))
7694 /* ??? Convert large immediates to addw/subw. */
7695 inst.instruction = THUMB_OP32 (inst.instruction);
7696 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7697 inst.instruction |= inst.operands[0].reg << 8;
7698 inst.instruction |= inst.operands[1].reg << 16;
7699 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7704 Rn = inst.operands[2].reg;
7705 /* See if we can do this with a 16-bit instruction. */
7706 if (!inst.operands[2].shifted && inst.size_req != 4)
7708 if (Rd > 7 || Rs > 7 || Rn > 7)
7713 inst.instruction = ((inst.instruction == T_MNEM_adds
7714 || inst.instruction == T_MNEM_add)
7717 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7721 if (inst.instruction == T_MNEM_add)
7725 inst.instruction = T_OPCODE_ADD_HI;
7726 inst.instruction |= (Rd & 8) << 4;
7727 inst.instruction |= (Rd & 7);
7728 inst.instruction |= Rn << 3;
7731 /* ... because addition is commutative! */
7734 inst.instruction = T_OPCODE_ADD_HI;
7735 inst.instruction |= (Rd & 8) << 4;
7736 inst.instruction |= (Rd & 7);
7737 inst.instruction |= Rs << 3;
7742 /* If we get here, it can't be done in 16 bits. */
7743 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
7744 _("shift must be constant"));
7745 inst.instruction = THUMB_OP32 (inst.instruction);
7746 inst.instruction |= Rd << 8;
7747 inst.instruction |= Rs << 16;
7748 encode_thumb32_shifted_operand (2);
7753 constraint (inst.instruction == T_MNEM_adds
7754 || inst.instruction == T_MNEM_subs,
7757 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
7759 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
7760 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
7763 inst.instruction = (inst.instruction == T_MNEM_add
7765 inst.instruction |= (Rd << 4) | Rs;
7766 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7770 Rn = inst.operands[2].reg;
7771 constraint (inst.operands[2].shifted, _("unshifted register required"));
7773 /* We now have Rd, Rs, and Rn set to registers. */
7774 if (Rd > 7 || Rs > 7 || Rn > 7)
7776 /* Can't do this for SUB. */
7777 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
7778 inst.instruction = T_OPCODE_ADD_HI;
7779 inst.instruction |= (Rd & 8) << 4;
7780 inst.instruction |= (Rd & 7);
7782 inst.instruction |= Rn << 3;
7784 inst.instruction |= Rs << 3;
7786 constraint (1, _("dest must overlap one source register"));
7790 inst.instruction = (inst.instruction == T_MNEM_add
7791 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
7792 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7800 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
7802 /* Defer to section relaxation. */
7803 inst.relax = inst.instruction;
7804 inst.instruction = THUMB_OP16 (inst.instruction);
7805 inst.instruction |= inst.operands[0].reg << 4;
7807 else if (unified_syntax && inst.size_req != 2)
7809 /* Generate a 32-bit opcode. */
7810 inst.instruction = THUMB_OP32 (inst.instruction);
7811 inst.instruction |= inst.operands[0].reg << 8;
7812 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
7813 inst.reloc.pc_rel = 1;
7817 /* Generate a 16-bit opcode. */
7818 inst.instruction = THUMB_OP16 (inst.instruction);
7819 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7820 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
7821 inst.reloc.pc_rel = 1;
7823 inst.instruction |= inst.operands[0].reg << 4;
7827 /* Arithmetic instructions for which there is just one 16-bit
7828 instruction encoding, and it allows only two low registers.
7829 For maximal compatibility with ARM syntax, we allow three register
7830 operands even when Thumb-32 instructions are not available, as long
7831 as the first two are identical. For instance, both "sbc r0,r1" and
7832 "sbc r0,r0,r1" are allowed. */
7838 Rd = inst.operands[0].reg;
7839 Rs = (inst.operands[1].present
7840 ? inst.operands[1].reg /* Rd, Rs, foo */
7841 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7842 Rn = inst.operands[2].reg;
7846 if (!inst.operands[2].isreg)
7848 /* For an immediate, we always generate a 32-bit opcode;
7849 section relaxation will shrink it later if possible. */
7850 inst.instruction = THUMB_OP32 (inst.instruction);
7851 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7852 inst.instruction |= Rd << 8;
7853 inst.instruction |= Rs << 16;
7854 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7860 /* See if we can do this with a 16-bit instruction. */
7861 if (THUMB_SETS_FLAGS (inst.instruction))
7862 narrow = current_it_mask == 0;
7864 narrow = current_it_mask != 0;
7866 if (Rd > 7 || Rn > 7 || Rs > 7)
7868 if (inst.operands[2].shifted)
7870 if (inst.size_req == 4)
7876 inst.instruction = THUMB_OP16 (inst.instruction);
7877 inst.instruction |= Rd;
7878 inst.instruction |= Rn << 3;
7882 /* If we get here, it can't be done in 16 bits. */
7883 constraint (inst.operands[2].shifted
7884 && inst.operands[2].immisreg,
7885 _("shift must be constant"));
7886 inst.instruction = THUMB_OP32 (inst.instruction);
7887 inst.instruction |= Rd << 8;
7888 inst.instruction |= Rs << 16;
7889 encode_thumb32_shifted_operand (2);
7894 /* On its face this is a lie - the instruction does set the
7895 flags. However, the only supported mnemonic in this mode
7897 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
7899 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
7900 _("unshifted register required"));
7901 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
7902 constraint (Rd != Rs,
7903 _("dest and source1 must be the same register"));
7905 inst.instruction = THUMB_OP16 (inst.instruction);
7906 inst.instruction |= Rd;
7907 inst.instruction |= Rn << 3;
7911 /* Similarly, but for instructions where the arithmetic operation is
7912 commutative, so we can allow either of them to be different from
7913 the destination operand in a 16-bit instruction. For instance, all
7914 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
7921 Rd = inst.operands[0].reg;
7922 Rs = (inst.operands[1].present
7923 ? inst.operands[1].reg /* Rd, Rs, foo */
7924 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7925 Rn = inst.operands[2].reg;
7929 if (!inst.operands[2].isreg)
7931 /* For an immediate, we always generate a 32-bit opcode;
7932 section relaxation will shrink it later if possible. */
7933 inst.instruction = THUMB_OP32 (inst.instruction);
7934 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7935 inst.instruction |= Rd << 8;
7936 inst.instruction |= Rs << 16;
7937 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7943 /* See if we can do this with a 16-bit instruction. */
7944 if (THUMB_SETS_FLAGS (inst.instruction))
7945 narrow = current_it_mask == 0;
7947 narrow = current_it_mask != 0;
7949 if (Rd > 7 || Rn > 7 || Rs > 7)
7951 if (inst.operands[2].shifted)
7953 if (inst.size_req == 4)
7960 inst.instruction = THUMB_OP16 (inst.instruction);
7961 inst.instruction |= Rd;
7962 inst.instruction |= Rn << 3;
7967 inst.instruction = THUMB_OP16 (inst.instruction);
7968 inst.instruction |= Rd;
7969 inst.instruction |= Rs << 3;
7974 /* If we get here, it can't be done in 16 bits. */
7975 constraint (inst.operands[2].shifted
7976 && inst.operands[2].immisreg,
7977 _("shift must be constant"));
7978 inst.instruction = THUMB_OP32 (inst.instruction);
7979 inst.instruction |= Rd << 8;
7980 inst.instruction |= Rs << 16;
7981 encode_thumb32_shifted_operand (2);
7986 /* On its face this is a lie - the instruction does set the
7987 flags. However, the only supported mnemonic in this mode
7989 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
7991 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
7992 _("unshifted register required"));
7993 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
7995 inst.instruction = THUMB_OP16 (inst.instruction);
7996 inst.instruction |= Rd;
7999 inst.instruction |= Rn << 3;
8001 inst.instruction |= Rs << 3;
8003 constraint (1, _("dest must overlap one source register"));
8010 if (inst.operands[0].present)
8012 constraint ((inst.instruction & 0xf0) != 0x40
8013 && inst.operands[0].imm != 0xf,
8014 "bad barrier type");
8015 inst.instruction |= inst.operands[0].imm;
8018 inst.instruction |= 0xf;
8024 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
8025 constraint (msb > 32, _("bit-field extends past end of register"));
8026 /* The instruction encoding stores the LSB and MSB,
8027 not the LSB and width. */
8028 inst.instruction |= inst.operands[0].reg << 8;
8029 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
8030 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
8031 inst.instruction |= msb - 1;
8039 /* #0 in second position is alternative syntax for bfc, which is
8040 the same instruction but with REG_PC in the Rm field. */
8041 if (!inst.operands[1].isreg)
8042 inst.operands[1].reg = REG_PC;
8044 msb = inst.operands[2].imm + inst.operands[3].imm;
8045 constraint (msb > 32, _("bit-field extends past end of register"));
8046 /* The instruction encoding stores the LSB and MSB,
8047 not the LSB and width. */
8048 inst.instruction |= inst.operands[0].reg << 8;
8049 inst.instruction |= inst.operands[1].reg << 16;
8050 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8051 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8052 inst.instruction |= msb - 1;
8058 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
8059 _("bit-field extends past end of register"));
8060 inst.instruction |= inst.operands[0].reg << 8;
8061 inst.instruction |= inst.operands[1].reg << 16;
8062 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
8063 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
8064 inst.instruction |= inst.operands[3].imm - 1;
8067 /* ARM V5 Thumb BLX (argument parse)
8068 BLX <target_addr> which is BLX(1)
8069 BLX <Rm> which is BLX(2)
8070 Unfortunately, there are two different opcodes for this mnemonic.
8071 So, the insns[].value is not used, and the code here zaps values
8072 into inst.instruction.
8074 ??? How to take advantage of the additional two bits of displacement
8075 available in Thumb32 mode? Need new relocation? */
8080 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8081 if (inst.operands[0].isreg)
8082 /* We have a register, so this is BLX(2). */
8083 inst.instruction |= inst.operands[0].reg << 3;
8086 /* No register. This must be BLX(1). */
8087 inst.instruction = 0xf000e800;
8089 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
8090 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8093 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
8094 inst.reloc.pc_rel = 1;
8104 if (current_it_mask)
8106 /* Conditional branches inside IT blocks are encoded as unconditional
8109 /* A branch must be the last instruction in an IT block. */
8110 constraint (current_it_mask != 0x10, BAD_BRANCH);
8115 if (cond != COND_ALWAYS)
8116 opcode = T_MNEM_bcond;
8118 opcode = inst.instruction;
8120 if (unified_syntax && inst.size_req == 4)
8122 inst.instruction = THUMB_OP32(opcode);
8123 if (cond == COND_ALWAYS)
8124 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
8127 assert (cond != 0xF);
8128 inst.instruction |= cond << 22;
8129 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
8134 inst.instruction = THUMB_OP16(opcode);
8135 if (cond == COND_ALWAYS)
8136 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
8139 inst.instruction |= cond << 8;
8140 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
8142 /* Allow section relaxation. */
8143 if (unified_syntax && inst.size_req != 2)
8144 inst.relax = opcode;
8147 inst.reloc.pc_rel = 1;
8153 constraint (inst.cond != COND_ALWAYS,
8154 _("instruction is always unconditional"));
8155 if (inst.operands[0].present)
8157 constraint (inst.operands[0].imm > 255,
8158 _("immediate value out of range"));
8159 inst.instruction |= inst.operands[0].imm;
8164 do_t_branch23 (void)
8166 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8167 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
8168 inst.reloc.pc_rel = 1;
8170 /* If the destination of the branch is a defined symbol which does not have
8171 the THUMB_FUNC attribute, then we must be calling a function which has
8172 the (interfacearm) attribute. We look for the Thumb entry point to that
8173 function and change the branch to refer to that function instead. */
8174 if ( inst.reloc.exp.X_op == O_symbol
8175 && inst.reloc.exp.X_add_symbol != NULL
8176 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
8177 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
8178 inst.reloc.exp.X_add_symbol =
8179 find_real_start (inst.reloc.exp.X_add_symbol);
8185 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8186 inst.instruction |= inst.operands[0].reg << 3;
8187 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
8188 should cause the alignment to be checked once it is known. This is
8189 because BX PC only works if the instruction is word aligned. */
8195 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8196 if (inst.operands[0].reg == REG_PC)
8197 as_tsktsk (_("use of r15 in bxj is not really useful"));
8199 inst.instruction |= inst.operands[0].reg << 16;
8205 inst.instruction |= inst.operands[0].reg << 8;
8206 inst.instruction |= inst.operands[1].reg << 16;
8207 inst.instruction |= inst.operands[1].reg;
8213 constraint (current_it_mask, BAD_NOT_IT);
8214 inst.instruction |= inst.operands[0].imm;
8220 constraint (current_it_mask, BAD_NOT_IT);
8222 && (inst.operands[1].present || inst.size_req == 4)
8223 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
8225 unsigned int imod = (inst.instruction & 0x0030) >> 4;
8226 inst.instruction = 0xf3af8000;
8227 inst.instruction |= imod << 9;
8228 inst.instruction |= inst.operands[0].imm << 5;
8229 if (inst.operands[1].present)
8230 inst.instruction |= 0x100 | inst.operands[1].imm;
8234 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
8235 && (inst.operands[0].imm & 4),
8236 _("selected processor does not support 'A' form "
8237 "of this instruction"));
8238 constraint (inst.operands[1].present || inst.size_req == 4,
8239 _("Thumb does not support the 2-argument "
8240 "form of this instruction"));
8241 inst.instruction |= inst.operands[0].imm;
8245 /* THUMB CPY instruction (argument parse). */
8250 if (inst.size_req == 4)
8252 inst.instruction = THUMB_OP32 (T_MNEM_mov);
8253 inst.instruction |= inst.operands[0].reg << 8;
8254 inst.instruction |= inst.operands[1].reg;
8258 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8259 inst.instruction |= (inst.operands[0].reg & 0x7);
8260 inst.instruction |= inst.operands[1].reg << 3;
8267 constraint (current_it_mask, BAD_NOT_IT);
8268 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8269 inst.instruction |= inst.operands[0].reg;
8270 inst.reloc.pc_rel = 1;
8271 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
8277 inst.instruction |= inst.operands[0].imm;
8283 if (!inst.operands[1].present)
8284 inst.operands[1].reg = inst.operands[0].reg;
8285 inst.instruction |= inst.operands[0].reg << 8;
8286 inst.instruction |= inst.operands[1].reg << 16;
8287 inst.instruction |= inst.operands[2].reg;
8293 if (unified_syntax && inst.size_req == 4)
8294 inst.instruction = THUMB_OP32 (inst.instruction);
8296 inst.instruction = THUMB_OP16 (inst.instruction);
8302 unsigned int cond = inst.operands[0].imm;
8304 constraint (current_it_mask, BAD_NOT_IT);
8305 current_it_mask = (inst.instruction & 0xf) | 0x10;
8308 /* If the condition is a negative condition, invert the mask. */
8309 if ((cond & 0x1) == 0x0)
8311 unsigned int mask = inst.instruction & 0x000f;
8313 if ((mask & 0x7) == 0)
8314 /* no conversion needed */;
8315 else if ((mask & 0x3) == 0)
8317 else if ((mask & 0x1) == 0)
8322 inst.instruction &= 0xfff0;
8323 inst.instruction |= mask;
8326 inst.instruction |= cond << 4;
8332 /* This really doesn't seem worth it. */
8333 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
8334 _("expression too complex"));
8335 constraint (inst.operands[1].writeback,
8336 _("Thumb load/store multiple does not support {reglist}^"));
8340 /* See if we can use a 16-bit instruction. */
8341 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
8342 && inst.size_req != 4
8343 && inst.operands[0].reg <= 7
8344 && !(inst.operands[1].imm & ~0xff)
8345 && (inst.instruction == T_MNEM_stmia
8346 ? inst.operands[0].writeback
8347 : (inst.operands[0].writeback
8348 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
8350 if (inst.instruction == T_MNEM_stmia
8351 && (inst.operands[1].imm & (1 << inst.operands[0].reg))
8352 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
8353 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8354 inst.operands[0].reg);
8356 inst.instruction = THUMB_OP16 (inst.instruction);
8357 inst.instruction |= inst.operands[0].reg << 8;
8358 inst.instruction |= inst.operands[1].imm;
8362 if (inst.operands[1].imm & (1 << 13))
8363 as_warn (_("SP should not be in register list"));
8364 if (inst.instruction == T_MNEM_stmia)
8366 if (inst.operands[1].imm & (1 << 15))
8367 as_warn (_("PC should not be in register list"));
8368 if (inst.operands[1].imm & (1 << inst.operands[0].reg))
8369 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8370 inst.operands[0].reg);
8374 if (inst.operands[1].imm & (1 << 14)
8375 && inst.operands[1].imm & (1 << 15))
8376 as_warn (_("LR and PC should not both be in register list"));
8377 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
8378 && inst.operands[0].writeback)
8379 as_warn (_("base register should not be in register list "
8380 "when written back"));
8382 if (inst.instruction < 0xffff)
8383 inst.instruction = THUMB_OP32 (inst.instruction);
8384 inst.instruction |= inst.operands[0].reg << 16;
8385 inst.instruction |= inst.operands[1].imm;
8386 if (inst.operands[0].writeback)
8387 inst.instruction |= WRITE_BACK;
8392 constraint (inst.operands[0].reg > 7
8393 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
8394 if (inst.instruction == T_MNEM_stmia)
8396 if (!inst.operands[0].writeback)
8397 as_warn (_("this instruction will write back the base register"));
8398 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
8399 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
8400 as_warn (_("value stored for r%d is UNPREDICTABLE"),
8401 inst.operands[0].reg);
8405 if (!inst.operands[0].writeback
8406 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
8407 as_warn (_("this instruction will write back the base register"));
8408 else if (inst.operands[0].writeback
8409 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
8410 as_warn (_("this instruction will not write back the base register"));
8413 inst.instruction = THUMB_OP16 (inst.instruction);
8414 inst.instruction |= inst.operands[0].reg << 8;
8415 inst.instruction |= inst.operands[1].imm;
8422 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
8423 || inst.operands[1].postind || inst.operands[1].writeback
8424 || inst.operands[1].immisreg || inst.operands[1].shifted
8425 || inst.operands[1].negative,
8428 inst.instruction |= inst.operands[0].reg << 12;
8429 inst.instruction |= inst.operands[1].reg << 16;
8430 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
8436 if (!inst.operands[1].present)
8438 constraint (inst.operands[0].reg == REG_LR,
8439 _("r14 not allowed as first register "
8440 "when second register is omitted"));
8441 inst.operands[1].reg = inst.operands[0].reg + 1;
8443 constraint (inst.operands[0].reg == inst.operands[1].reg,
8446 inst.instruction |= inst.operands[0].reg << 12;
8447 inst.instruction |= inst.operands[1].reg << 8;
8448 inst.instruction |= inst.operands[2].reg << 16;
8454 unsigned long opcode;
8457 opcode = inst.instruction;
8460 if (!inst.operands[1].isreg)
8462 if (opcode <= 0xffff)
8463 inst.instruction = THUMB_OP32 (opcode);
8464 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
8467 if (inst.operands[1].isreg
8468 && !inst.operands[1].writeback
8469 && !inst.operands[1].shifted && !inst.operands[1].postind
8470 && !inst.operands[1].negative && inst.operands[0].reg <= 7
8472 && inst.size_req != 4)
8474 /* Insn may have a 16-bit form. */
8475 Rn = inst.operands[1].reg;
8476 if (inst.operands[1].immisreg)
8478 inst.instruction = THUMB_OP16 (opcode);
8480 if (Rn <= 7 && inst.operands[1].imm <= 7)
8483 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
8484 && opcode != T_MNEM_ldrsb)
8485 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
8486 || (Rn == REG_SP && opcode == T_MNEM_str))
8493 if (inst.reloc.pc_rel)
8494 opcode = T_MNEM_ldr_pc2;
8496 opcode = T_MNEM_ldr_pc;
8500 if (opcode == T_MNEM_ldr)
8501 opcode = T_MNEM_ldr_sp;
8503 opcode = T_MNEM_str_sp;
8505 inst.instruction = inst.operands[0].reg << 8;
8509 inst.instruction = inst.operands[0].reg;
8510 inst.instruction |= inst.operands[1].reg << 3;
8512 inst.instruction |= THUMB_OP16 (opcode);
8513 if (inst.size_req == 2)
8514 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8516 inst.relax = opcode;
8520 /* Definitely a 32-bit variant. */
8521 inst.instruction = THUMB_OP32 (opcode);
8522 inst.instruction |= inst.operands[0].reg << 12;
8523 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
8527 constraint (inst.operands[0].reg > 7, BAD_HIREG);
8529 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
8531 /* Only [Rn,Rm] is acceptable. */
8532 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
8533 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
8534 || inst.operands[1].postind || inst.operands[1].shifted
8535 || inst.operands[1].negative,
8536 _("Thumb does not support this addressing mode"));
8537 inst.instruction = THUMB_OP16 (inst.instruction);
8541 inst.instruction = THUMB_OP16 (inst.instruction);
8542 if (!inst.operands[1].isreg)
8543 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
8546 constraint (!inst.operands[1].preind
8547 || inst.operands[1].shifted
8548 || inst.operands[1].writeback,
8549 _("Thumb does not support this addressing mode"));
8550 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
8552 constraint (inst.instruction & 0x0600,
8553 _("byte or halfword not valid for base register"));
8554 constraint (inst.operands[1].reg == REG_PC
8555 && !(inst.instruction & THUMB_LOAD_BIT),
8556 _("r15 based store not allowed"));
8557 constraint (inst.operands[1].immisreg,
8558 _("invalid base register for register offset"));
8560 if (inst.operands[1].reg == REG_PC)
8561 inst.instruction = T_OPCODE_LDR_PC;
8562 else if (inst.instruction & THUMB_LOAD_BIT)
8563 inst.instruction = T_OPCODE_LDR_SP;
8565 inst.instruction = T_OPCODE_STR_SP;
8567 inst.instruction |= inst.operands[0].reg << 8;
8568 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8572 constraint (inst.operands[1].reg > 7, BAD_HIREG);
8573 if (!inst.operands[1].immisreg)
8575 /* Immediate offset. */
8576 inst.instruction |= inst.operands[0].reg;
8577 inst.instruction |= inst.operands[1].reg << 3;
8578 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
8582 /* Register offset. */
8583 constraint (inst.operands[1].imm > 7, BAD_HIREG);
8584 constraint (inst.operands[1].negative,
8585 _("Thumb does not support this addressing mode"));
8588 switch (inst.instruction)
8590 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
8591 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
8592 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
8593 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
8594 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
8595 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
8596 case 0x5600 /* ldrsb */:
8597 case 0x5e00 /* ldrsh */: break;
8601 inst.instruction |= inst.operands[0].reg;
8602 inst.instruction |= inst.operands[1].reg << 3;
8603 inst.instruction |= inst.operands[1].imm << 6;
8609 if (!inst.operands[1].present)
8611 inst.operands[1].reg = inst.operands[0].reg + 1;
8612 constraint (inst.operands[0].reg == REG_LR,
8613 _("r14 not allowed here"));
8615 inst.instruction |= inst.operands[0].reg << 12;
8616 inst.instruction |= inst.operands[1].reg << 8;
8617 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
8624 inst.instruction |= inst.operands[0].reg << 12;
8625 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
8631 inst.instruction |= inst.operands[0].reg << 8;
8632 inst.instruction |= inst.operands[1].reg << 16;
8633 inst.instruction |= inst.operands[2].reg;
8634 inst.instruction |= inst.operands[3].reg << 12;
8640 inst.instruction |= inst.operands[0].reg << 12;
8641 inst.instruction |= inst.operands[1].reg << 8;
8642 inst.instruction |= inst.operands[2].reg << 16;
8643 inst.instruction |= inst.operands[3].reg;
8651 int r0off = (inst.instruction == T_MNEM_mov
8652 || inst.instruction == T_MNEM_movs) ? 8 : 16;
8653 unsigned long opcode;
8655 bfd_boolean low_regs;
8657 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
8658 opcode = inst.instruction;
8659 if (current_it_mask)
8660 narrow = opcode != T_MNEM_movs;
8662 narrow = opcode != T_MNEM_movs || low_regs;
8663 if (inst.size_req == 4
8664 || inst.operands[1].shifted)
8667 if (!inst.operands[1].isreg)
8669 /* Immediate operand. */
8670 if (current_it_mask == 0 && opcode == T_MNEM_mov)
8672 if (low_regs && narrow)
8674 inst.instruction = THUMB_OP16 (opcode);
8675 inst.instruction |= inst.operands[0].reg << 8;
8676 if (inst.size_req == 2)
8677 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8679 inst.relax = opcode;
8683 inst.instruction = THUMB_OP32 (inst.instruction);
8684 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8685 inst.instruction |= inst.operands[0].reg << r0off;
8686 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8691 inst.instruction = THUMB_OP32 (inst.instruction);
8692 inst.instruction |= inst.operands[0].reg << r0off;
8693 encode_thumb32_shifted_operand (1);
8696 switch (inst.instruction)
8699 inst.instruction = T_OPCODE_MOV_HR;
8700 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8701 inst.instruction |= (inst.operands[0].reg & 0x7);
8702 inst.instruction |= inst.operands[1].reg << 3;
8706 /* We know we have low registers at this point.
8707 Generate ADD Rd, Rs, #0. */
8708 inst.instruction = T_OPCODE_ADD_I3;
8709 inst.instruction |= inst.operands[0].reg;
8710 inst.instruction |= inst.operands[1].reg << 3;
8716 inst.instruction = T_OPCODE_CMP_LR;
8717 inst.instruction |= inst.operands[0].reg;
8718 inst.instruction |= inst.operands[1].reg << 3;
8722 inst.instruction = T_OPCODE_CMP_HR;
8723 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8724 inst.instruction |= (inst.operands[0].reg & 0x7);
8725 inst.instruction |= inst.operands[1].reg << 3;
8732 inst.instruction = THUMB_OP16 (inst.instruction);
8733 if (inst.operands[1].isreg)
8735 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
8737 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
8738 since a MOV instruction produces unpredictable results. */
8739 if (inst.instruction == T_OPCODE_MOV_I8)
8740 inst.instruction = T_OPCODE_ADD_I3;
8742 inst.instruction = T_OPCODE_CMP_LR;
8744 inst.instruction |= inst.operands[0].reg;
8745 inst.instruction |= inst.operands[1].reg << 3;
8749 if (inst.instruction == T_OPCODE_MOV_I8)
8750 inst.instruction = T_OPCODE_MOV_HR;
8752 inst.instruction = T_OPCODE_CMP_HR;
8758 constraint (inst.operands[0].reg > 7,
8759 _("only lo regs allowed with immediate"));
8760 inst.instruction |= inst.operands[0].reg << 8;
8761 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8771 top = (inst.instruction & 0x00800000) != 0;
8772 if (inst.reloc.type == BFD_RELOC_ARM_MOVW)
8774 constraint (top, _(":lower16: not allowed this instruction"));
8775 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVW;
8777 else if (inst.reloc.type == BFD_RELOC_ARM_MOVT)
8779 constraint (!top, _(":upper16: not allowed this instruction"));
8780 inst.reloc.type = BFD_RELOC_ARM_THUMB_MOVT;
8783 inst.instruction |= inst.operands[0].reg << 8;
8784 if (inst.reloc.type == BFD_RELOC_UNUSED)
8786 imm = inst.reloc.exp.X_add_number;
8787 inst.instruction |= (imm & 0xf000) << 4;
8788 inst.instruction |= (imm & 0x0800) << 15;
8789 inst.instruction |= (imm & 0x0700) << 4;
8790 inst.instruction |= (imm & 0x00ff);
8799 int r0off = (inst.instruction == T_MNEM_mvn
8800 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
8803 if (inst.size_req == 4
8804 || inst.instruction > 0xffff
8805 || inst.operands[1].shifted
8806 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8808 else if (inst.instruction == T_MNEM_cmn)
8810 else if (THUMB_SETS_FLAGS (inst.instruction))
8811 narrow = (current_it_mask == 0);
8813 narrow = (current_it_mask != 0);
8815 if (!inst.operands[1].isreg)
8817 /* For an immediate, we always generate a 32-bit opcode;
8818 section relaxation will shrink it later if possible. */
8819 if (inst.instruction < 0xffff)
8820 inst.instruction = THUMB_OP32 (inst.instruction);
8821 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8822 inst.instruction |= inst.operands[0].reg << r0off;
8823 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8827 /* See if we can do this with a 16-bit instruction. */
8830 inst.instruction = THUMB_OP16 (inst.instruction);
8831 inst.instruction |= inst.operands[0].reg;
8832 inst.instruction |= inst.operands[1].reg << 3;
8836 constraint (inst.operands[1].shifted
8837 && inst.operands[1].immisreg,
8838 _("shift must be constant"));
8839 if (inst.instruction < 0xffff)
8840 inst.instruction = THUMB_OP32 (inst.instruction);
8841 inst.instruction |= inst.operands[0].reg << r0off;
8842 encode_thumb32_shifted_operand (1);
8848 constraint (inst.instruction > 0xffff
8849 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
8850 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
8851 _("unshifted register required"));
8852 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8855 inst.instruction = THUMB_OP16 (inst.instruction);
8856 inst.instruction |= inst.operands[0].reg;
8857 inst.instruction |= inst.operands[1].reg << 3;
8865 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
8868 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8869 _("selected processor does not support "
8870 "requested special purpose register"));
8874 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
8875 _("selected processor does not support "
8876 "requested special purpose register %x"));
8877 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8878 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
8879 _("'CPSR' or 'SPSR' expected"));
8882 inst.instruction |= inst.operands[0].reg << 8;
8883 inst.instruction |= (flags & SPSR_BIT) >> 2;
8884 inst.instruction |= inst.operands[1].imm & 0xff;
8892 constraint (!inst.operands[1].isreg,
8893 _("Thumb encoding does not support an immediate here"));
8894 flags = inst.operands[0].imm;
8897 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
8898 _("selected processor does not support "
8899 "requested special purpose register"));
8903 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8904 _("selected processor does not support "
8905 "requested special purpose register"));
8908 inst.instruction |= (flags & SPSR_BIT) >> 2;
8909 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
8910 inst.instruction |= (flags & 0xff);
8911 inst.instruction |= inst.operands[1].reg << 16;
8917 if (!inst.operands[2].present)
8918 inst.operands[2].reg = inst.operands[0].reg;
8920 /* There is no 32-bit MULS and no 16-bit MUL. */
8921 if (unified_syntax && inst.instruction == T_MNEM_mul)
8923 inst.instruction = THUMB_OP32 (inst.instruction);
8924 inst.instruction |= inst.operands[0].reg << 8;
8925 inst.instruction |= inst.operands[1].reg << 16;
8926 inst.instruction |= inst.operands[2].reg << 0;
8930 constraint (!unified_syntax
8931 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
8932 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8935 inst.instruction = THUMB_OP16 (inst.instruction);
8936 inst.instruction |= inst.operands[0].reg;
8938 if (inst.operands[0].reg == inst.operands[1].reg)
8939 inst.instruction |= inst.operands[2].reg << 3;
8940 else if (inst.operands[0].reg == inst.operands[2].reg)
8941 inst.instruction |= inst.operands[1].reg << 3;
8943 constraint (1, _("dest must overlap one source register"));
8950 inst.instruction |= inst.operands[0].reg << 12;
8951 inst.instruction |= inst.operands[1].reg << 8;
8952 inst.instruction |= inst.operands[2].reg << 16;
8953 inst.instruction |= inst.operands[3].reg;
8955 if (inst.operands[0].reg == inst.operands[1].reg)
8956 as_tsktsk (_("rdhi and rdlo must be different"));
8964 if (inst.size_req == 4 || inst.operands[0].imm > 15)
8966 inst.instruction = THUMB_OP32 (inst.instruction);
8967 inst.instruction |= inst.operands[0].imm;
8971 inst.instruction = THUMB_OP16 (inst.instruction);
8972 inst.instruction |= inst.operands[0].imm << 4;
8977 constraint (inst.operands[0].present,
8978 _("Thumb does not support NOP with hints"));
8979 inst.instruction = 0x46c0;
8990 if (THUMB_SETS_FLAGS (inst.instruction))
8991 narrow = (current_it_mask == 0);
8993 narrow = (current_it_mask != 0);
8994 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8996 if (inst.size_req == 4)
9001 inst.instruction = THUMB_OP32 (inst.instruction);
9002 inst.instruction |= inst.operands[0].reg << 8;
9003 inst.instruction |= inst.operands[1].reg << 16;
9007 inst.instruction = THUMB_OP16 (inst.instruction);
9008 inst.instruction |= inst.operands[0].reg;
9009 inst.instruction |= inst.operands[1].reg << 3;
9014 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
9016 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9018 inst.instruction = THUMB_OP16 (inst.instruction);
9019 inst.instruction |= inst.operands[0].reg;
9020 inst.instruction |= inst.operands[1].reg << 3;
9027 inst.instruction |= inst.operands[0].reg << 8;
9028 inst.instruction |= inst.operands[1].reg << 16;
9029 inst.instruction |= inst.operands[2].reg;
9030 if (inst.operands[3].present)
9032 unsigned int val = inst.reloc.exp.X_add_number;
9033 constraint (inst.reloc.exp.X_op != O_constant,
9034 _("expression too complex"));
9035 inst.instruction |= (val & 0x1c) << 10;
9036 inst.instruction |= (val & 0x03) << 6;
9043 if (!inst.operands[3].present)
9044 inst.instruction &= ~0x00000020;
9051 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
9055 do_t_push_pop (void)
9059 constraint (inst.operands[0].writeback,
9060 _("push/pop do not support {reglist}^"));
9061 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
9062 _("expression too complex"));
9064 mask = inst.operands[0].imm;
9065 if ((mask & ~0xff) == 0)
9066 inst.instruction = THUMB_OP16 (inst.instruction);
9067 else if ((inst.instruction == T_MNEM_push
9068 && (mask & ~0xff) == 1 << REG_LR)
9069 || (inst.instruction == T_MNEM_pop
9070 && (mask & ~0xff) == 1 << REG_PC))
9072 inst.instruction = THUMB_OP16 (inst.instruction);
9073 inst.instruction |= THUMB_PP_PC_LR;
9076 else if (unified_syntax)
9078 if (mask & (1 << 13))
9079 inst.error = _("SP not allowed in register list");
9080 if (inst.instruction == T_MNEM_push)
9082 if (mask & (1 << 15))
9083 inst.error = _("PC not allowed in register list");
9087 if (mask & (1 << 14)
9088 && mask & (1 << 15))
9089 inst.error = _("LR and PC should not both be in register list");
9091 if ((mask & (mask - 1)) == 0)
9093 /* Single register push/pop implemented as str/ldr. */
9094 if (inst.instruction == T_MNEM_push)
9095 inst.instruction = 0xf84d0d04; /* str reg, [sp, #-4]! */
9097 inst.instruction = 0xf85d0b04; /* ldr reg, [sp], #4 */
9098 mask = ffs(mask) - 1;
9102 inst.instruction = THUMB_OP32 (inst.instruction);
9106 inst.error = _("invalid register list to push/pop instruction");
9110 inst.instruction |= mask;
9116 inst.instruction |= inst.operands[0].reg << 8;
9117 inst.instruction |= inst.operands[1].reg << 16;
9123 if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9124 && inst.size_req != 4)
9126 inst.instruction = THUMB_OP16 (inst.instruction);
9127 inst.instruction |= inst.operands[0].reg;
9128 inst.instruction |= inst.operands[1].reg << 3;
9130 else if (unified_syntax)
9132 inst.instruction = THUMB_OP32 (inst.instruction);
9133 inst.instruction |= inst.operands[0].reg << 8;
9134 inst.instruction |= inst.operands[1].reg << 16;
9135 inst.instruction |= inst.operands[1].reg;
9138 inst.error = BAD_HIREG;
9146 Rd = inst.operands[0].reg;
9147 Rs = (inst.operands[1].present
9148 ? inst.operands[1].reg /* Rd, Rs, foo */
9149 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
9151 inst.instruction |= Rd << 8;
9152 inst.instruction |= Rs << 16;
9153 if (!inst.operands[2].isreg)
9155 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
9156 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
9159 encode_thumb32_shifted_operand (2);
9165 constraint (current_it_mask, BAD_NOT_IT);
9166 if (inst.operands[0].imm)
9167 inst.instruction |= 0x8;
9173 if (!inst.operands[1].present)
9174 inst.operands[1].reg = inst.operands[0].reg;
9181 switch (inst.instruction)
9184 case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
9186 case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
9188 case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
9190 case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
9194 if (THUMB_SETS_FLAGS (inst.instruction))
9195 narrow = (current_it_mask == 0);
9197 narrow = (current_it_mask != 0);
9198 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
9200 if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
9202 if (inst.operands[2].isreg
9203 && (inst.operands[1].reg != inst.operands[0].reg
9204 || inst.operands[2].reg > 7))
9206 if (inst.size_req == 4)
9211 if (inst.operands[2].isreg)
9213 inst.instruction = THUMB_OP32 (inst.instruction);
9214 inst.instruction |= inst.operands[0].reg << 8;
9215 inst.instruction |= inst.operands[1].reg << 16;
9216 inst.instruction |= inst.operands[2].reg;
9220 inst.operands[1].shifted = 1;
9221 inst.operands[1].shift_kind = shift_kind;
9222 inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
9223 ? T_MNEM_movs : T_MNEM_mov);
9224 inst.instruction |= inst.operands[0].reg << 8;
9225 encode_thumb32_shifted_operand (1);
9226 /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup. */
9227 inst.reloc.type = BFD_RELOC_UNUSED;
9232 if (inst.operands[2].isreg)
9236 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
9237 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
9238 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
9239 case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
9243 inst.instruction |= inst.operands[0].reg;
9244 inst.instruction |= inst.operands[2].reg << 3;
9250 case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
9251 case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
9252 case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
9255 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9256 inst.instruction |= inst.operands[0].reg;
9257 inst.instruction |= inst.operands[1].reg << 3;
9263 constraint (inst.operands[0].reg > 7
9264 || inst.operands[1].reg > 7, BAD_HIREG);
9265 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
9267 if (inst.operands[2].isreg) /* Rd, {Rs,} Rn */
9269 constraint (inst.operands[2].reg > 7, BAD_HIREG);
9270 constraint (inst.operands[0].reg != inst.operands[1].reg,
9271 _("source1 and dest must be same register"));
9273 switch (inst.instruction)
9275 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
9276 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
9277 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
9278 case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
9282 inst.instruction |= inst.operands[0].reg;
9283 inst.instruction |= inst.operands[2].reg << 3;
9287 switch (inst.instruction)
9289 case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
9290 case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
9291 case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
9292 case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
9295 inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
9296 inst.instruction |= inst.operands[0].reg;
9297 inst.instruction |= inst.operands[1].reg << 3;
9305 inst.instruction |= inst.operands[0].reg << 8;
9306 inst.instruction |= inst.operands[1].reg << 16;
9307 inst.instruction |= inst.operands[2].reg;
9313 unsigned int value = inst.reloc.exp.X_add_number;
9314 constraint (inst.reloc.exp.X_op != O_constant,
9315 _("expression too complex"));
9316 inst.reloc.type = BFD_RELOC_UNUSED;
9317 inst.instruction |= (value & 0xf000) >> 12;
9318 inst.instruction |= (value & 0x0ff0);
9319 inst.instruction |= (value & 0x000f) << 16;
9325 inst.instruction |= inst.operands[0].reg << 8;
9326 inst.instruction |= inst.operands[1].imm - 1;
9327 inst.instruction |= inst.operands[2].reg << 16;
9329 if (inst.operands[3].present)
9331 constraint (inst.reloc.exp.X_op != O_constant,
9332 _("expression too complex"));
9334 if (inst.reloc.exp.X_add_number != 0)
9336 if (inst.operands[3].shift_kind == SHIFT_ASR)
9337 inst.instruction |= 0x00200000; /* sh bit */
9338 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
9339 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
9341 inst.reloc.type = BFD_RELOC_UNUSED;
9348 inst.instruction |= inst.operands[0].reg << 8;
9349 inst.instruction |= inst.operands[1].imm - 1;
9350 inst.instruction |= inst.operands[2].reg << 16;
9356 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
9357 || inst.operands[2].postind || inst.operands[2].writeback
9358 || inst.operands[2].immisreg || inst.operands[2].shifted
9359 || inst.operands[2].negative,
9362 inst.instruction |= inst.operands[0].reg << 8;
9363 inst.instruction |= inst.operands[1].reg << 12;
9364 inst.instruction |= inst.operands[2].reg << 16;
9365 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
9371 if (!inst.operands[2].present)
9372 inst.operands[2].reg = inst.operands[1].reg + 1;
9374 constraint (inst.operands[0].reg == inst.operands[1].reg
9375 || inst.operands[0].reg == inst.operands[2].reg
9376 || inst.operands[0].reg == inst.operands[3].reg
9377 || inst.operands[1].reg == inst.operands[2].reg,
9380 inst.instruction |= inst.operands[0].reg;
9381 inst.instruction |= inst.operands[1].reg << 12;
9382 inst.instruction |= inst.operands[2].reg << 8;
9383 inst.instruction |= inst.operands[3].reg << 16;
9389 inst.instruction |= inst.operands[0].reg << 8;
9390 inst.instruction |= inst.operands[1].reg << 16;
9391 inst.instruction |= inst.operands[2].reg;
9392 inst.instruction |= inst.operands[3].imm << 4;
9398 if (inst.instruction <= 0xffff && inst.size_req != 4
9399 && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
9400 && (!inst.operands[2].present || inst.operands[2].imm == 0))
9402 inst.instruction = THUMB_OP16 (inst.instruction);
9403 inst.instruction |= inst.operands[0].reg;
9404 inst.instruction |= inst.operands[1].reg << 3;
9406 else if (unified_syntax)
9408 if (inst.instruction <= 0xffff)
9409 inst.instruction = THUMB_OP32 (inst.instruction);
9410 inst.instruction |= inst.operands[0].reg << 8;
9411 inst.instruction |= inst.operands[1].reg;
9412 inst.instruction |= inst.operands[2].imm << 4;
9416 constraint (inst.operands[2].present && inst.operands[2].imm != 0,
9417 _("Thumb encoding does not support rotation"));
9418 constraint (1, BAD_HIREG);
9425 inst.reloc.type = BFD_RELOC_ARM_SWI;
9433 half = (inst.instruction & 0x10) != 0;
9434 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
9435 constraint (inst.operands[0].immisreg,
9436 _("instruction requires register index"));
9437 constraint (inst.operands[0].imm == 15,
9438 _("PC is not a valid index register"));
9439 constraint (!half && inst.operands[0].shifted,
9440 _("instruction does not allow shifted index"));
9441 inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
9447 inst.instruction |= inst.operands[0].reg << 8;
9448 inst.instruction |= inst.operands[1].imm;
9449 inst.instruction |= inst.operands[2].reg << 16;
9451 if (inst.operands[3].present)
9453 constraint (inst.reloc.exp.X_op != O_constant,
9454 _("expression too complex"));
9455 if (inst.reloc.exp.X_add_number != 0)
9457 if (inst.operands[3].shift_kind == SHIFT_ASR)
9458 inst.instruction |= 0x00200000; /* sh bit */
9460 inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
9461 inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
9463 inst.reloc.type = BFD_RELOC_UNUSED;
9470 inst.instruction |= inst.operands[0].reg << 8;
9471 inst.instruction |= inst.operands[1].imm;
9472 inst.instruction |= inst.operands[2].reg << 16;
9475 /* Neon instruction encoder helpers. */
9477 /* Encodings for the different types for various Neon opcodes. */
9479 /* An "invalid" code for the following tables. */
9482 struct neon_tab_entry
9485 unsigned float_or_poly;
9486 unsigned scalar_or_imm;
9489 /* Map overloaded Neon opcodes to their respective encodings. */
9490 #define NEON_ENC_TAB \
9491 X(vabd, 0x0000700, 0x1200d00, N_INV), \
9492 X(vmax, 0x0000600, 0x0000f00, N_INV), \
9493 X(vmin, 0x0000610, 0x0200f00, N_INV), \
9494 X(vpadd, 0x0000b10, 0x1000d00, N_INV), \
9495 X(vpmax, 0x0000a00, 0x1000f00, N_INV), \
9496 X(vpmin, 0x0000a10, 0x1200f00, N_INV), \
9497 X(vadd, 0x0000800, 0x0000d00, N_INV), \
9498 X(vsub, 0x1000800, 0x0200d00, N_INV), \
9499 X(vceq, 0x1000810, 0x0000e00, 0x1b10100), \
9500 X(vcge, 0x0000310, 0x1000e00, 0x1b10080), \
9501 X(vcgt, 0x0000300, 0x1200e00, 0x1b10000), \
9502 /* Register variants of the following two instructions are encoded as
9503 vcge / vcgt with the operands reversed. */ \
9504 X(vclt, 0x0000310, 0x1000e00, 0x1b10200), \
9505 X(vcle, 0x0000300, 0x1200e00, 0x1b10180), \
9506 X(vmla, 0x0000900, 0x0000d10, 0x0800040), \
9507 X(vmls, 0x1000900, 0x0200d10, 0x0800440), \
9508 X(vmul, 0x0000910, 0x1000d10, 0x0800840), \
9509 X(vmull, 0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float. */ \
9510 X(vmlal, 0x0800800, N_INV, 0x0800240), \
9511 X(vmlsl, 0x0800a00, N_INV, 0x0800640), \
9512 X(vqdmlal, 0x0800900, N_INV, 0x0800340), \
9513 X(vqdmlsl, 0x0800b00, N_INV, 0x0800740), \
9514 X(vqdmull, 0x0800d00, N_INV, 0x0800b40), \
9515 X(vqdmulh, 0x0000b00, N_INV, 0x0800c40), \
9516 X(vqrdmulh, 0x1000b00, N_INV, 0x0800d40), \
9517 X(vshl, 0x0000400, N_INV, 0x0800510), \
9518 X(vqshl, 0x0000410, N_INV, 0x0800710), \
9519 X(vand, 0x0000110, N_INV, 0x0800030), \
9520 X(vbic, 0x0100110, N_INV, 0x0800030), \
9521 X(veor, 0x1000110, N_INV, N_INV), \
9522 X(vorn, 0x0300110, N_INV, 0x0800010), \
9523 X(vorr, 0x0200110, N_INV, 0x0800010), \
9524 X(vmvn, 0x1b00580, N_INV, 0x0800030), \
9525 X(vshll, 0x1b20300, N_INV, 0x0800a10), /* max shift, immediate. */ \
9526 X(vcvt, 0x1b30600, N_INV, 0x0800e10), /* integer, fixed-point. */ \
9527 X(vdup, 0xe800b10, N_INV, 0x1b00c00), /* arm, scalar. */ \
9528 X(vld1, 0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup. */ \
9529 X(vst1, 0x0000000, 0x0800000, N_INV), \
9530 X(vld2, 0x0200100, 0x0a00100, 0x0a00d00), \
9531 X(vst2, 0x0000100, 0x0800100, N_INV), \
9532 X(vld3, 0x0200200, 0x0a00200, 0x0a00e00), \
9533 X(vst3, 0x0000200, 0x0800200, N_INV), \
9534 X(vld4, 0x0200300, 0x0a00300, 0x0a00f00), \
9535 X(vst4, 0x0000300, 0x0800300, N_INV), \
9536 X(vmovn, 0x1b20200, N_INV, N_INV), \
9537 X(vtrn, 0x1b20080, N_INV, N_INV), \
9538 X(vqmovn, 0x1b20200, N_INV, N_INV), \
9539 X(vqmovun, 0x1b20240, N_INV, N_INV)
9543 #define X(OPC,I,F,S) N_MNEM_##OPC
9548 static const struct neon_tab_entry neon_enc_tab[] =
9550 #define X(OPC,I,F,S) { (I), (F), (S) }
9555 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9556 #define NEON_ENC_ARMREG(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9557 #define NEON_ENC_POLY(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9558 #define NEON_ENC_FLOAT(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9559 #define NEON_ENC_SCALAR(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9560 #define NEON_ENC_IMMED(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9561 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
9562 #define NEON_ENC_LANE(X) (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
9563 #define NEON_ENC_DUP(X) (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
9565 /* Shapes for instruction operands. Some (e.g. NS_DDD_QQQ) represent multiple
9566 shapes which an instruction can accept. The following mnemonic characters
9567 are used in the tag names for this enumeration:
9569 D - Neon D<n> register
9570 Q - Neon Q<n> register
9574 L - D<n> register list
9615 /* Bit masks used in type checking given instructions.
9616 'N_EQK' means the type must be the same as (or based on in some way) the key
9617 type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
9618 set, various other bits can be set as well in order to modify the meaning of
9619 the type constraint. */
9642 N_KEY = 0x080000, /* key element (main type specifier). */
9643 N_EQK = 0x100000, /* given operand has the same type & size as the key. */
9644 N_DBL = 0x000001, /* if N_EQK, this operand is twice the size. */
9645 N_HLF = 0x000002, /* if N_EQK, this operand is half the size. */
9646 N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed. */
9647 N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned. */
9648 N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer. */
9649 N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float. */
9650 N_SIZ = 0x000040, /* if N_EQK, this operand is forced to be size-only. */
9652 N_MAX_NONSPECIAL = N_F32
9655 #define N_ALLMODS (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
9657 #define N_SU_ALL (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
9658 #define N_SU_32 (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
9659 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
9660 #define N_SUF_32 (N_SU_32 | N_F32)
9661 #define N_I_ALL (N_I8 | N_I16 | N_I32 | N_I64)
9662 #define N_IF_32 (N_I8 | N_I16 | N_I32 | N_F32)
9664 /* Pass this as the first type argument to neon_check_type to ignore types
9666 #define N_IGNORE_TYPE (N_KEY | N_EQK)
9668 /* Check the shape of a Neon instruction (sizes of registers). Returns the more
9669 specific shape when there are two alternatives. For non-polymorphic shapes,
9670 checking is done during operand parsing, so is not implemented here. */
9672 static enum neon_shape
9673 neon_check_shape (enum neon_shape req)
9675 #define RR(X) (inst.operands[(X)].isreg)
9676 #define RD(X) (inst.operands[(X)].isreg && !inst.operands[(X)].isquad)
9677 #define RQ(X) (inst.operands[(X)].isreg && inst.operands[(X)].isquad)
9678 #define IM(X) (!inst.operands[(X)].isreg && !inst.operands[(X)].isscalar)
9679 #define SC(X) (!inst.operands[(X)].isreg && inst.operands[(X)].isscalar)
9681 /* Fix missing optional operands. FIXME: we don't know at this point how
9682 many arguments we should have, so this makes the assumption that we have
9683 > 1. This is true of all current Neon opcodes, I think, but may not be
9684 true in the future. */
9685 if (!inst.operands[1].present)
9686 inst.operands[1] = inst.operands[0];
9692 if (RD(0) && RD(1) && RD(2))
9694 else if (RQ(0) && RQ(1) && RQ(2))
9697 first_error (_("expected <Qd>, <Qn>, <Qm> or <Dd>, <Dn>, <Dm> "
9704 if (RD(0) && RD(1) && IM(2))
9706 else if (RQ(0) && RQ(1) && IM(2))
9709 first_error (_("expected <Qd>, <Qn>, #<imm> or <Dd>, <Dn>, #<imm> "
9716 if (RD(0) && RD(1) && RD(2) && IM(3))
9718 if (RQ(0) && RQ(1) && RQ(2) && IM(3))
9721 first_error (_("expected <Qd>, <Qn>, <Qm>, #<imm> or "
9722 "<Dd>, <Dn>, <Dm>, #<imm> operands"));
9728 if (RD(0) && RD(1) && SC(2))
9730 else if (RQ(0) && RQ(1) && SC(2))
9733 first_error (_("expected <Qd>, <Qn>, <Dm[x]> or <Dd>, <Dn>, <Dm[x]> "
9742 else if (RQ(0) && RQ(1))
9745 first_error (_("expected <Qd>, <Qm> or <Dd>, <Dm> operands"));
9753 else if (RQ(0) && SC(1))
9756 first_error (_("expected <Qd>, <Dm[x]> or <Dd>, <Dm[x]> operands"));
9764 else if (RQ(0) && RR(1))
9767 first_error (_("expected <Qd>, <Rm> or <Dd>, <Rm> operands"));
9775 else if (RQ(0) && IM(1))
9778 first_error (_("expected <Qd>, #<imm> or <Dd>, #<imm> operands"));
9795 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
9798 /* Allow modification to be made to types which are constrained to be
9799 based on the key element, based on bits set alongside N_EQK. */
9800 if ((typebits & N_EQK) != 0)
9802 if ((typebits & N_HLF) != 0)
9804 else if ((typebits & N_DBL) != 0)
9806 if ((typebits & N_SGN) != 0)
9807 *g_type = NT_signed;
9808 else if ((typebits & N_UNS) != 0)
9809 *g_type = NT_unsigned;
9810 else if ((typebits & N_INT) != 0)
9811 *g_type = NT_integer;
9812 else if ((typebits & N_FLT) != 0)
9814 else if ((typebits & N_SIZ) != 0)
9815 *g_type = NT_untyped;
9819 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
9820 operand type, i.e. the single type specified in a Neon instruction when it
9821 is the only one given. */
9823 static struct neon_type_el
9824 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
9826 struct neon_type_el dest = *key;
9828 assert ((thisarg & N_EQK) != 0);
9830 neon_modify_type_size (thisarg, &dest.type, &dest.size);
9835 /* Convert Neon type and size into compact bitmask representation. */
9837 static enum neon_type_mask
9838 type_chk_of_el_type (enum neon_el_type type, unsigned size)
9846 case 16: return N_16;
9847 case 32: return N_32;
9848 case 64: return N_64;
9856 case 8: return N_I8;
9857 case 16: return N_I16;
9858 case 32: return N_I32;
9859 case 64: return N_I64;
9872 case 8: return N_P8;
9873 case 16: return N_P16;
9881 case 8: return N_S8;
9882 case 16: return N_S16;
9883 case 32: return N_S32;
9884 case 64: return N_S64;
9892 case 8: return N_U8;
9893 case 16: return N_U16;
9894 case 32: return N_U32;
9895 case 64: return N_U64;
9906 /* Convert compact Neon bitmask type representation to a type and size. Only
9907 handles the case where a single bit is set in the mask. */
9910 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
9911 enum neon_type_mask mask)
9913 if ((mask & N_EQK) != 0)
9916 if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
9918 else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
9920 else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
9922 else if ((mask & (N_S64 | N_U64 | N_I64 | N_64)) != 0)
9927 if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
9929 else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
9930 *type = NT_unsigned;
9931 else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
9933 else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
9935 else if ((mask & (N_P8 | N_P16)) != 0)
9937 else if ((mask & N_F32) != 0)
9945 /* Modify a bitmask of allowed types. This is only needed for type
9949 modify_types_allowed (unsigned allowed, unsigned mods)
9952 enum neon_el_type type;
9958 for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
9960 if (el_type_of_type_chk (&type, &size, allowed & i) == SUCCESS)
9962 neon_modify_type_size (mods, &type, &size);
9963 destmask |= type_chk_of_el_type (type, size);
9970 /* Check type and return type classification.
9971 The manual states (paraphrase): If one datatype is given, it indicates the
9973 - the second operand, if there is one
9974 - the operand, if there is no second operand
9975 - the result, if there are no operands.
9976 This isn't quite good enough though, so we use a concept of a "key" datatype
9977 which is set on a per-instruction basis, which is the one which matters when
9978 only one data type is written.
9979 Note: this function has side-effects (e.g. filling in missing operands). All
9980 Neon instructions should call it before performing bit encoding.
9983 static struct neon_type_el
9984 neon_check_type (unsigned els, enum neon_shape ns, ...)
9987 unsigned i, pass, key_el = 0;
9988 unsigned types[NEON_MAX_TYPE_ELS];
9989 enum neon_el_type k_type = NT_invtype;
9990 unsigned k_size = -1u;
9991 struct neon_type_el badtype = {NT_invtype, -1};
9992 unsigned key_allowed = 0;
9994 /* Optional registers in Neon instructions are always (not) in operand 1.
9995 Fill in the missing operand here, if it was omitted. */
9996 if (els > 1 && !inst.operands[1].present)
9997 inst.operands[1] = inst.operands[0];
9999 /* Suck up all the varargs. */
10001 for (i = 0; i < els; i++)
10003 unsigned thisarg = va_arg (ap, unsigned);
10004 if (thisarg == N_IGNORE_TYPE)
10009 types[i] = thisarg;
10010 if ((thisarg & N_KEY) != 0)
10015 if (inst.vectype.elems > 0)
10016 for (i = 0; i < els; i++)
10017 if (inst.operands[i].vectype.type != NT_invtype)
10019 first_error (_("types specified in both the mnemonic and operands"));
10023 /* Duplicate inst.vectype elements here as necessary.
10024 FIXME: No idea if this is exactly the same as the ARM assembler,
10025 particularly when an insn takes one register and one non-register
10027 if (inst.vectype.elems == 1 && els > 1)
10030 inst.vectype.elems = els;
10031 inst.vectype.el[key_el] = inst.vectype.el[0];
10032 for (j = 0; j < els; j++)
10034 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10037 else if (inst.vectype.elems == 0 && els > 0)
10040 /* No types were given after the mnemonic, so look for types specified
10041 after each operand. We allow some flexibility here; as long as the
10042 "key" operand has a type, we can infer the others. */
10043 for (j = 0; j < els; j++)
10044 if (inst.operands[j].vectype.type != NT_invtype)
10045 inst.vectype.el[j] = inst.operands[j].vectype;
10047 if (inst.operands[key_el].vectype.type != NT_invtype)
10049 for (j = 0; j < els; j++)
10050 if (inst.operands[j].vectype.type == NT_invtype)
10051 inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
10056 first_error (_("operand types can't be inferred"));
10060 else if (inst.vectype.elems != els)
10062 first_error (_("type specifier has the wrong number of parts"));
10066 for (pass = 0; pass < 2; pass++)
10068 for (i = 0; i < els; i++)
10070 unsigned thisarg = types[i];
10071 unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
10072 ? modify_types_allowed (key_allowed, thisarg) : thisarg;
10073 enum neon_el_type g_type = inst.vectype.el[i].type;
10074 unsigned g_size = inst.vectype.el[i].size;
10076 /* Decay more-specific signed & unsigned types to sign-insensitive
10077 integer types if sign-specific variants are unavailable. */
10078 if ((g_type == NT_signed || g_type == NT_unsigned)
10079 && (types_allowed & N_SU_ALL) == 0)
10080 g_type = NT_integer;
10082 /* If only untyped args are allowed, decay any more specific types to
10083 them. Some instructions only care about signs for some element
10084 sizes, so handle that properly. */
10085 if ((g_size == 8 && (types_allowed & N_8) != 0)
10086 || (g_size == 16 && (types_allowed & N_16) != 0)
10087 || (g_size == 32 && (types_allowed & N_32) != 0)
10088 || (g_size == 64 && (types_allowed & N_64) != 0))
10089 g_type = NT_untyped;
10093 if ((thisarg & N_KEY) != 0)
10097 key_allowed = thisarg & ~N_KEY;
10102 if ((thisarg & N_EQK) == 0)
10104 unsigned given_type = type_chk_of_el_type (g_type, g_size);
10106 if ((given_type & types_allowed) == 0)
10108 first_error (_("bad type in Neon instruction"));
10114 enum neon_el_type mod_k_type = k_type;
10115 unsigned mod_k_size = k_size;
10116 neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
10117 if (g_type != mod_k_type || g_size != mod_k_size)
10119 first_error (_("inconsistent types in Neon instruction"));
10127 return inst.vectype.el[key_el];
10130 /* Fix up Neon data-processing instructions, ORing in the correct bits for
10131 ARM mode or Thumb mode and moving the encoded bit 24 to bit 28. */
10134 neon_dp_fixup (unsigned i)
10138 /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode. */
10152 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
10156 neon_logbits (unsigned x)
10158 return ffs (x) - 4;
10161 #define LOW4(R) ((R) & 0xf)
10162 #define HI1(R) (((R) >> 4) & 1)
10164 /* Encode insns with bit pattern:
10166 |28/24|23|22 |21 20|19 16|15 12|11 8|7|6|5|4|3 0|
10167 | U |x |D |size | Rn | Rd |x x x x|N|Q|M|x| Rm |
10169 SIZE is passed in bits. -1 means size field isn't changed, in case it has a
10170 different meaning for some instruction. */
10173 neon_three_same (int isquad, int ubit, int size)
10175 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10176 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10177 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10178 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10179 inst.instruction |= LOW4 (inst.operands[2].reg);
10180 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
10181 inst.instruction |= (isquad != 0) << 6;
10182 inst.instruction |= (ubit != 0) << 24;
10184 inst.instruction |= neon_logbits (size) << 20;
10186 inst.instruction = neon_dp_fixup (inst.instruction);
10189 /* Encode instructions of the form:
10191 |28/24|23|22|21 20|19 18|17 16|15 12|11 7|6|5|4|3 0|
10192 | U |x |D |x x |size |x x | Rd |x x x x x|Q|M|x| Rm |
10194 Don't write size if SIZE == -1. */
10197 neon_two_same (int qbit, int ubit, int size)
10199 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10200 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10201 inst.instruction |= LOW4 (inst.operands[1].reg);
10202 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10203 inst.instruction |= (qbit != 0) << 6;
10204 inst.instruction |= (ubit != 0) << 24;
10207 inst.instruction |= neon_logbits (size) << 18;
10209 inst.instruction = neon_dp_fixup (inst.instruction);
10212 /* Neon instruction encoders, in approximate order of appearance. */
10215 do_neon_dyadic_i_su (void)
10217 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10218 struct neon_type_el et = neon_check_type (3, rs,
10219 N_EQK, N_EQK, N_SU_32 | N_KEY);
10220 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10224 do_neon_dyadic_i64_su (void)
10226 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10227 struct neon_type_el et = neon_check_type (3, rs,
10228 N_EQK, N_EQK, N_SU_ALL | N_KEY);
10229 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10233 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
10236 unsigned size = et.size >> 3;
10237 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10238 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10239 inst.instruction |= LOW4 (inst.operands[1].reg);
10240 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10241 inst.instruction |= (isquad != 0) << 6;
10242 inst.instruction |= immbits << 16;
10243 inst.instruction |= (size >> 3) << 7;
10244 inst.instruction |= (size & 0x7) << 19;
10246 inst.instruction |= (uval != 0) << 24;
10248 inst.instruction = neon_dp_fixup (inst.instruction);
10252 do_neon_shl_imm (void)
10254 if (!inst.operands[2].isreg)
10256 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10257 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
10258 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10259 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, inst.operands[2].imm);
10263 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10264 struct neon_type_el et = neon_check_type (3, rs,
10265 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10266 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10267 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10272 do_neon_qshl_imm (void)
10274 if (!inst.operands[2].isreg)
10276 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10277 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
10278 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10279 neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
10280 inst.operands[2].imm);
10284 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10285 struct neon_type_el et = neon_check_type (3, rs,
10286 N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
10287 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10288 neon_three_same (rs == NS_QQQ, et.type == NT_unsigned, et.size);
10293 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
10295 /* Handle .I8 and .I64 as pseudo-instructions. */
10299 /* Unfortunately, this will make everything apart from zero out-of-range.
10300 FIXME is this the intended semantics? There doesn't seem much point in
10301 accepting .I8 if so. */
10302 immediate |= immediate << 8;
10306 /* Similarly, anything other than zero will be replicated in bits [63:32],
10307 which probably isn't want we want if we specified .I64. */
10308 if (immediate != 0)
10309 goto bad_immediate;
10315 if (immediate == (immediate & 0x000000ff))
10317 *immbits = immediate;
10318 return (size == 16) ? 0x9 : 0x1;
10320 else if (immediate == (immediate & 0x0000ff00))
10322 *immbits = immediate >> 8;
10323 return (size == 16) ? 0xb : 0x3;
10325 else if (immediate == (immediate & 0x00ff0000))
10327 *immbits = immediate >> 16;
10330 else if (immediate == (immediate & 0xff000000))
10332 *immbits = immediate >> 24;
10337 first_error (_("immediate value out of range"));
10341 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
10345 neon_bits_same_in_bytes (unsigned imm)
10347 return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
10348 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
10349 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
10350 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
10353 /* For immediate of above form, return 0bABCD. */
10356 neon_squash_bits (unsigned imm)
10358 return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
10359 | ((imm & 0x01000000) >> 21);
10362 /* Compress quarter-float representation to 0b...000 abcdefgh. */
10365 neon_qfloat_bits (unsigned imm)
10367 return ((imm >> 19) & 0x7f) | ((imm >> 24) & 0x80);
10370 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
10371 the instruction. *OP is passed as the initial value of the op field, and
10372 may be set to a different value depending on the constant (i.e.
10373 "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
10377 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
10378 int *op, int size, enum neon_el_type type)
10380 if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
10382 if (size != 32 || *op == 1)
10384 *immbits = neon_qfloat_bits (immlo);
10387 else if (size == 64 && neon_bits_same_in_bytes (immhi)
10388 && neon_bits_same_in_bytes (immlo))
10390 /* Check this one first so we don't have to bother with immhi in later
10394 *immbits = (neon_squash_bits (immhi) << 4) | neon_squash_bits (immlo);
10398 else if (immhi != 0)
10400 else if (immlo == (immlo & 0x000000ff))
10402 /* 64-bit case was already handled. Don't allow MVN with 8-bit
10404 if ((size != 8 && size != 16 && size != 32)
10405 || (size == 8 && *op == 1))
10408 return (size == 8) ? 0xe : (size == 16) ? 0x8 : 0x0;
10410 else if (immlo == (immlo & 0x0000ff00))
10412 if (size != 16 && size != 32)
10414 *immbits = immlo >> 8;
10415 return (size == 16) ? 0xa : 0x2;
10417 else if (immlo == (immlo & 0x00ff0000))
10421 *immbits = immlo >> 16;
10424 else if (immlo == (immlo & 0xff000000))
10428 *immbits = immlo >> 24;
10431 else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
10435 *immbits = (immlo >> 8) & 0xff;
10438 else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
10442 *immbits = (immlo >> 16) & 0xff;
10449 /* Write immediate bits [7:0] to the following locations:
10451 |28/24|23 19|18 16|15 4|3 0|
10452 | a |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
10454 This function is used by VMOV/VMVN/VORR/VBIC. */
10457 neon_write_immbits (unsigned immbits)
10459 inst.instruction |= immbits & 0xf;
10460 inst.instruction |= ((immbits >> 4) & 0x7) << 16;
10461 inst.instruction |= ((immbits >> 7) & 0x1) << 24;
10464 /* Invert low-order SIZE bits of XHI:XLO. */
10467 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
10469 unsigned immlo = xlo ? *xlo : 0;
10470 unsigned immhi = xhi ? *xhi : 0;
10475 immlo = (~immlo) & 0xff;
10479 immlo = (~immlo) & 0xffff;
10483 immhi = (~immhi) & 0xffffffff;
10484 /* fall through. */
10487 immlo = (~immlo) & 0xffffffff;
10502 do_neon_logic (void)
10504 if (inst.operands[2].present && inst.operands[2].isreg)
10506 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10507 neon_check_type (3, rs, N_IGNORE_TYPE);
10508 /* U bit and size field were set as part of the bitmask. */
10509 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10510 neon_three_same (rs == NS_QQQ, 0, -1);
10514 enum neon_shape rs = neon_check_shape (NS_DI_QI);
10515 struct neon_type_el et = neon_check_type (1, rs, N_I8 | N_I16 | N_I32
10517 enum neon_opc opcode = inst.instruction & 0x0fffffff;
10521 if (et.type == NT_invtype)
10524 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10529 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
10534 cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
10539 /* Pseudo-instruction for VBIC. */
10540 immbits = inst.operands[1].imm;
10541 neon_invert_size (&immbits, 0, et.size);
10542 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
10546 /* Pseudo-instruction for VORR. */
10547 immbits = inst.operands[1].imm;
10548 neon_invert_size (&immbits, 0, et.size);
10549 cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
10559 inst.instruction |= (rs == NS_QI) << 6;
10560 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10561 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10562 inst.instruction |= cmode << 8;
10563 neon_write_immbits (immbits);
10565 inst.instruction = neon_dp_fixup (inst.instruction);
10570 do_neon_bitfield (void)
10572 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10573 neon_check_type (3, rs, N_IGNORE_TYPE);
10574 neon_three_same (rs == NS_QQQ, 0, -1);
10578 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
10581 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10582 struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
10584 if (et.type == NT_float)
10586 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
10587 neon_three_same (rs == NS_QQQ, 0, -1);
10591 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10592 neon_three_same (rs == NS_QQQ, et.type == ubit_meaning, et.size);
10597 do_neon_dyadic_if_su (void)
10599 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
10603 do_neon_dyadic_if_su_d (void)
10605 /* This version only allow D registers, but that constraint is enforced during
10606 operand parsing so we don't need to do anything extra here. */
10607 neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
10611 do_neon_dyadic_if_i (void)
10613 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
10617 do_neon_dyadic_if_i_d (void)
10619 neon_dyadic_misc (NT_unsigned, N_IF_32, 0);
10623 do_neon_addsub_if_i (void)
10625 /* The "untyped" case can't happen. Do this to stop the "U" bit being
10626 affected if we specify unsigned args. */
10627 neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
10630 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
10632 V<op> A,B (A is operand 0, B is operand 2)
10637 so handle that case specially. */
10640 neon_exchange_operands (void)
10642 void *scratch = alloca (sizeof (inst.operands[0]));
10643 if (inst.operands[1].present)
10645 /* Swap operands[1] and operands[2]. */
10646 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
10647 inst.operands[1] = inst.operands[2];
10648 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
10652 inst.operands[1] = inst.operands[2];
10653 inst.operands[2] = inst.operands[0];
10658 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
10660 if (inst.operands[2].isreg)
10663 neon_exchange_operands ();
10664 neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
10668 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10669 struct neon_type_el et = neon_check_type (2, rs,
10670 N_EQK | N_SIZ, immtypes | N_KEY);
10672 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10673 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10674 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10675 inst.instruction |= LOW4 (inst.operands[1].reg);
10676 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10677 inst.instruction |= (rs == NS_QQI) << 6;
10678 inst.instruction |= (et.type == NT_float) << 10;
10679 inst.instruction |= neon_logbits (et.size) << 18;
10681 inst.instruction = neon_dp_fixup (inst.instruction);
10688 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
10692 do_neon_cmp_inv (void)
10694 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
10700 neon_compare (N_IF_32, N_IF_32, FALSE);
10703 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
10704 scalars, which are encoded in 5 bits, M : Rm.
10705 For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
10706 M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
10710 neon_scalar_for_mul (unsigned scalar, unsigned elsize)
10712 unsigned regno = NEON_SCALAR_REG (scalar);
10713 unsigned elno = NEON_SCALAR_INDEX (scalar);
10718 if (regno > 7 || elno > 3)
10720 return regno | (elno << 3);
10723 if (regno > 15 || elno > 1)
10725 return regno | (elno << 4);
10729 first_error (_("scalar out of range for multiply instruction"));
10735 /* Encode multiply / multiply-accumulate scalar instructions. */
10738 neon_mul_mac (struct neon_type_el et, int ubit)
10742 /* Give a more helpful error message if we have an invalid type. */
10743 if (et.type == NT_invtype)
10746 scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
10747 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10748 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10749 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10750 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10751 inst.instruction |= LOW4 (scalar);
10752 inst.instruction |= HI1 (scalar) << 5;
10753 inst.instruction |= (et.type == NT_float) << 8;
10754 inst.instruction |= neon_logbits (et.size) << 20;
10755 inst.instruction |= (ubit != 0) << 24;
10757 inst.instruction = neon_dp_fixup (inst.instruction);
10761 do_neon_mac_maybe_scalar (void)
10763 if (inst.operands[2].isscalar)
10765 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10766 struct neon_type_el et = neon_check_type (3, rs,
10767 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
10768 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10769 neon_mul_mac (et, rs == NS_QQS);
10772 do_neon_dyadic_if_i ();
10778 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10779 struct neon_type_el et = neon_check_type (3, rs,
10780 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
10781 neon_three_same (rs == NS_QQQ, 0, et.size);
10784 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
10785 same types as the MAC equivalents. The polynomial type for this instruction
10786 is encoded the same as the integer type. */
10791 if (inst.operands[2].isscalar)
10792 do_neon_mac_maybe_scalar ();
10794 neon_dyadic_misc (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8, 0);
10798 do_neon_qdmulh (void)
10800 if (inst.operands[2].isscalar)
10802 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10803 struct neon_type_el et = neon_check_type (3, rs,
10804 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
10805 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10806 neon_mul_mac (et, rs == NS_QQS);
10810 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10811 struct neon_type_el et = neon_check_type (3, rs,
10812 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
10813 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10814 /* The U bit (rounding) comes from bit mask. */
10815 neon_three_same (rs == NS_QQQ, 0, et.size);
10820 do_neon_fcmp_absolute (void)
10822 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10823 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10824 /* Size field comes from bit mask. */
10825 neon_three_same (rs == NS_QQQ, 1, -1);
10829 do_neon_fcmp_absolute_inv (void)
10831 neon_exchange_operands ();
10832 do_neon_fcmp_absolute ();
10836 do_neon_step (void)
10838 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10839 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10840 neon_three_same (rs == NS_QQQ, 0, -1);
10844 do_neon_abs_neg (void)
10846 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10847 struct neon_type_el et = neon_check_type (3, rs,
10848 N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
10849 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10850 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10851 inst.instruction |= LOW4 (inst.operands[1].reg);
10852 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10853 inst.instruction |= (rs == NS_QQ) << 6;
10854 inst.instruction |= (et.type == NT_float) << 10;
10855 inst.instruction |= neon_logbits (et.size) << 18;
10857 inst.instruction = neon_dp_fixup (inst.instruction);
10863 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10864 struct neon_type_el et = neon_check_type (2, rs,
10865 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10866 int imm = inst.operands[2].imm;
10867 constraint (imm < 0 || (unsigned)imm >= et.size,
10868 _("immediate out of range for insert"));
10869 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10875 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10876 struct neon_type_el et = neon_check_type (2, rs,
10877 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10878 int imm = inst.operands[2].imm;
10879 constraint (imm < 1 || (unsigned)imm > et.size,
10880 _("immediate out of range for insert"));
10881 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, et.size - imm);
10885 do_neon_qshlu_imm (void)
10887 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10888 struct neon_type_el et = neon_check_type (2, rs,
10889 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
10890 int imm = inst.operands[2].imm;
10891 constraint (imm < 0 || (unsigned)imm >= et.size,
10892 _("immediate out of range for shift"));
10893 /* Only encodes the 'U present' variant of the instruction.
10894 In this case, signed types have OP (bit 8) set to 0.
10895 Unsigned types have OP set to 1. */
10896 inst.instruction |= (et.type == NT_unsigned) << 8;
10897 /* The rest of the bits are the same as other immediate shifts. */
10898 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10902 do_neon_qmovn (void)
10904 struct neon_type_el et = neon_check_type (2, NS_DQ,
10905 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10906 /* Saturating move where operands can be signed or unsigned, and the
10907 destination has the same signedness. */
10908 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10909 if (et.type == NT_unsigned)
10910 inst.instruction |= 0xc0;
10912 inst.instruction |= 0x80;
10913 neon_two_same (0, 1, et.size / 2);
10917 do_neon_qmovun (void)
10919 struct neon_type_el et = neon_check_type (2, NS_DQ,
10920 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10921 /* Saturating move with unsigned results. Operands must be signed. */
10922 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10923 neon_two_same (0, 1, et.size / 2);
10927 do_neon_rshift_sat_narrow (void)
10929 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10930 or unsigned. If operands are unsigned, results must also be unsigned. */
10931 struct neon_type_el et = neon_check_type (2, NS_DQI,
10932 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10933 int imm = inst.operands[2].imm;
10934 /* This gets the bounds check, size encoding and immediate bits calculation
10938 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
10939 VQMOVN.I<size> <Dd>, <Qm>. */
10942 inst.operands[2].present = 0;
10943 inst.instruction = N_MNEM_vqmovn;
10948 constraint (imm < 1 || (unsigned)imm > et.size,
10949 _("immediate out of range"));
10950 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
10954 do_neon_rshift_sat_narrow_u (void)
10956 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10957 or unsigned. If operands are unsigned, results must also be unsigned. */
10958 struct neon_type_el et = neon_check_type (2, NS_DQI,
10959 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10960 int imm = inst.operands[2].imm;
10961 /* This gets the bounds check, size encoding and immediate bits calculation
10965 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
10966 VQMOVUN.I<size> <Dd>, <Qm>. */
10969 inst.operands[2].present = 0;
10970 inst.instruction = N_MNEM_vqmovun;
10975 constraint (imm < 1 || (unsigned)imm > et.size,
10976 _("immediate out of range"));
10977 /* FIXME: The manual is kind of unclear about what value U should have in
10978 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
10980 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
10984 do_neon_movn (void)
10986 struct neon_type_el et = neon_check_type (2, NS_DQ,
10987 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10988 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10989 neon_two_same (0, 1, et.size / 2);
10993 do_neon_rshift_narrow (void)
10995 struct neon_type_el et = neon_check_type (2, NS_DQI,
10996 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10997 int imm = inst.operands[2].imm;
10998 /* This gets the bounds check, size encoding and immediate bits calculation
11002 /* If immediate is zero then we are a pseudo-instruction for
11003 VMOVN.I<size> <Dd>, <Qm> */
11006 inst.operands[2].present = 0;
11007 inst.instruction = N_MNEM_vmovn;
11012 constraint (imm < 1 || (unsigned)imm > et.size,
11013 _("immediate out of range for narrowing operation"));
11014 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
11018 do_neon_shll (void)
11020 /* FIXME: Type checking when lengthening. */
11021 struct neon_type_el et = neon_check_type (2, NS_QDI,
11022 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
11023 unsigned imm = inst.operands[2].imm;
11025 if (imm == et.size)
11027 /* Maximum shift variant. */
11028 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11029 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11030 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11031 inst.instruction |= LOW4 (inst.operands[1].reg);
11032 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11033 inst.instruction |= neon_logbits (et.size) << 18;
11035 inst.instruction = neon_dp_fixup (inst.instruction);
11039 /* A more-specific type check for non-max versions. */
11040 et = neon_check_type (2, NS_QDI,
11041 N_EQK | N_DBL, N_SU_32 | N_KEY);
11042 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11043 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
11047 /* Check the various types for the VCVT instruction, and return the one that
11048 the current instruction is. */
11051 neon_cvt_flavour (enum neon_shape rs)
11053 #define CVT_VAR(C,X,Y) \
11054 et = neon_check_type (2, rs, (X), (Y)); \
11055 if (et.type != NT_invtype) \
11057 inst.error = NULL; \
11060 struct neon_type_el et;
11062 CVT_VAR (0, N_S32, N_F32);
11063 CVT_VAR (1, N_U32, N_F32);
11064 CVT_VAR (2, N_F32, N_S32);
11065 CVT_VAR (3, N_F32, N_U32);
11074 /* Fixed-point conversion with #0 immediate is encoded as an integer
11076 if (inst.operands[2].present && inst.operands[2].imm != 0)
11078 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
11079 int flavour = neon_cvt_flavour (rs);
11080 unsigned immbits = 32 - inst.operands[2].imm;
11081 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
11082 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11084 inst.instruction |= enctab[flavour];
11085 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11086 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11087 inst.instruction |= LOW4 (inst.operands[1].reg);
11088 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11089 inst.instruction |= (rs == NS_QQI) << 6;
11090 inst.instruction |= 1 << 21;
11091 inst.instruction |= immbits << 16;
11095 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11096 int flavour = neon_cvt_flavour (rs);
11097 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
11098 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11100 inst.instruction |= enctab[flavour];
11101 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11102 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11103 inst.instruction |= LOW4 (inst.operands[1].reg);
11104 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11105 inst.instruction |= (rs == NS_QQ) << 6;
11106 inst.instruction |= 2 << 18;
11108 inst.instruction = neon_dp_fixup (inst.instruction);
11112 neon_move_immediate (void)
11114 enum neon_shape rs = neon_check_shape (NS_DI_QI);
11115 struct neon_type_el et = neon_check_type (1, rs,
11116 N_I8 | N_I16 | N_I32 | N_I64 | N_F32);
11117 unsigned immlo, immhi = 0, immbits;
11120 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
11121 op = (inst.instruction & (1 << 5)) != 0;
11123 immlo = inst.operands[1].imm;
11124 if (inst.operands[1].regisimm)
11125 immhi = inst.operands[1].reg;
11127 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
11128 _("immediate has bits set outside the operand size"));
11130 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
11131 et.size, et.type)) == FAIL)
11133 /* Invert relevant bits only. */
11134 neon_invert_size (&immlo, &immhi, et.size);
11135 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
11136 with one or the other; those cases are caught by
11137 neon_cmode_for_move_imm. */
11139 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
11140 et.size, et.type)) == FAIL)
11142 first_error (_("immediate out of range"));
11147 inst.instruction &= ~(1 << 5);
11148 inst.instruction |= op << 5;
11150 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11151 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11152 inst.instruction |= (rs == NS_QI) << 6;
11153 inst.instruction |= cmode << 8;
11155 neon_write_immbits (immbits);
11161 if (inst.operands[1].isreg)
11163 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11165 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11166 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11167 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11168 inst.instruction |= LOW4 (inst.operands[1].reg);
11169 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11170 inst.instruction |= (rs == NS_QQ) << 6;
11174 inst.instruction = NEON_ENC_IMMED (inst.instruction);
11175 neon_move_immediate ();
11178 inst.instruction = neon_dp_fixup (inst.instruction);
11181 /* Encode instructions of form:
11183 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
11184 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
11189 neon_mixed_length (struct neon_type_el et, unsigned size)
11191 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11192 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11193 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11194 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11195 inst.instruction |= LOW4 (inst.operands[2].reg);
11196 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11197 inst.instruction |= (et.type == NT_unsigned) << 24;
11198 inst.instruction |= neon_logbits (size) << 20;
11200 inst.instruction = neon_dp_fixup (inst.instruction);
11204 do_neon_dyadic_long (void)
11206 /* FIXME: Type checking for lengthening op. */
11207 struct neon_type_el et = neon_check_type (3, NS_QDD,
11208 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
11209 neon_mixed_length (et, et.size);
11213 do_neon_abal (void)
11215 struct neon_type_el et = neon_check_type (3, NS_QDD,
11216 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
11217 neon_mixed_length (et, et.size);
11221 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
11223 if (inst.operands[2].isscalar)
11225 struct neon_type_el et = neon_check_type (3, NS_QDS,
11226 N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
11227 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11228 neon_mul_mac (et, et.type == NT_unsigned);
11232 struct neon_type_el et = neon_check_type (3, NS_QDD,
11233 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
11234 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11235 neon_mixed_length (et, et.size);
11240 do_neon_mac_maybe_scalar_long (void)
11242 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
11246 do_neon_dyadic_wide (void)
11248 struct neon_type_el et = neon_check_type (3, NS_QQD,
11249 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
11250 neon_mixed_length (et, et.size);
11254 do_neon_dyadic_narrow (void)
11256 struct neon_type_el et = neon_check_type (3, NS_QDD,
11257 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
11258 neon_mixed_length (et, et.size / 2);
11262 do_neon_mul_sat_scalar_long (void)
11264 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
11268 do_neon_vmull (void)
11270 if (inst.operands[2].isscalar)
11271 do_neon_mac_maybe_scalar_long ();
11274 struct neon_type_el et = neon_check_type (3, NS_QDD,
11275 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
11276 if (et.type == NT_poly)
11277 inst.instruction = NEON_ENC_POLY (inst.instruction);
11279 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11280 /* For polynomial encoding, size field must be 0b00 and the U bit must be
11281 zero. Should be OK as-is. */
11282 neon_mixed_length (et, et.size);
11289 enum neon_shape rs = neon_check_shape (NS_DDDI_QQQI);
11290 struct neon_type_el et = neon_check_type (3, rs,
11291 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
11292 unsigned imm = (inst.operands[3].imm * et.size) / 8;
11293 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11294 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11295 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11296 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11297 inst.instruction |= LOW4 (inst.operands[2].reg);
11298 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11299 inst.instruction |= (rs == NS_QQQI) << 6;
11300 inst.instruction |= imm << 8;
11302 inst.instruction = neon_dp_fixup (inst.instruction);
11308 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11309 struct neon_type_el et = neon_check_type (2, rs,
11310 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11311 unsigned op = (inst.instruction >> 7) & 3;
11312 /* N (width of reversed regions) is encoded as part of the bitmask. We
11313 extract it here to check the elements to be reversed are smaller.
11314 Otherwise we'd get a reserved instruction. */
11315 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
11316 assert (elsize != 0);
11317 constraint (et.size >= elsize,
11318 _("elements must be smaller than reversal region"));
11319 neon_two_same (rs == NS_QQ, 1, et.size);
11325 if (inst.operands[1].isscalar)
11327 enum neon_shape rs = neon_check_shape (NS_DS_QS);
11328 struct neon_type_el et = neon_check_type (2, rs,
11329 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11330 unsigned sizebits = et.size >> 3;
11331 unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
11332 int logsize = neon_logbits (et.size);
11333 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;
11334 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
11335 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11336 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11337 inst.instruction |= LOW4 (dm);
11338 inst.instruction |= HI1 (dm) << 5;
11339 inst.instruction |= (rs == NS_QS) << 6;
11340 inst.instruction |= x << 17;
11341 inst.instruction |= sizebits << 16;
11343 inst.instruction = neon_dp_fixup (inst.instruction);
11347 enum neon_shape rs = neon_check_shape (NS_DR_QR);
11348 struct neon_type_el et = neon_check_type (1, rs,
11349 N_8 | N_16 | N_32 | N_KEY);
11350 unsigned save_cond = inst.instruction & 0xf0000000;
11351 /* Duplicate ARM register to lanes of vector. */
11352 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
11355 case 8: inst.instruction |= 0x400000; break;
11356 case 16: inst.instruction |= 0x000020; break;
11357 case 32: inst.instruction |= 0x000000; break;
11360 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
11361 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
11362 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
11363 inst.instruction |= (rs == NS_QR) << 21;
11364 /* The encoding for this instruction is identical for the ARM and Thumb
11365 variants, except for the condition field. */
11367 inst.instruction |= 0xe0000000;
11369 inst.instruction |= save_cond;
11373 /* VMOV has particularly many variations. It can be one of:
11374 0. VMOV<c><q> <Qd>, <Qm>
11375 1. VMOV<c><q> <Dd>, <Dm>
11376 (Register operations, which are VORR with Rm = Rn.)
11377 2. VMOV<c><q>.<dt> <Qd>, #<imm>
11378 3. VMOV<c><q>.<dt> <Dd>, #<imm>
11380 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
11381 (ARM register to scalar.)
11382 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
11383 (Two ARM registers to vector.)
11384 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
11385 (Scalar to ARM register.)
11386 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
11387 (Vector to two ARM registers.)
11389 We should have just enough information to be able to disambiguate most of
11390 these, apart from "Two ARM registers to vector" and "Vector to two ARM
11391 registers" cases. For these, abuse the .regisimm operand field to signify a
11394 All the encoded bits are hardcoded by this function.
11396 Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
11397 Cases 5, 7 may be used with VFPv2 and above.
11399 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
11400 can specify a type where it doesn't make sense to, and is ignored).
11406 int nargs = inst.operands[0].present + inst.operands[1].present
11407 + inst.operands[2].present;
11408 unsigned save_cond = thumb_mode ? 0xe0000000 : inst.instruction & 0xf0000000;
11409 const char *vfp_vers = "selected FPU does not support instruction";
11414 /* Cases 0, 1, 2, 3, 4, 6. */
11415 if (inst.operands[1].isscalar)
11418 struct neon_type_el et = neon_check_type (2, NS_IGNORE,
11419 N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
11420 unsigned logsize = neon_logbits (et.size);
11421 unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
11422 unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
11423 unsigned abcdebits = 0;
11425 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
11427 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
11428 && et.size != 32, _(vfp_vers));
11429 constraint (et.type == NT_invtype, _("bad type for scalar"));
11430 constraint (x >= 64 / et.size, _("scalar index out of range"));
11434 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
11435 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
11436 case 32: abcdebits = 0x00; break;
11440 abcdebits |= x << logsize;
11441 inst.instruction = save_cond;
11442 inst.instruction |= 0xe100b10;
11443 inst.instruction |= LOW4 (dn) << 16;
11444 inst.instruction |= HI1 (dn) << 7;
11445 inst.instruction |= inst.operands[0].reg << 12;
11446 inst.instruction |= (abcdebits & 3) << 5;
11447 inst.instruction |= (abcdebits >> 2) << 21;
11449 else if (inst.operands[1].isreg)
11451 /* Cases 0, 1, 4. */
11452 if (inst.operands[0].isscalar)
11455 unsigned bcdebits = 0;
11456 struct neon_type_el et = neon_check_type (2, NS_IGNORE,
11457 N_8 | N_16 | N_32 | N_KEY, N_EQK);
11458 int logsize = neon_logbits (et.size);
11459 unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
11460 unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
11462 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1),
11464 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1)
11465 && et.size != 32, _(vfp_vers));
11466 constraint (et.type == NT_invtype, _("bad type for scalar"));
11467 constraint (x >= 64 / et.size, _("scalar index out of range"));
11471 case 8: bcdebits = 0x8; break;
11472 case 16: bcdebits = 0x1; break;
11473 case 32: bcdebits = 0x0; break;
11477 bcdebits |= x << logsize;
11478 inst.instruction = save_cond;
11479 inst.instruction |= 0xe000b10;
11480 inst.instruction |= LOW4 (dn) << 16;
11481 inst.instruction |= HI1 (dn) << 7;
11482 inst.instruction |= inst.operands[1].reg << 12;
11483 inst.instruction |= (bcdebits & 3) << 5;
11484 inst.instruction |= (bcdebits >> 2) << 21;
11489 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11490 /* The architecture manual I have doesn't explicitly state which
11491 value the U bit should have for register->register moves, but
11492 the equivalent VORR instruction has U = 0, so do that. */
11493 inst.instruction = 0x0200110;
11494 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11495 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11496 inst.instruction |= LOW4 (inst.operands[1].reg);
11497 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
11498 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11499 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11500 inst.instruction |= (rs == NS_QQ) << 6;
11502 inst.instruction = neon_dp_fixup (inst.instruction);
11508 inst.instruction = 0x0800010;
11509 neon_move_immediate ();
11510 inst.instruction = neon_dp_fixup (inst.instruction);
11516 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2),
11519 if (inst.operands[0].regisimm)
11522 inst.instruction = save_cond;
11523 inst.instruction |= 0xc400b10;
11524 inst.instruction |= LOW4 (inst.operands[0].reg);
11525 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
11526 inst.instruction |= inst.operands[1].reg << 12;
11527 inst.instruction |= inst.operands[2].reg << 16;
11532 inst.instruction = save_cond;
11533 inst.instruction |= 0xc500b10;
11534 inst.instruction |= inst.operands[0].reg << 12;
11535 inst.instruction |= inst.operands[1].reg << 16;
11536 inst.instruction |= LOW4 (inst.operands[2].reg);
11537 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11547 do_neon_rshift_round_imm (void)
11549 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
11550 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
11551 int imm = inst.operands[2].imm;
11553 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
11556 inst.operands[2].present = 0;
11561 constraint (imm < 1 || (unsigned)imm > et.size,
11562 _("immediate out of range for shift"));
11563 neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
11568 do_neon_movl (void)
11570 struct neon_type_el et = neon_check_type (2, NS_QD,
11571 N_EQK | N_DBL, N_SU_32 | N_KEY);
11572 unsigned sizebits = et.size >> 3;
11573 inst.instruction |= sizebits << 19;
11574 neon_two_same (0, et.type == NT_unsigned, -1);
11580 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11581 struct neon_type_el et = neon_check_type (2, rs,
11582 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11583 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
11584 neon_two_same (rs == NS_QQ, 1, et.size);
11588 do_neon_zip_uzp (void)
11590 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11591 struct neon_type_el et = neon_check_type (2, rs,
11592 N_EQK, N_8 | N_16 | N_32 | N_KEY);
11593 if (rs == NS_DD && et.size == 32)
11595 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
11596 inst.instruction = N_MNEM_vtrn;
11600 neon_two_same (rs == NS_QQ, 1, et.size);
11604 do_neon_sat_abs_neg (void)
11606 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11607 struct neon_type_el et = neon_check_type (2, rs,
11608 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
11609 neon_two_same (rs == NS_QQ, 1, et.size);
11613 do_neon_pair_long (void)
11615 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11616 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
11617 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
11618 inst.instruction |= (et.type == NT_unsigned) << 7;
11619 neon_two_same (rs == NS_QQ, 1, et.size);
11623 do_neon_recip_est (void)
11625 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11626 struct neon_type_el et = neon_check_type (2, rs,
11627 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
11628 inst.instruction |= (et.type == NT_float) << 8;
11629 neon_two_same (rs == NS_QQ, 1, et.size);
11635 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11636 struct neon_type_el et = neon_check_type (2, rs,
11637 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
11638 neon_two_same (rs == NS_QQ, 1, et.size);
11644 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11645 struct neon_type_el et = neon_check_type (2, rs,
11646 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
11647 neon_two_same (rs == NS_QQ, 1, et.size);
11653 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11654 struct neon_type_el et = neon_check_type (2, rs,
11655 N_EQK | N_INT, N_8 | N_KEY);
11656 neon_two_same (rs == NS_QQ, 1, et.size);
11662 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
11663 neon_two_same (rs == NS_QQ, 1, -1);
11667 do_neon_tbl_tbx (void)
11669 unsigned listlenbits;
11670 neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
11672 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
11674 first_error (_("bad list length for table lookup"));
11678 listlenbits = inst.operands[1].imm - 1;
11679 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11680 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11681 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
11682 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
11683 inst.instruction |= LOW4 (inst.operands[2].reg);
11684 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
11685 inst.instruction |= listlenbits << 8;
11687 inst.instruction = neon_dp_fixup (inst.instruction);
11691 do_neon_ldm_stm (void)
11693 /* P, U and L bits are part of bitmask. */
11694 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
11695 unsigned offsetbits = inst.operands[1].imm * 2;
11697 constraint (is_dbmode && !inst.operands[0].writeback,
11698 _("writeback (!) must be used for VLDMDB and VSTMDB"));
11700 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
11701 _("register list must contain at least 1 and at most 16 "
11704 inst.instruction |= inst.operands[0].reg << 16;
11705 inst.instruction |= inst.operands[0].writeback << 21;
11706 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
11707 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
11709 inst.instruction |= offsetbits;
11712 inst.instruction |= 0xe0000000;
11716 do_neon_ldr_str (void)
11718 unsigned offsetbits;
11720 int is_ldr = (inst.instruction & (1 << 20)) != 0;
11722 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11723 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11725 constraint (inst.reloc.pc_rel && !is_ldr,
11726 _("PC-relative addressing unavailable with VSTR"));
11728 constraint (!inst.reloc.pc_rel && inst.reloc.exp.X_op != O_constant,
11729 _("Immediate value must be a constant"));
11731 if (inst.reloc.exp.X_add_number < 0)
11734 offsetbits = -inst.reloc.exp.X_add_number / 4;
11737 offsetbits = inst.reloc.exp.X_add_number / 4;
11739 /* FIXME: Does this catch everything? */
11740 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11741 || inst.operands[1].postind || inst.operands[1].writeback
11742 || inst.operands[1].immisreg || inst.operands[1].shifted,
11744 constraint ((inst.operands[1].imm & 3) != 0,
11745 _("Offset must be a multiple of 4"));
11746 constraint (offsetbits != (offsetbits & 0xff),
11747 _("Immediate offset out of range"));
11749 inst.instruction |= inst.operands[1].reg << 16;
11750 inst.instruction |= offsetbits & 0xff;
11751 inst.instruction |= offset_up << 23;
11754 inst.instruction |= 0xe0000000;
11756 if (inst.reloc.pc_rel)
11759 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
11761 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
11764 inst.reloc.type = BFD_RELOC_UNUSED;
11767 /* "interleave" version also handles non-interleaving register VLD1/VST1
11771 do_neon_ld_st_interleave (void)
11773 struct neon_type_el et = neon_check_type (1, NS_IGNORE,
11774 N_8 | N_16 | N_32 | N_64);
11775 unsigned alignbits = 0;
11777 /* The bits in this table go:
11778 0: register stride of one (0) or two (1)
11779 1,2: register list length, minus one (1, 2, 3, 4).
11780 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
11781 We use -1 for invalid entries. */
11782 const int typetable[] =
11784 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
11785 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
11786 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
11787 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
11791 if (et.type == NT_invtype)
11794 if (inst.operands[1].immisalign)
11795 switch (inst.operands[1].imm >> 8)
11797 case 64: alignbits = 1; break;
11799 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
11800 goto bad_alignment;
11804 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
11805 goto bad_alignment;
11810 first_error (_("bad alignment"));
11814 inst.instruction |= alignbits << 4;
11815 inst.instruction |= neon_logbits (et.size) << 6;
11817 /* Bits [4:6] of the immediate in a list specifier encode register stride
11818 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
11819 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
11820 up the right value for "type" in a table based on this value and the given
11821 list style, then stick it back. */
11822 idx = ((inst.operands[0].imm >> 4) & 7)
11823 | (((inst.instruction >> 8) & 3) << 3);
11825 typebits = typetable[idx];
11827 constraint (typebits == -1, _("bad list type for instruction"));
11829 inst.instruction &= ~0xf00;
11830 inst.instruction |= typebits << 8;
11833 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
11834 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
11835 otherwise. The variable arguments are a list of pairs of legal (size, align)
11836 values, terminated with -1. */
11839 neon_alignment_bit (int size, int align, int *do_align, ...)
11842 int result = FAIL, thissize, thisalign;
11844 if (!inst.operands[1].immisalign)
11850 va_start (ap, do_align);
11854 thissize = va_arg (ap, int);
11855 if (thissize == -1)
11857 thisalign = va_arg (ap, int);
11859 if (size == thissize && align == thisalign)
11862 while (result != SUCCESS);
11866 if (result == SUCCESS)
11869 first_error (_("unsupported alignment for instruction"));
11875 do_neon_ld_st_lane (void)
11877 struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
11878 int align_good, do_align = 0;
11879 int logsize = neon_logbits (et.size);
11880 int align = inst.operands[1].imm >> 8;
11881 int n = (inst.instruction >> 8) & 3;
11882 int max_el = 64 / et.size;
11884 if (et.type == NT_invtype)
11887 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
11888 _("bad list length"));
11889 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
11890 _("scalar index out of range"));
11891 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
11893 _("stride of 2 unavailable when element size is 8"));
11897 case 0: /* VLD1 / VST1. */
11898 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
11900 if (align_good == FAIL)
11904 unsigned alignbits = 0;
11907 case 16: alignbits = 0x1; break;
11908 case 32: alignbits = 0x3; break;
11911 inst.instruction |= alignbits << 4;
11915 case 1: /* VLD2 / VST2. */
11916 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
11918 if (align_good == FAIL)
11921 inst.instruction |= 1 << 4;
11924 case 2: /* VLD3 / VST3. */
11925 constraint (inst.operands[1].immisalign,
11926 _("can't use alignment with this instruction"));
11929 case 3: /* VLD4 / VST4. */
11930 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
11931 16, 64, 32, 64, 32, 128, -1);
11932 if (align_good == FAIL)
11936 unsigned alignbits = 0;
11939 case 8: alignbits = 0x1; break;
11940 case 16: alignbits = 0x1; break;
11941 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
11944 inst.instruction |= alignbits << 4;
11951 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
11952 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11953 inst.instruction |= 1 << (4 + logsize);
11955 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
11956 inst.instruction |= logsize << 10;
11959 /* Encode single n-element structure to all lanes VLD<n> instructions. */
11962 do_neon_ld_dup (void)
11964 struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
11965 int align_good, do_align = 0;
11967 if (et.type == NT_invtype)
11970 switch ((inst.instruction >> 8) & 3)
11972 case 0: /* VLD1. */
11973 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
11974 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
11975 &do_align, 16, 16, 32, 32, -1);
11976 if (align_good == FAIL)
11978 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
11981 case 2: inst.instruction |= 1 << 5; break;
11982 default: first_error (_("bad list length")); return;
11984 inst.instruction |= neon_logbits (et.size) << 6;
11987 case 1: /* VLD2. */
11988 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
11989 &do_align, 8, 16, 16, 32, 32, 64, -1);
11990 if (align_good == FAIL)
11992 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
11993 _("bad list length"));
11994 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11995 inst.instruction |= 1 << 5;
11996 inst.instruction |= neon_logbits (et.size) << 6;
11999 case 2: /* VLD3. */
12000 constraint (inst.operands[1].immisalign,
12001 _("can't use alignment with this instruction"));
12002 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
12003 _("bad list length"));
12004 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
12005 inst.instruction |= 1 << 5;
12006 inst.instruction |= neon_logbits (et.size) << 6;
12009 case 3: /* VLD4. */
12011 int align = inst.operands[1].imm >> 8;
12012 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
12013 16, 64, 32, 64, 32, 128, -1);
12014 if (align_good == FAIL)
12016 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
12017 _("bad list length"));
12018 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
12019 inst.instruction |= 1 << 5;
12020 if (et.size == 32 && align == 128)
12021 inst.instruction |= 0x3 << 6;
12023 inst.instruction |= neon_logbits (et.size) << 6;
12030 inst.instruction |= do_align << 4;
12033 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
12034 apart from bits [11:4]. */
12037 do_neon_ldx_stx (void)
12039 switch (NEON_LANE (inst.operands[0].imm))
12041 case NEON_INTERLEAVE_LANES:
12042 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
12043 do_neon_ld_st_interleave ();
12046 case NEON_ALL_LANES:
12047 inst.instruction = NEON_ENC_DUP (inst.instruction);
12052 inst.instruction = NEON_ENC_LANE (inst.instruction);
12053 do_neon_ld_st_lane ();
12056 /* L bit comes from bit mask. */
12057 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
12058 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
12059 inst.instruction |= inst.operands[1].reg << 16;
12061 if (inst.operands[1].postind)
12063 int postreg = inst.operands[1].imm & 0xf;
12064 constraint (!inst.operands[1].immisreg,
12065 _("post-index must be a register"));
12066 constraint (postreg == 0xd || postreg == 0xf,
12067 _("bad register for post-index"));
12068 inst.instruction |= postreg;
12070 else if (inst.operands[1].writeback)
12072 inst.instruction |= 0xd;
12075 inst.instruction |= 0xf;
12078 inst.instruction |= 0xf9000000;
12080 inst.instruction |= 0xf4000000;
12084 /* Overall per-instruction processing. */
12086 /* We need to be able to fix up arbitrary expressions in some statements.
12087 This is so that we can handle symbols that are an arbitrary distance from
12088 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
12089 which returns part of an address in a form which will be valid for
12090 a data instruction. We do this by pushing the expression into a symbol
12091 in the expr_section, and creating a fix for that. */
12094 fix_new_arm (fragS * frag,
12109 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
12113 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
12118 /* Mark whether the fix is to a THUMB instruction, or an ARM
12120 new_fix->tc_fix_data = thumb_mode;
12123 /* Create a frg for an instruction requiring relaxation. */
12125 output_relax_insn (void)
12132 /* The size of the instruction is unknown, so tie the debug info to the
12133 start of the instruction. */
12134 dwarf2_emit_insn (0);
12137 switch (inst.reloc.exp.X_op)
12140 sym = inst.reloc.exp.X_add_symbol;
12141 offset = inst.reloc.exp.X_add_number;
12145 offset = inst.reloc.exp.X_add_number;
12148 sym = make_expr_symbol (&inst.reloc.exp);
12152 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
12153 inst.relax, sym, offset, NULL/*offset, opcode*/);
12154 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
12157 /* Write a 32-bit thumb instruction to buf. */
12159 put_thumb32_insn (char * buf, unsigned long insn)
12161 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
12162 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
12166 output_inst (const char * str)
12172 as_bad ("%s -- `%s'", inst.error, str);
12176 output_relax_insn();
12179 if (inst.size == 0)
12182 to = frag_more (inst.size);
12184 if (thumb_mode && (inst.size > THUMB_SIZE))
12186 assert (inst.size == (2 * THUMB_SIZE));
12187 put_thumb32_insn (to, inst.instruction);
12189 else if (inst.size > INSN_SIZE)
12191 assert (inst.size == (2 * INSN_SIZE));
12192 md_number_to_chars (to, inst.instruction, INSN_SIZE);
12193 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
12196 md_number_to_chars (to, inst.instruction, inst.size);
12198 if (inst.reloc.type != BFD_RELOC_UNUSED)
12199 fix_new_arm (frag_now, to - frag_now->fr_literal,
12200 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
12204 dwarf2_emit_insn (inst.size);
12208 /* Tag values used in struct asm_opcode's tag field. */
12211 OT_unconditional, /* Instruction cannot be conditionalized.
12212 The ARM condition field is still 0xE. */
12213 OT_unconditionalF, /* Instruction cannot be conditionalized
12214 and carries 0xF in its ARM condition field. */
12215 OT_csuffix, /* Instruction takes a conditional suffix. */
12216 OT_cinfix3, /* Instruction takes a conditional infix,
12217 beginning at character index 3. (In
12218 unified mode, it becomes a suffix.) */
12219 OT_cinfix3_deprecated, /* The same as OT_cinfix3. This is used for
12220 tsts, cmps, cmns, and teqs. */
12221 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
12222 character index 3, even in unified mode. Used for
12223 legacy instructions where suffix and infix forms
12224 may be ambiguous. */
12225 OT_csuf_or_in3, /* Instruction takes either a conditional
12226 suffix or an infix at character index 3. */
12227 OT_odd_infix_unc, /* This is the unconditional variant of an
12228 instruction that takes a conditional infix
12229 at an unusual position. In unified mode,
12230 this variant will accept a suffix. */
12231 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
12232 are the conditional variants of instructions that
12233 take conditional infixes in unusual positions.
12234 The infix appears at character index
12235 (tag - OT_odd_infix_0). These are not accepted
12236 in unified mode. */
12239 /* Subroutine of md_assemble, responsible for looking up the primary
12240 opcode from the mnemonic the user wrote. STR points to the
12241 beginning of the mnemonic.
12243 This is not simply a hash table lookup, because of conditional
12244 variants. Most instructions have conditional variants, which are
12245 expressed with a _conditional affix_ to the mnemonic. If we were
12246 to encode each conditional variant as a literal string in the opcode
12247 table, it would have approximately 20,000 entries.
12249 Most mnemonics take this affix as a suffix, and in unified syntax,
12250 'most' is upgraded to 'all'. However, in the divided syntax, some
12251 instructions take the affix as an infix, notably the s-variants of
12252 the arithmetic instructions. Of those instructions, all but six
12253 have the infix appear after the third character of the mnemonic.
12255 Accordingly, the algorithm for looking up primary opcodes given
12258 1. Look up the identifier in the opcode table.
12259 If we find a match, go to step U.
12261 2. Look up the last two characters of the identifier in the
12262 conditions table. If we find a match, look up the first N-2
12263 characters of the identifier in the opcode table. If we
12264 find a match, go to step CE.
12266 3. Look up the fourth and fifth characters of the identifier in
12267 the conditions table. If we find a match, extract those
12268 characters from the identifier, and look up the remaining
12269 characters in the opcode table. If we find a match, go
12274 U. Examine the tag field of the opcode structure, in case this is
12275 one of the six instructions with its conditional infix in an
12276 unusual place. If it is, the tag tells us where to find the
12277 infix; look it up in the conditions table and set inst.cond
12278 accordingly. Otherwise, this is an unconditional instruction.
12279 Again set inst.cond accordingly. Return the opcode structure.
12281 CE. Examine the tag field to make sure this is an instruction that
12282 should receive a conditional suffix. If it is not, fail.
12283 Otherwise, set inst.cond from the suffix we already looked up,
12284 and return the opcode structure.
12286 CM. Examine the tag field to make sure this is an instruction that
12287 should receive a conditional infix after the third character.
12288 If it is not, fail. Otherwise, undo the edits to the current
12289 line of input and proceed as for case CE. */
12291 static const struct asm_opcode *
12292 opcode_lookup (char **str)
12296 const struct asm_opcode *opcode;
12297 const struct asm_cond *cond;
12300 /* Scan up to the end of the mnemonic, which must end in white space,
12301 '.' (in unified mode only), or end of string. */
12302 for (base = end = *str; *end != '\0'; end++)
12303 if (*end == ' ' || (unified_syntax && *end == '.'))
12309 /* Handle a possible width suffix and/or Neon type suffix. */
12316 else if (end[1] == 'n')
12321 inst.vectype.elems = 0;
12323 *str = end + offset;
12325 if (end[offset] == '.')
12327 /* See if we have a Neon type suffix. */
12328 if (parse_neon_type (&inst.vectype, str) == FAIL)
12331 else if (end[offset] != '\0' && end[offset] != ' ')
12337 /* Look for unaffixed or special-case affixed mnemonic. */
12338 opcode = hash_find_n (arm_ops_hsh, base, end - base);
12342 if (opcode->tag < OT_odd_infix_0)
12344 inst.cond = COND_ALWAYS;
12348 if (unified_syntax)
12349 as_warn (_("conditional infixes are deprecated in unified syntax"));
12350 affix = base + (opcode->tag - OT_odd_infix_0);
12351 cond = hash_find_n (arm_cond_hsh, affix, 2);
12354 inst.cond = cond->value;
12358 /* Cannot have a conditional suffix on a mnemonic of less than two
12360 if (end - base < 3)
12363 /* Look for suffixed mnemonic. */
12365 cond = hash_find_n (arm_cond_hsh, affix, 2);
12366 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
12367 if (opcode && cond)
12370 switch (opcode->tag)
12372 case OT_cinfix3_legacy:
12373 /* Ignore conditional suffixes matched on infix only mnemonics. */
12377 case OT_cinfix3_deprecated:
12378 case OT_odd_infix_unc:
12379 if (!unified_syntax)
12381 /* else fall through */
12384 case OT_csuf_or_in3:
12385 inst.cond = cond->value;
12388 case OT_unconditional:
12389 case OT_unconditionalF:
12392 inst.cond = cond->value;
12396 /* delayed diagnostic */
12397 inst.error = BAD_COND;
12398 inst.cond = COND_ALWAYS;
12407 /* Cannot have a usual-position infix on a mnemonic of less than
12408 six characters (five would be a suffix). */
12409 if (end - base < 6)
12412 /* Look for infixed mnemonic in the usual position. */
12414 cond = hash_find_n (arm_cond_hsh, affix, 2);
12418 memcpy (save, affix, 2);
12419 memmove (affix, affix + 2, (end - affix) - 2);
12420 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
12421 memmove (affix + 2, affix, (end - affix) - 2);
12422 memcpy (affix, save, 2);
12425 && (opcode->tag == OT_cinfix3
12426 || opcode->tag == OT_cinfix3_deprecated
12427 || opcode->tag == OT_csuf_or_in3
12428 || opcode->tag == OT_cinfix3_legacy))
12432 && (opcode->tag == OT_cinfix3
12433 || opcode->tag == OT_cinfix3_deprecated))
12434 as_warn (_("conditional infixes are deprecated in unified syntax"));
12436 inst.cond = cond->value;
12444 md_assemble (char *str)
12447 const struct asm_opcode * opcode;
12449 /* Align the previous label if needed. */
12450 if (last_label_seen != NULL)
12452 symbol_set_frag (last_label_seen, frag_now);
12453 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
12454 S_SET_SEGMENT (last_label_seen, now_seg);
12457 memset (&inst, '\0', sizeof (inst));
12458 inst.reloc.type = BFD_RELOC_UNUSED;
12460 opcode = opcode_lookup (&p);
12463 /* It wasn't an instruction, but it might be a register alias of
12464 the form alias .req reg, or a Neon .dn/.qn directive. */
12465 if (!create_register_alias (str, p)
12466 && !create_neon_reg_alias (str, p))
12467 as_bad (_("bad instruction `%s'"), str);
12472 if (opcode->tag == OT_cinfix3_deprecated)
12473 as_warn (_("s suffix on comparison instruction is deprecated"));
12477 arm_feature_set variant;
12479 variant = cpu_variant;
12480 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
12481 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
12482 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
12483 /* Check that this instruction is supported for this CPU. */
12484 if (!opcode->tvariant
12485 || (thumb_mode == 1
12486 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
12488 as_bad (_("selected processor does not support `%s'"), str);
12491 if (inst.cond != COND_ALWAYS && !unified_syntax
12492 && opcode->tencode != do_t_branch)
12494 as_bad (_("Thumb does not support conditional execution"));
12498 /* Check conditional suffixes. */
12499 if (current_it_mask)
12502 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
12503 current_it_mask <<= 1;
12504 current_it_mask &= 0x1f;
12505 /* The BKPT instruction is unconditional even in an IT block. */
12507 && cond != inst.cond && opcode->tencode != do_t_bkpt)
12509 as_bad (_("incorrect condition in IT block"));
12513 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
12515 as_bad (_("thumb conditional instrunction not in IT block"));
12519 mapping_state (MAP_THUMB);
12520 inst.instruction = opcode->tvalue;
12522 if (!parse_operands (p, opcode->operands))
12523 opcode->tencode ();
12525 /* Clear current_it_mask at the end of an IT block. */
12526 if (current_it_mask == 0x10)
12527 current_it_mask = 0;
12529 if (!(inst.error || inst.relax))
12531 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
12532 inst.size = (inst.instruction > 0xffff ? 4 : 2);
12533 if (inst.size_req && inst.size_req != inst.size)
12535 as_bad (_("cannot honor width suffix -- `%s'"), str);
12539 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12540 *opcode->tvariant);
12541 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
12542 set those bits when Thumb-2 32-bit instructions are seen. ie.
12543 anything other than bl/blx.
12544 This is overly pessimistic for relaxable instructions. */
12545 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
12547 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
12552 /* Check that this instruction is supported for this CPU. */
12553 if (!opcode->avariant ||
12554 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
12556 as_bad (_("selected processor does not support `%s'"), str);
12561 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
12565 mapping_state (MAP_ARM);
12566 inst.instruction = opcode->avalue;
12567 if (opcode->tag == OT_unconditionalF)
12568 inst.instruction |= 0xF << 28;
12570 inst.instruction |= inst.cond << 28;
12571 inst.size = INSN_SIZE;
12572 if (!parse_operands (p, opcode->operands))
12573 opcode->aencode ();
12574 /* Arm mode bx is marked as both v4T and v5 because it's still required
12575 on a hypothetical non-thumb v5 core. */
12576 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
12577 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
12578 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
12580 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
12581 *opcode->avariant);
12586 /* Various frobbings of labels and their addresses. */
12589 arm_start_line_hook (void)
12591 last_label_seen = NULL;
12595 arm_frob_label (symbolS * sym)
12597 last_label_seen = sym;
12599 ARM_SET_THUMB (sym, thumb_mode);
12601 #if defined OBJ_COFF || defined OBJ_ELF
12602 ARM_SET_INTERWORK (sym, support_interwork);
12605 /* Note - do not allow local symbols (.Lxxx) to be labeled
12606 as Thumb functions. This is because these labels, whilst
12607 they exist inside Thumb code, are not the entry points for
12608 possible ARM->Thumb calls. Also, these labels can be used
12609 as part of a computed goto or switch statement. eg gcc
12610 can generate code that looks like this:
12612 ldr r2, [pc, .Laaa]
12622 The first instruction loads the address of the jump table.
12623 The second instruction converts a table index into a byte offset.
12624 The third instruction gets the jump address out of the table.
12625 The fourth instruction performs the jump.
12627 If the address stored at .Laaa is that of a symbol which has the
12628 Thumb_Func bit set, then the linker will arrange for this address
12629 to have the bottom bit set, which in turn would mean that the
12630 address computation performed by the third instruction would end
12631 up with the bottom bit set. Since the ARM is capable of unaligned
12632 word loads, the instruction would then load the incorrect address
12633 out of the jump table, and chaos would ensue. */
12634 if (label_is_thumb_function_name
12635 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
12636 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
12638 /* When the address of a Thumb function is taken the bottom
12639 bit of that address should be set. This will allow
12640 interworking between Arm and Thumb functions to work
12643 THUMB_SET_FUNC (sym, 1);
12645 label_is_thumb_function_name = FALSE;
12649 dwarf2_emit_label (sym);
12654 arm_data_in_code (void)
12656 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
12658 *input_line_pointer = '/';
12659 input_line_pointer += 5;
12660 *input_line_pointer = 0;
12668 arm_canonicalize_symbol_name (char * name)
12672 if (thumb_mode && (len = strlen (name)) > 5
12673 && streq (name + len - 5, "/data"))
12674 *(name + len - 5) = 0;
12679 /* Table of all register names defined by default. The user can
12680 define additional names with .req. Note that all register names
12681 should appear in both upper and lowercase variants. Some registers
12682 also have mixed-case names. */
12684 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
12685 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
12686 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
12687 #define REGSET(p,t) \
12688 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
12689 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
12690 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
12691 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
12692 #define REGSETH(p,t) \
12693 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
12694 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
12695 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
12696 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
12697 #define REGSET2(p,t) \
12698 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
12699 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
12700 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
12701 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
12703 static const struct reg_entry reg_names[] =
12705 /* ARM integer registers. */
12706 REGSET(r, RN), REGSET(R, RN),
12708 /* ATPCS synonyms. */
12709 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
12710 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
12711 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
12713 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
12714 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
12715 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
12717 /* Well-known aliases. */
12718 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
12719 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
12721 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
12722 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
12724 /* Coprocessor numbers. */
12725 REGSET(p, CP), REGSET(P, CP),
12727 /* Coprocessor register numbers. The "cr" variants are for backward
12729 REGSET(c, CN), REGSET(C, CN),
12730 REGSET(cr, CN), REGSET(CR, CN),
12732 /* FPA registers. */
12733 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
12734 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
12736 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
12737 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
12739 /* VFP SP registers. */
12740 REGSET(s,VFS), REGSET(S,VFS),
12741 REGSETH(s,VFS), REGSETH(S,VFS),
12743 /* VFP DP Registers. */
12744 REGSET(d,VFD), REGSET(D,VFD),
12745 /* Extra Neon DP registers. */
12746 REGSETH(d,VFD), REGSETH(D,VFD),
12748 /* Neon QP registers. */
12749 REGSET2(q,NQ), REGSET2(Q,NQ),
12751 /* VFP control registers. */
12752 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
12753 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
12755 /* Maverick DSP coprocessor registers. */
12756 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
12757 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
12759 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
12760 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
12761 REGDEF(dspsc,0,DSPSC),
12763 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
12764 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
12765 REGDEF(DSPSC,0,DSPSC),
12767 /* iWMMXt data registers - p0, c0-15. */
12768 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
12770 /* iWMMXt control registers - p1, c0-3. */
12771 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
12772 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
12773 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
12774 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
12776 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
12777 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
12778 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
12779 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
12780 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
12782 /* XScale accumulator registers. */
12783 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
12789 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
12790 within psr_required_here. */
12791 static const struct asm_psr psrs[] =
12793 /* Backward compatibility notation. Note that "all" is no longer
12794 truly all possible PSR bits. */
12795 {"all", PSR_c | PSR_f},
12799 /* Individual flags. */
12804 /* Combinations of flags. */
12805 {"fs", PSR_f | PSR_s},
12806 {"fx", PSR_f | PSR_x},
12807 {"fc", PSR_f | PSR_c},
12808 {"sf", PSR_s | PSR_f},
12809 {"sx", PSR_s | PSR_x},
12810 {"sc", PSR_s | PSR_c},
12811 {"xf", PSR_x | PSR_f},
12812 {"xs", PSR_x | PSR_s},
12813 {"xc", PSR_x | PSR_c},
12814 {"cf", PSR_c | PSR_f},
12815 {"cs", PSR_c | PSR_s},
12816 {"cx", PSR_c | PSR_x},
12817 {"fsx", PSR_f | PSR_s | PSR_x},
12818 {"fsc", PSR_f | PSR_s | PSR_c},
12819 {"fxs", PSR_f | PSR_x | PSR_s},
12820 {"fxc", PSR_f | PSR_x | PSR_c},
12821 {"fcs", PSR_f | PSR_c | PSR_s},
12822 {"fcx", PSR_f | PSR_c | PSR_x},
12823 {"sfx", PSR_s | PSR_f | PSR_x},
12824 {"sfc", PSR_s | PSR_f | PSR_c},
12825 {"sxf", PSR_s | PSR_x | PSR_f},
12826 {"sxc", PSR_s | PSR_x | PSR_c},
12827 {"scf", PSR_s | PSR_c | PSR_f},
12828 {"scx", PSR_s | PSR_c | PSR_x},
12829 {"xfs", PSR_x | PSR_f | PSR_s},
12830 {"xfc", PSR_x | PSR_f | PSR_c},
12831 {"xsf", PSR_x | PSR_s | PSR_f},
12832 {"xsc", PSR_x | PSR_s | PSR_c},
12833 {"xcf", PSR_x | PSR_c | PSR_f},
12834 {"xcs", PSR_x | PSR_c | PSR_s},
12835 {"cfs", PSR_c | PSR_f | PSR_s},
12836 {"cfx", PSR_c | PSR_f | PSR_x},
12837 {"csf", PSR_c | PSR_s | PSR_f},
12838 {"csx", PSR_c | PSR_s | PSR_x},
12839 {"cxf", PSR_c | PSR_x | PSR_f},
12840 {"cxs", PSR_c | PSR_x | PSR_s},
12841 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
12842 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
12843 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
12844 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
12845 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
12846 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
12847 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
12848 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
12849 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
12850 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
12851 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
12852 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
12853 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
12854 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
12855 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
12856 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
12857 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
12858 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
12859 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
12860 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
12861 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
12862 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
12863 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
12864 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
12867 /* Table of V7M psr names. */
12868 static const struct asm_psr v7m_psrs[] =
12881 {"basepri_max", 18},
12886 /* Table of all shift-in-operand names. */
12887 static const struct asm_shift_name shift_names [] =
12889 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
12890 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
12891 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
12892 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
12893 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
12894 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
12897 /* Table of all explicit relocation names. */
12899 static struct reloc_entry reloc_names[] =
12901 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
12902 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
12903 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
12904 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
12905 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
12906 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
12907 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
12908 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
12909 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
12910 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
12911 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
12915 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
12916 static const struct asm_cond conds[] =
12920 {"cs", 0x2}, {"hs", 0x2},
12921 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
12935 static struct asm_barrier_opt barrier_opt_names[] =
12943 /* Table of ARM-format instructions. */
12945 /* Macros for gluing together operand strings. N.B. In all cases
12946 other than OPS0, the trailing OP_stop comes from default
12947 zero-initialization of the unspecified elements of the array. */
12948 #define OPS0() { OP_stop, }
12949 #define OPS1(a) { OP_##a, }
12950 #define OPS2(a,b) { OP_##a,OP_##b, }
12951 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
12952 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
12953 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
12954 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
12956 /* These macros abstract out the exact format of the mnemonic table and
12957 save some repeated characters. */
12959 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
12960 #define TxCE(mnem, op, top, nops, ops, ae, te) \
12961 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
12962 THUMB_VARIANT, do_##ae, do_##te }
12964 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
12965 a T_MNEM_xyz enumerator. */
12966 #define TCE(mnem, aop, top, nops, ops, ae, te) \
12967 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
12968 #define tCE(mnem, aop, top, nops, ops, ae, te) \
12969 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12971 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
12972 infix after the third character. */
12973 #define TxC3(mnem, op, top, nops, ops, ae, te) \
12974 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
12975 THUMB_VARIANT, do_##ae, do_##te }
12976 #define TxC3w(mnem, op, top, nops, ops, ae, te) \
12977 { #mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
12978 THUMB_VARIANT, do_##ae, do_##te }
12979 #define TC3(mnem, aop, top, nops, ops, ae, te) \
12980 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
12981 #define TC3w(mnem, aop, top, nops, ops, ae, te) \
12982 TxC3w(mnem, aop, 0x##top, nops, ops, ae, te)
12983 #define tC3(mnem, aop, top, nops, ops, ae, te) \
12984 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12985 #define tC3w(mnem, aop, top, nops, ops, ae, te) \
12986 TxC3w(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12988 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
12989 appear in the condition table. */
12990 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
12991 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
12992 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
12994 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
12995 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
12996 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
12997 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
12998 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
12999 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
13000 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
13001 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
13002 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
13003 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
13004 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
13005 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
13006 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
13007 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
13008 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
13009 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
13010 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
13011 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
13012 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
13013 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
13015 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
13016 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
13017 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
13018 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
13020 /* Mnemonic that cannot be conditionalized. The ARM condition-code
13021 field is still 0xE. Many of the Thumb variants can be executed
13022 conditionally, so this is checked separately. */
13023 #define TUE(mnem, op, top, nops, ops, ae, te) \
13024 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
13025 THUMB_VARIANT, do_##ae, do_##te }
13027 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
13028 condition code field. */
13029 #define TUF(mnem, op, top, nops, ops, ae, te) \
13030 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
13031 THUMB_VARIANT, do_##ae, do_##te }
13033 /* ARM-only variants of all the above. */
13034 #define CE(mnem, op, nops, ops, ae) \
13035 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13037 #define C3(mnem, op, nops, ops, ae) \
13038 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13040 /* Legacy mnemonics that always have conditional infix after the third
13042 #define CL(mnem, op, nops, ops, ae) \
13043 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
13044 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13046 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
13047 #define cCE(mnem, op, nops, ops, ae) \
13048 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
13050 /* Legacy coprocessor instructions where conditional infix and conditional
13051 suffix are ambiguous. For consistency this includes all FPA instructions,
13052 not just the potentially ambiguous ones. */
13053 #define cCL(mnem, op, nops, ops, ae) \
13054 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
13055 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
13057 /* Coprocessor, takes either a suffix or a position-3 infix
13058 (for an FPA corner case). */
13059 #define C3E(mnem, op, nops, ops, ae) \
13060 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
13061 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
13063 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
13064 { #m1 #m2 #m3, OPS##nops ops, \
13065 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
13066 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
13068 #define CM(m1, m2, op, nops, ops, ae) \
13069 xCM_(m1, , m2, op, nops, ops, ae), \
13070 xCM_(m1, eq, m2, op, nops, ops, ae), \
13071 xCM_(m1, ne, m2, op, nops, ops, ae), \
13072 xCM_(m1, cs, m2, op, nops, ops, ae), \
13073 xCM_(m1, hs, m2, op, nops, ops, ae), \
13074 xCM_(m1, cc, m2, op, nops, ops, ae), \
13075 xCM_(m1, ul, m2, op, nops, ops, ae), \
13076 xCM_(m1, lo, m2, op, nops, ops, ae), \
13077 xCM_(m1, mi, m2, op, nops, ops, ae), \
13078 xCM_(m1, pl, m2, op, nops, ops, ae), \
13079 xCM_(m1, vs, m2, op, nops, ops, ae), \
13080 xCM_(m1, vc, m2, op, nops, ops, ae), \
13081 xCM_(m1, hi, m2, op, nops, ops, ae), \
13082 xCM_(m1, ls, m2, op, nops, ops, ae), \
13083 xCM_(m1, ge, m2, op, nops, ops, ae), \
13084 xCM_(m1, lt, m2, op, nops, ops, ae), \
13085 xCM_(m1, gt, m2, op, nops, ops, ae), \
13086 xCM_(m1, le, m2, op, nops, ops, ae), \
13087 xCM_(m1, al, m2, op, nops, ops, ae)
13089 #define UE(mnem, op, nops, ops, ae) \
13090 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
13092 #define UF(mnem, op, nops, ops, ae) \
13093 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
13095 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
13096 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
13097 use the same encoding function for each. */
13098 #define NUF(mnem, op, nops, ops, enc) \
13099 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
13100 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13102 /* Neon data processing, version which indirects through neon_enc_tab for
13103 the various overloaded versions of opcodes. */
13104 #define nUF(mnem, op, nops, ops, enc) \
13105 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
13106 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13108 /* Neon insn with conditional suffix for the ARM version, non-overloaded
13110 #define NCE(mnem, op, nops, ops, enc) \
13111 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x##op, ARM_VARIANT, \
13112 THUMB_VARIANT, do_##enc, do_##enc }
13114 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
13115 #define nCE(mnem, op, nops, ops, enc) \
13116 { #mnem, OPS##nops ops, OT_csuffix, N_MNEM_##op, N_MNEM_##op, \
13117 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
13121 /* Thumb-only, unconditional. */
13122 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
13124 static const struct asm_opcode insns[] =
13126 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
13127 #define THUMB_VARIANT &arm_ext_v4t
13128 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
13129 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
13130 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
13131 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
13132 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
13133 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
13134 tCE(add, 0800000, add, 3, (RR, oRR, SH), arit, t_add_sub),
13135 tC3(adds, 0900000, adds, 3, (RR, oRR, SH), arit, t_add_sub),
13136 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
13137 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
13138 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
13139 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
13140 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
13141 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
13142 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
13143 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
13145 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
13146 for setting PSR flag bits. They are obsolete in V6 and do not
13147 have Thumb equivalents. */
13148 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
13149 tC3w(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
13150 CL(tstp, 110f000, 2, (RR, SH), cmp),
13151 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
13152 tC3w(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
13153 CL(cmpp, 150f000, 2, (RR, SH), cmp),
13154 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
13155 tC3w(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
13156 CL(cmnp, 170f000, 2, (RR, SH), cmp),
13158 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
13159 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
13160 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
13161 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
13163 tCE(ldr, 4100000, ldr, 2, (RR, ADDR), ldst, t_ldst),
13164 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDR), ldst, t_ldst),
13165 tCE(str, 4000000, str, 2, (RR, ADDR), ldst, t_ldst),
13166 tC3(strb, 4400000, strb, 2, (RR, ADDR), ldst, t_ldst),
13168 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13169 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13170 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13171 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13172 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13173 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13175 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
13176 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
13177 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
13178 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
13181 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
13182 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
13183 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
13185 /* Thumb-compatibility pseudo ops. */
13186 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
13187 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
13188 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
13189 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
13190 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
13191 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
13192 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
13193 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
13194 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
13195 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
13196 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
13197 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
13199 #undef THUMB_VARIANT
13200 #define THUMB_VARIANT &arm_ext_v6
13201 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
13203 /* V1 instructions with no Thumb analogue prior to V6T2. */
13204 #undef THUMB_VARIANT
13205 #define THUMB_VARIANT &arm_ext_v6t2
13206 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
13207 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
13208 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
13209 TC3w(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
13210 CL(teqp, 130f000, 2, (RR, SH), cmp),
13212 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
13213 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
13214 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
13215 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
13217 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13218 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13220 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13221 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
13223 /* V1 instructions with no Thumb analogue at all. */
13224 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
13225 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
13227 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
13228 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
13229 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
13230 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
13231 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
13232 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
13233 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
13234 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
13237 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
13238 #undef THUMB_VARIANT
13239 #define THUMB_VARIANT &arm_ext_v4t
13240 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
13241 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
13243 #undef THUMB_VARIANT
13244 #define THUMB_VARIANT &arm_ext_v6t2
13245 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
13246 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
13248 /* Generic coprocessor instructions. */
13249 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
13250 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDR), lstc, lstc),
13251 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDR), lstc, lstc),
13252 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDR), lstc, lstc),
13253 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDR), lstc, lstc),
13254 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13255 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13258 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
13259 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
13260 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
13263 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
13264 TCE(mrs, 10f0000, f3ef8000, 2, (RR, PSR), mrs, t_mrs),
13265 TCE(msr, 120f000, f3808000, 2, (PSR, RR_EXi), msr, t_msr),
13268 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
13269 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13270 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13271 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13272 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13273 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13274 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13275 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
13276 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
13279 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
13280 #undef THUMB_VARIANT
13281 #define THUMB_VARIANT &arm_ext_v4t
13282 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDR), ldstv4, t_ldst),
13283 tC3(strh, 00000b0, strh, 2, (RR, ADDR), ldstv4, t_ldst),
13284 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
13285 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
13286 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
13287 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
13290 #define ARM_VARIANT &arm_ext_v4t_5
13291 /* ARM Architecture 4T. */
13292 /* Note: bx (and blx) are required on V5, even if the processor does
13293 not support Thumb. */
13294 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
13297 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
13298 #undef THUMB_VARIANT
13299 #define THUMB_VARIANT &arm_ext_v5t
13300 /* Note: blx has 2 variants; the .value coded here is for
13301 BLX(2). Only this variant has conditional execution. */
13302 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
13303 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
13305 #undef THUMB_VARIANT
13306 #define THUMB_VARIANT &arm_ext_v6t2
13307 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
13308 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDR), lstc, lstc),
13309 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDR), lstc, lstc),
13310 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDR), lstc, lstc),
13311 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDR), lstc, lstc),
13312 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
13313 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13314 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
13317 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
13318 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13319 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13320 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13321 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13323 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13324 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
13326 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13327 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13328 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13329 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
13331 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13332 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13333 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13334 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13336 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13337 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13339 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13340 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13341 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13342 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
13345 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
13346 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
13347 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
13348 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
13350 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13351 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13354 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
13355 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
13358 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
13359 #undef THUMB_VARIANT
13360 #define THUMB_VARIANT &arm_ext_v6
13361 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
13362 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
13363 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13364 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13365 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
13366 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13367 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13368 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13369 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13370 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
13372 #undef THUMB_VARIANT
13373 #define THUMB_VARIANT &arm_ext_v6t2
13374 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
13375 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13376 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
13378 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
13379 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
13381 /* ARM V6 not included in V7M (eg. integer SIMD). */
13382 #undef THUMB_VARIANT
13383 #define THUMB_VARIANT &arm_ext_v6_notm
13384 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
13385 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
13386 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
13387 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13388 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13389 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13390 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13391 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13392 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13393 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13394 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13395 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13396 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13397 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13398 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13399 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13400 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13401 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13402 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13403 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13404 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13405 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13406 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13407 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13408 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13409 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13410 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13411 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13412 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13413 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13414 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13415 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13416 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13417 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13418 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13419 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13420 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13421 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13422 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13423 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
13424 UF(rfeib, 9900a00, 1, (RRw), rfe),
13425 UF(rfeda, 8100a00, 1, (RRw), rfe),
13426 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
13427 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
13428 UF(rfefa, 9900a00, 1, (RRw), rfe),
13429 UF(rfeea, 8100a00, 1, (RRw), rfe),
13430 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
13431 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13432 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13433 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13434 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13435 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13436 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13437 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
13438 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
13439 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
13440 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13441 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13442 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13443 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13444 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13445 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13446 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13447 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
13448 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13449 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13450 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13451 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13452 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13453 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13454 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13455 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13456 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13457 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13458 TUF(srsia, 8cd0500, e980c000, 1, (I31w), srs, srs),
13459 UF(srsib, 9cd0500, 1, (I31w), srs),
13460 UF(srsda, 84d0500, 1, (I31w), srs),
13461 TUF(srsdb, 94d0500, e800c000, 1, (I31w), srs, srs),
13462 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
13463 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
13464 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
13465 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
13466 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
13467 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
13470 #define ARM_VARIANT &arm_ext_v6k
13471 #undef THUMB_VARIANT
13472 #define THUMB_VARIANT &arm_ext_v6k
13473 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
13474 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
13475 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
13476 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
13478 #undef THUMB_VARIANT
13479 #define THUMB_VARIANT &arm_ext_v6_notm
13480 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
13481 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
13483 #undef THUMB_VARIANT
13484 #define THUMB_VARIANT &arm_ext_v6t2
13485 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
13486 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
13487 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
13488 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
13489 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
13492 #define ARM_VARIANT &arm_ext_v6z
13493 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
13496 #define ARM_VARIANT &arm_ext_v6t2
13497 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
13498 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
13499 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
13500 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
13502 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
13503 TCE(movw, 3000000, f2400000, 2, (RRnpc, HALF), mov16, t_mov16),
13504 TCE(movt, 3400000, f2c00000, 2, (RRnpc, HALF), mov16, t_mov16),
13505 TCE(rbit, 3ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
13507 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13508 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13509 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13510 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
13512 UT(cbnz, b900, 2, (RR, EXP), t_czb),
13513 UT(cbz, b100, 2, (RR, EXP), t_czb),
13514 /* ARM does not really have an IT instruction. */
13515 TUE(it, 0, bf08, 1, (COND), it, t_it),
13516 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
13517 TUE(ite, 0, bf04, 1, (COND), it, t_it),
13518 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
13519 TUE(itet, 0, bf06, 1, (COND), it, t_it),
13520 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
13521 TUE(itee, 0, bf02, 1, (COND), it, t_it),
13522 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
13523 TUE(itett, 0, bf07, 1, (COND), it, t_it),
13524 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
13525 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
13526 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
13527 TUE(itete, 0, bf05, 1, (COND), it, t_it),
13528 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
13529 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
13531 /* Thumb2 only instructions. */
13533 #define ARM_VARIANT NULL
13535 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
13536 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
13537 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
13538 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
13540 /* Thumb-2 hardware division instructions (R and M profiles only). */
13541 #undef THUMB_VARIANT
13542 #define THUMB_VARIANT &arm_ext_div
13543 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
13544 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
13546 /* ARM V7 instructions. */
13548 #define ARM_VARIANT &arm_ext_v7
13549 #undef THUMB_VARIANT
13550 #define THUMB_VARIANT &arm_ext_v7
13551 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
13552 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
13553 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
13554 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
13555 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
13558 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
13559 cCE(wfs, e200110, 1, (RR), rd),
13560 cCE(rfs, e300110, 1, (RR), rd),
13561 cCE(wfc, e400110, 1, (RR), rd),
13562 cCE(rfc, e500110, 1, (RR), rd),
13564 cCL(ldfs, c100100, 2, (RF, ADDR), rd_cpaddr),
13565 cCL(ldfd, c108100, 2, (RF, ADDR), rd_cpaddr),
13566 cCL(ldfe, c500100, 2, (RF, ADDR), rd_cpaddr),
13567 cCL(ldfp, c508100, 2, (RF, ADDR), rd_cpaddr),
13569 cCL(stfs, c000100, 2, (RF, ADDR), rd_cpaddr),
13570 cCL(stfd, c008100, 2, (RF, ADDR), rd_cpaddr),
13571 cCL(stfe, c400100, 2, (RF, ADDR), rd_cpaddr),
13572 cCL(stfp, c408100, 2, (RF, ADDR), rd_cpaddr),
13574 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
13575 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
13576 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
13577 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
13578 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
13579 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
13580 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
13581 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
13582 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
13583 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
13584 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
13585 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
13587 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
13588 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
13589 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
13590 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
13591 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
13592 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
13593 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
13594 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
13595 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
13596 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
13597 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
13598 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
13600 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
13601 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
13602 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
13603 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
13604 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
13605 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
13606 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
13607 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
13608 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
13609 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
13610 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
13611 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
13613 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
13614 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
13615 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
13616 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
13617 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
13618 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
13619 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
13620 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
13621 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
13622 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
13623 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
13624 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
13626 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
13627 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
13628 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
13629 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
13630 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
13631 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
13632 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
13633 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
13634 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
13635 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
13636 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
13637 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
13639 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
13640 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
13641 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
13642 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
13643 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
13644 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
13645 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
13646 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
13647 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
13648 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
13649 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
13650 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
13652 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
13653 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
13654 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
13655 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
13656 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
13657 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
13658 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
13659 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
13660 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
13661 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
13662 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
13663 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
13665 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
13666 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
13667 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
13668 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
13669 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
13670 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
13671 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
13672 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
13673 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
13674 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
13675 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
13676 cCL(expdz, e788160, 2, (RF, RF_IF), rd_rm),
13678 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
13679 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
13680 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
13681 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
13682 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
13683 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
13684 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
13685 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
13686 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
13687 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
13688 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
13689 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
13691 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
13692 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
13693 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
13694 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
13695 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
13696 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
13697 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
13698 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
13699 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
13700 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
13701 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
13702 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
13704 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
13705 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
13706 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
13707 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
13708 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
13709 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
13710 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
13711 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
13712 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
13713 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
13714 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
13715 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
13717 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
13718 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
13719 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
13720 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
13721 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
13722 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
13723 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
13724 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
13725 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
13726 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
13727 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
13728 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
13730 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
13731 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
13732 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
13733 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
13734 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
13735 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
13736 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
13737 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
13738 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
13739 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
13740 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
13741 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
13743 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
13744 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
13745 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
13746 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
13747 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
13748 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
13749 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
13750 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
13751 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
13752 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
13753 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
13754 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
13756 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
13757 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
13758 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
13759 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
13760 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
13761 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
13762 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
13763 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
13764 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
13765 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
13766 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
13767 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
13769 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
13770 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
13771 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
13772 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
13773 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
13774 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
13775 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
13776 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
13777 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
13778 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
13779 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
13780 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
13782 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
13783 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
13784 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
13785 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
13786 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
13787 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13788 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13789 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13790 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
13791 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
13792 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
13793 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
13795 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
13796 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
13797 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
13798 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
13799 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
13800 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13801 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13802 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13803 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
13804 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
13805 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
13806 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
13808 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
13809 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
13810 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
13811 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
13812 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
13813 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13814 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13815 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13816 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
13817 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
13818 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
13819 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
13821 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
13822 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
13823 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
13824 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
13825 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
13826 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13827 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13828 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13829 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
13830 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
13831 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
13832 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
13834 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
13835 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
13836 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
13837 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
13838 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
13839 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13840 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13841 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13842 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
13843 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
13844 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
13845 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
13847 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
13848 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
13849 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
13850 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
13851 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
13852 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13853 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13854 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13855 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
13856 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
13857 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
13858 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
13860 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
13861 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
13862 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
13863 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
13864 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
13865 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13866 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13867 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13868 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
13869 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
13870 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
13871 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
13873 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
13874 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
13875 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
13876 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
13877 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
13878 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13879 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13880 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13881 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
13882 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
13883 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
13884 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
13886 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
13887 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
13888 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
13889 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
13890 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
13891 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13892 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13893 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13894 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
13895 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
13896 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
13897 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
13899 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
13900 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
13901 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
13902 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
13903 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
13904 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13905 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13906 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13907 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
13908 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
13909 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
13910 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
13912 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13913 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13914 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13915 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13916 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13917 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13918 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13919 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13920 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13921 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13922 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13923 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13925 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13926 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13927 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13928 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13929 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13930 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13931 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13932 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13933 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13934 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13935 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13936 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13938 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13939 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13940 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13941 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13942 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13943 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13944 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13945 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13946 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13947 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13948 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13949 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13951 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
13952 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
13953 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
13954 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
13956 cCL(flts, e000110, 2, (RF, RR), rn_rd),
13957 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
13958 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
13959 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
13960 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
13961 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
13962 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
13963 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
13964 cCL(flte, e080110, 2, (RF, RR), rn_rd),
13965 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
13966 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
13967 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
13969 /* The implementation of the FIX instruction is broken on some
13970 assemblers, in that it accepts a precision specifier as well as a
13971 rounding specifier, despite the fact that this is meaningless.
13972 To be more compatible, we accept it as well, though of course it
13973 does not set any bits. */
13974 cCE(fix, e100110, 2, (RR, RF), rd_rm),
13975 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
13976 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
13977 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
13978 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
13979 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
13980 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
13981 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
13982 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
13983 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
13984 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
13985 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
13986 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
13988 /* Instructions that were new with the real FPA, call them V2. */
13990 #define ARM_VARIANT &fpu_fpa_ext_v2
13991 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13992 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13993 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13994 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13995 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13996 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13999 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
14000 /* Moves and type conversions. */
14001 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
14002 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
14003 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
14004 cCE(fmstat, ef1fa10, 0, (), noargs),
14005 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
14006 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
14007 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
14008 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
14009 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
14010 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
14011 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
14012 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
14014 /* Memory operations. */
14015 cCE(flds, d100a00, 2, (RVS, ADDR), vfp_sp_ldst),
14016 cCE(fsts, d000a00, 2, (RVS, ADDR), vfp_sp_ldst),
14017 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14018 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14019 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14020 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14021 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14022 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14023 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14024 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14025 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14026 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
14027 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14028 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
14029 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14030 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
14031 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14032 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
14034 /* Monadic operations. */
14035 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
14036 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
14037 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
14039 /* Dyadic operations. */
14040 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14041 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14042 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14043 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14044 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14045 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14046 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14047 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14048 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
14051 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
14052 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
14053 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
14054 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
14057 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
14058 /* Moves and type conversions. */
14059 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
14060 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
14061 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
14062 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
14063 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
14064 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
14065 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
14066 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
14067 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
14068 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
14069 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
14070 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
14071 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
14073 /* Memory operations. */
14074 cCE(fldd, d100b00, 2, (RVD, ADDR), vfp_dp_ldst),
14075 cCE(fstd, d000b00, 2, (RVD, ADDR), vfp_dp_ldst),
14076 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14077 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14078 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14079 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14080 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14081 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
14082 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14083 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
14085 /* Monadic operations. */
14086 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14087 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
14088 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14090 /* Dyadic operations. */
14091 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14092 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14093 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14094 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14095 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14096 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14097 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14098 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14099 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
14102 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
14103 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
14104 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
14105 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
14108 #define ARM_VARIANT &fpu_vfp_ext_v2
14109 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
14110 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
14111 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
14112 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
14114 #undef THUMB_VARIANT
14115 #define THUMB_VARIANT &fpu_neon_ext_v1
14117 #define ARM_VARIANT &fpu_neon_ext_v1
14118 /* Data processing with three registers of the same length. */
14119 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
14120 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
14121 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
14122 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14123 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14124 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14125 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14126 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
14127 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
14128 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
14129 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14130 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14131 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14132 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14133 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14134 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14135 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
14136 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
14137 /* If not immediate, fall back to neon_dyadic_i64_su.
14138 shl_imm should accept I8 I16 I32 I64,
14139 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
14140 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
14141 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
14142 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
14143 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
14144 /* Logic ops, types optional & ignored. */
14145 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
14146 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
14147 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
14148 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
14149 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
14150 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
14151 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
14152 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
14153 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
14154 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
14155 /* Bitfield ops, untyped. */
14156 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14157 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14158 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14159 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14160 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
14161 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
14162 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
14163 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14164 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14165 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14166 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14167 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
14168 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
14169 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
14170 back to neon_dyadic_if_su. */
14171 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
14172 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
14173 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
14174 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
14175 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
14176 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
14177 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
14178 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
14179 /* Comparison. Type I8 I16 I32 F32. Non-immediate -> neon_dyadic_if_i. */
14180 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
14181 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
14182 /* As above, D registers only. */
14183 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
14184 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
14185 /* Int and float variants, signedness unimportant. */
14186 /* If not scalar, fall back to neon_dyadic_if_i. */
14187 nUF(vmla, vmla, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14188 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14189 nUF(vmls, vmls, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14190 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
14191 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
14192 /* Add/sub take types I8 I16 I32 I64 F32. */
14193 nUF(vadd, vadd, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
14194 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
14195 nUF(vsub, vsub, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
14196 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
14197 /* vtst takes sizes 8, 16, 32. */
14198 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
14199 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
14200 /* VMUL takes I8 I16 I32 F32 P8. */
14201 nUF(vmul, vmul, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mul),
14202 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
14203 /* VQD{R}MULH takes S16 S32. */
14204 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
14205 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
14206 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
14207 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
14208 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
14209 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
14210 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
14211 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
14212 NUF(vaclt, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
14213 NUF(vacltq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
14214 NUF(vacle, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
14215 NUF(vacleq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
14216 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
14217 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
14218 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
14219 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
14221 /* Two address, int/float. Types S8 S16 S32 F32. */
14222 NUF(vabs, 1b10300, 2, (RNDQ, RNDQ), neon_abs_neg),
14223 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
14224 NUF(vneg, 1b10380, 2, (RNDQ, RNDQ), neon_abs_neg),
14225 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
14227 /* Data processing with two registers and a shift amount. */
14228 /* Right shifts, and variants with rounding.
14229 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
14230 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
14231 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
14232 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
14233 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
14234 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
14235 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
14236 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
14237 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
14238 /* Shift and insert. Sizes accepted 8 16 32 64. */
14239 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
14240 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
14241 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
14242 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
14243 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
14244 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
14245 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
14246 /* Right shift immediate, saturating & narrowing, with rounding variants.
14247 Types accepted S16 S32 S64 U16 U32 U64. */
14248 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
14249 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
14250 /* As above, unsigned. Types accepted S16 S32 S64. */
14251 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
14252 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
14253 /* Right shift narrowing. Types accepted I16 I32 I64. */
14254 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
14255 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
14256 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
14257 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
14258 /* CVT with optional immediate for fixed-point variant. */
14259 nUF(vcvt, vcvt, 3, (RNDQ, RNDQ, oI32b), neon_cvt),
14260 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
14262 /* One register and an immediate value. All encoding special-cased! */
14263 #undef THUMB_VARIANT
14264 #define THUMB_VARIANT &fpu_vfp_ext_v1
14266 #define ARM_VARIANT &fpu_vfp_ext_v1
14267 NCE(vmov, 0, 1, (VMOV), neon_mov),
14269 #undef THUMB_VARIANT
14270 #define THUMB_VARIANT &fpu_neon_ext_v1
14272 #define ARM_VARIANT &fpu_neon_ext_v1
14273 NCE(vmovq, 0, 1, (VMOV), neon_mov),
14274 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
14275 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
14277 /* Data processing, three registers of different lengths. */
14278 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
14279 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
14280 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
14281 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
14282 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
14283 /* If not scalar, fall back to neon_dyadic_long.
14284 Vector types as above, scalar types S16 S32 U16 U32. */
14285 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
14286 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
14287 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
14288 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
14289 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
14290 /* Dyadic, narrowing insns. Types I16 I32 I64. */
14291 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14292 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14293 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14294 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
14295 /* Saturating doubling multiplies. Types S16 S32. */
14296 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14297 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14298 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
14299 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
14300 S16 S32 U16 U32. */
14301 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
14303 /* Extract. Size 8. */
14304 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
14305 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
14307 /* Two registers, miscellaneous. */
14308 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
14309 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
14310 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
14311 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
14312 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
14313 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
14314 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
14315 /* Vector replicate. Sizes 8 16 32. */
14316 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
14317 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
14318 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
14319 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
14320 /* VMOVN. Types I16 I32 I64. */
14321 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
14322 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
14323 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
14324 /* VQMOVUN. Types S16 S32 S64. */
14325 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
14326 /* VZIP / VUZP. Sizes 8 16 32. */
14327 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
14328 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
14329 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
14330 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
14331 /* VQABS / VQNEG. Types S8 S16 S32. */
14332 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
14333 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
14334 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
14335 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
14336 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
14337 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
14338 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
14339 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
14340 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
14341 /* Reciprocal estimates. Types U32 F32. */
14342 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
14343 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
14344 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
14345 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
14346 /* VCLS. Types S8 S16 S32. */
14347 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
14348 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
14349 /* VCLZ. Types I8 I16 I32. */
14350 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
14351 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
14352 /* VCNT. Size 8. */
14353 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
14354 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
14355 /* Two address, untyped. */
14356 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
14357 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
14358 /* VTRN. Sizes 8 16 32. */
14359 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
14360 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
14362 /* Table lookup. Size 8. */
14363 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
14364 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
14366 #undef THUMB_VARIANT
14367 #define THUMB_VARIANT &fpu_vfp_ext_v1xd
14369 #define ARM_VARIANT &fpu_vfp_ext_v1xd
14371 /* Load/store instructions. Available in Neon or VFPv3. */
14372 NCE(vldm, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
14373 NCE(vldmia, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
14374 NCE(vldmdb, d100b00, 2, (RRw, NRDLST), neon_ldm_stm),
14375 NCE(vstm, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
14376 NCE(vstmia, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
14377 NCE(vstmdb, d000b00, 2, (RRw, NRDLST), neon_ldm_stm),
14378 NCE(vldr, d100b00, 2, (RND, ADDR), neon_ldr_str),
14379 NCE(vstr, d000b00, 2, (RND, ADDR), neon_ldr_str),
14381 #undef THUMB_VARIANT
14382 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
14384 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
14386 /* Neon element/structure load/store. */
14387 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
14388 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
14389 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
14390 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
14391 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
14392 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
14393 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
14394 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
14396 #undef THUMB_VARIANT
14397 #define THUMB_VARIANT &fpu_vfp_ext_v3
14399 #define ARM_VARIANT &fpu_vfp_ext_v3
14401 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
14402 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
14403 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14404 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14405 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14406 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14407 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14408 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14409 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14410 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14411 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14412 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14413 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14414 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14415 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
14416 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
14417 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
14418 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
14420 #undef THUMB_VARIANT
14422 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
14423 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14424 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14425 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14426 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14427 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14428 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
14429 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
14430 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
14433 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
14434 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
14435 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
14436 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
14437 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
14438 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
14439 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
14440 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
14441 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
14442 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
14443 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14444 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14445 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
14446 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14447 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14448 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
14449 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14450 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14451 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
14452 cCE(tmcr, e000110, 2, (RIWC, RR), rn_rd),
14453 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
14454 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14455 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14456 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14457 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14458 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14459 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
14460 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
14461 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
14462 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
14463 cCE(tmrc, e100110, 2, (RR, RIWC), rd_rn),
14464 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
14465 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
14466 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
14467 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
14468 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
14469 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
14470 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
14471 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14472 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14473 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14474 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14475 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14476 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14477 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14478 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14479 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14480 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
14481 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14482 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14483 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14484 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14485 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14486 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14487 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14488 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14489 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14490 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14491 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14492 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14493 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14494 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14495 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14496 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14497 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14498 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14499 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14500 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14501 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14502 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
14503 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
14504 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14505 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14506 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14507 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14508 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14509 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14510 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14511 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14512 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14513 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14514 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14515 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14516 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14517 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14518 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14519 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14520 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14521 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14522 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
14523 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14524 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14525 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14526 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14527 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14528 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14529 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14530 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14531 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14532 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14533 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14534 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14535 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14536 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14537 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14538 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14539 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14540 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14541 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14542 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14543 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14544 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
14545 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14546 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14547 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14548 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14549 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14550 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14551 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14552 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14553 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14554 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14555 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14556 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14557 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14558 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14559 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14560 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14561 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14562 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
14563 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14564 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
14565 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
14566 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
14567 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14568 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14569 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14570 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14571 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14572 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14573 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14574 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14575 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14576 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
14577 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
14578 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
14579 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
14580 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
14581 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
14582 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14583 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14584 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14585 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
14586 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
14587 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
14588 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
14589 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
14590 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
14591 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14592 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14593 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14594 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
14595 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
14598 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
14599 cCE(cfldrs, c100400, 2, (RMF, ADDR), rd_cpaddr),
14600 cCE(cfldrd, c500400, 2, (RMD, ADDR), rd_cpaddr),
14601 cCE(cfldr32, c100500, 2, (RMFX, ADDR), rd_cpaddr),
14602 cCE(cfldr64, c500500, 2, (RMDX, ADDR), rd_cpaddr),
14603 cCE(cfstrs, c000400, 2, (RMF, ADDR), rd_cpaddr),
14604 cCE(cfstrd, c400400, 2, (RMD, ADDR), rd_cpaddr),
14605 cCE(cfstr32, c000500, 2, (RMFX, ADDR), rd_cpaddr),
14606 cCE(cfstr64, c400500, 2, (RMDX, ADDR), rd_cpaddr),
14607 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
14608 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
14609 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
14610 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
14611 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
14612 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
14613 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
14614 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
14615 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
14616 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
14617 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
14618 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
14619 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
14620 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
14621 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
14622 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
14623 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
14624 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
14625 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
14626 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
14627 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
14628 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
14629 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
14630 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
14631 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
14632 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
14633 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
14634 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
14635 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
14636 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
14637 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
14638 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
14639 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
14640 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
14641 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
14642 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
14643 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
14644 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
14645 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
14646 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
14647 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
14648 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
14649 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
14650 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
14651 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
14652 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
14653 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
14654 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
14655 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
14656 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
14657 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
14658 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
14659 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
14660 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
14661 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
14662 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
14663 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14664 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14665 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14666 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14667 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14668 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
14669 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14670 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14671 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14672 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14673 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14674 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14677 #undef THUMB_VARIANT
14704 /* MD interface: bits in the object file. */
14706 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
14707 for use in the a.out file, and stores them in the array pointed to by buf.
14708 This knows about the endian-ness of the target machine and does
14709 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
14710 2 (short) and 4 (long) Floating numbers are put out as a series of
14711 LITTLENUMS (shorts, here at least). */
14714 md_number_to_chars (char * buf, valueT val, int n)
14716 if (target_big_endian)
14717 number_to_chars_bigendian (buf, val, n);
14719 number_to_chars_littleendian (buf, val, n);
14723 md_chars_to_number (char * buf, int n)
14726 unsigned char * where = (unsigned char *) buf;
14728 if (target_big_endian)
14733 result |= (*where++ & 255);
14741 result |= (where[n] & 255);
14748 /* MD interface: Sections. */
14750 /* Estimate the size of a frag before relaxing. Assume everything fits in
14754 md_estimate_size_before_relax (fragS * fragp,
14755 segT segtype ATTRIBUTE_UNUSED)
14761 /* Convert a machine dependent frag. */
14764 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
14766 unsigned long insn;
14767 unsigned long old_op;
14775 buf = fragp->fr_literal + fragp->fr_fix;
14777 old_op = bfd_get_16(abfd, buf);
14778 if (fragp->fr_symbol) {
14779 exp.X_op = O_symbol;
14780 exp.X_add_symbol = fragp->fr_symbol;
14782 exp.X_op = O_constant;
14784 exp.X_add_number = fragp->fr_offset;
14785 opcode = fragp->fr_subtype;
14788 case T_MNEM_ldr_pc:
14789 case T_MNEM_ldr_pc2:
14790 case T_MNEM_ldr_sp:
14791 case T_MNEM_str_sp:
14798 if (fragp->fr_var == 4)
14800 insn = THUMB_OP32(opcode);
14801 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
14803 insn |= (old_op & 0x700) << 4;
14807 insn |= (old_op & 7) << 12;
14808 insn |= (old_op & 0x38) << 13;
14810 insn |= 0x00000c00;
14811 put_thumb32_insn (buf, insn);
14812 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
14816 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
14818 pc_rel = (opcode == T_MNEM_ldr_pc2);
14821 if (fragp->fr_var == 4)
14823 insn = THUMB_OP32 (opcode);
14824 insn |= (old_op & 0xf0) << 4;
14825 put_thumb32_insn (buf, insn);
14826 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
14830 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14831 exp.X_add_number -= 4;
14839 if (fragp->fr_var == 4)
14841 int r0off = (opcode == T_MNEM_mov
14842 || opcode == T_MNEM_movs) ? 0 : 8;
14843 insn = THUMB_OP32 (opcode);
14844 insn = (insn & 0xe1ffffff) | 0x10000000;
14845 insn |= (old_op & 0x700) << r0off;
14846 put_thumb32_insn (buf, insn);
14847 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14851 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
14856 if (fragp->fr_var == 4)
14858 insn = THUMB_OP32(opcode);
14859 put_thumb32_insn (buf, insn);
14860 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
14863 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
14867 if (fragp->fr_var == 4)
14869 insn = THUMB_OP32(opcode);
14870 insn |= (old_op & 0xf00) << 14;
14871 put_thumb32_insn (buf, insn);
14872 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
14875 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
14878 case T_MNEM_add_sp:
14879 case T_MNEM_add_pc:
14880 case T_MNEM_inc_sp:
14881 case T_MNEM_dec_sp:
14882 if (fragp->fr_var == 4)
14884 /* ??? Choose between add and addw. */
14885 insn = THUMB_OP32 (opcode);
14886 insn |= (old_op & 0xf0) << 4;
14887 put_thumb32_insn (buf, insn);
14888 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14891 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14899 if (fragp->fr_var == 4)
14901 insn = THUMB_OP32 (opcode);
14902 insn |= (old_op & 0xf0) << 4;
14903 insn |= (old_op & 0xf) << 16;
14904 put_thumb32_insn (buf, insn);
14905 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14908 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14914 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
14916 fixp->fx_file = fragp->fr_file;
14917 fixp->fx_line = fragp->fr_line;
14918 fragp->fr_fix += fragp->fr_var;
14921 /* Return the size of a relaxable immediate operand instruction.
14922 SHIFT and SIZE specify the form of the allowable immediate. */
14924 relax_immediate (fragS *fragp, int size, int shift)
14930 /* ??? Should be able to do better than this. */
14931 if (fragp->fr_symbol)
14934 low = (1 << shift) - 1;
14935 mask = (1 << (shift + size)) - (1 << shift);
14936 offset = fragp->fr_offset;
14937 /* Force misaligned offsets to 32-bit variant. */
14940 if (offset & ~mask)
14945 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
14948 relax_adr (fragS *fragp, asection *sec)
14953 /* Assume worst case for symbols not known to be in the same section. */
14954 if (!S_IS_DEFINED(fragp->fr_symbol)
14955 || sec != S_GET_SEGMENT (fragp->fr_symbol))
14958 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
14959 addr = fragp->fr_address + fragp->fr_fix;
14960 addr = (addr + 4) & ~3;
14961 /* Fix the insn as the 4-byte version if the target address is not
14962 sufficiently aligned. This is prevents an infinite loop when two
14963 instructions have contradictory range/alignment requirements. */
14967 if (val < 0 || val > 1020)
14972 /* Return the size of a relaxable add/sub immediate instruction. */
14974 relax_addsub (fragS *fragp, asection *sec)
14979 buf = fragp->fr_literal + fragp->fr_fix;
14980 op = bfd_get_16(sec->owner, buf);
14981 if ((op & 0xf) == ((op >> 4) & 0xf))
14982 return relax_immediate (fragp, 8, 0);
14984 return relax_immediate (fragp, 3, 0);
14988 /* Return the size of a relaxable branch instruction. BITS is the
14989 size of the offset field in the narrow instruction. */
14992 relax_branch (fragS *fragp, asection *sec, int bits)
14998 /* Assume worst case for symbols not known to be in the same section. */
14999 if (!S_IS_DEFINED(fragp->fr_symbol)
15000 || sec != S_GET_SEGMENT (fragp->fr_symbol))
15003 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
15004 addr = fragp->fr_address + fragp->fr_fix + 4;
15007 /* Offset is a signed value *2 */
15009 if (val >= limit || val < -limit)
15015 /* Relax a machine dependent frag. This returns the amount by which
15016 the current size of the frag should change. */
15019 arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
15024 oldsize = fragp->fr_var;
15025 switch (fragp->fr_subtype)
15027 case T_MNEM_ldr_pc2:
15028 newsize = relax_adr(fragp, sec);
15030 case T_MNEM_ldr_pc:
15031 case T_MNEM_ldr_sp:
15032 case T_MNEM_str_sp:
15033 newsize = relax_immediate(fragp, 8, 2);
15037 newsize = relax_immediate(fragp, 5, 2);
15041 newsize = relax_immediate(fragp, 5, 1);
15045 newsize = relax_immediate(fragp, 5, 0);
15048 newsize = relax_adr(fragp, sec);
15054 newsize = relax_immediate(fragp, 8, 0);
15057 newsize = relax_branch(fragp, sec, 11);
15060 newsize = relax_branch(fragp, sec, 8);
15062 case T_MNEM_add_sp:
15063 case T_MNEM_add_pc:
15064 newsize = relax_immediate (fragp, 8, 2);
15066 case T_MNEM_inc_sp:
15067 case T_MNEM_dec_sp:
15068 newsize = relax_immediate (fragp, 7, 2);
15074 newsize = relax_addsub (fragp, sec);
15081 fragp->fr_var = -newsize;
15082 md_convert_frag (sec->owner, sec, fragp);
15084 return -(newsize + oldsize);
15086 fragp->fr_var = newsize;
15087 return newsize - oldsize;
15090 /* Round up a section size to the appropriate boundary. */
15093 md_section_align (segT segment ATTRIBUTE_UNUSED,
15099 /* Round all sects to multiple of 4. */
15100 return (size + 3) & ~3;
15104 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
15105 of an rs_align_code fragment. */
15108 arm_handle_align (fragS * fragP)
15110 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
15111 static char const thumb_noop[2] = { 0xc0, 0x46 };
15112 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
15113 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
15115 int bytes, fix, noop_size;
15119 if (fragP->fr_type != rs_align_code)
15122 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
15123 p = fragP->fr_literal + fragP->fr_fix;
15126 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
15127 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
15129 if (fragP->tc_frag_data)
15131 if (target_big_endian)
15132 noop = thumb_bigend_noop;
15135 noop_size = sizeof (thumb_noop);
15139 if (target_big_endian)
15140 noop = arm_bigend_noop;
15143 noop_size = sizeof (arm_noop);
15146 if (bytes & (noop_size - 1))
15148 fix = bytes & (noop_size - 1);
15149 memset (p, 0, fix);
15154 while (bytes >= noop_size)
15156 memcpy (p, noop, noop_size);
15158 bytes -= noop_size;
15162 fragP->fr_fix += fix;
15163 fragP->fr_var = noop_size;
15166 /* Called from md_do_align. Used to create an alignment
15167 frag in a code section. */
15170 arm_frag_align_code (int n, int max)
15174 /* We assume that there will never be a requirement
15175 to support alignments greater than 32 bytes. */
15176 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
15177 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."));
15179 p = frag_var (rs_align_code,
15180 MAX_MEM_FOR_RS_ALIGN_CODE,
15182 (relax_substateT) max,
15189 /* Perform target specific initialisation of a frag. */
15192 arm_init_frag (fragS * fragP)
15194 /* Record whether this frag is in an ARM or a THUMB area. */
15195 fragP->tc_frag_data = thumb_mode;
15199 /* When we change sections we need to issue a new mapping symbol. */
15202 arm_elf_change_section (void)
15205 segment_info_type *seginfo;
15207 /* Link an unlinked unwind index table section to the .text section. */
15208 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
15209 && elf_linked_to_section (now_seg) == NULL)
15210 elf_linked_to_section (now_seg) = text_section;
15212 if (!SEG_NORMAL (now_seg))
15215 flags = bfd_get_section_flags (stdoutput, now_seg);
15217 /* We can ignore sections that only contain debug info. */
15218 if ((flags & SEC_ALLOC) == 0)
15221 seginfo = seg_info (now_seg);
15222 mapstate = seginfo->tc_segment_info_data.mapstate;
15223 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
15227 arm_elf_section_type (const char * str, size_t len)
15229 if (len == 5 && strncmp (str, "exidx", 5) == 0)
15230 return SHT_ARM_EXIDX;
15235 /* Code to deal with unwinding tables. */
15237 static void add_unwind_adjustsp (offsetT);
15239 /* Cenerate and deferred unwind frame offset. */
15242 flush_pending_unwind (void)
15246 offset = unwind.pending_offset;
15247 unwind.pending_offset = 0;
15249 add_unwind_adjustsp (offset);
15252 /* Add an opcode to this list for this function. Two-byte opcodes should
15253 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
15257 add_unwind_opcode (valueT op, int length)
15259 /* Add any deferred stack adjustment. */
15260 if (unwind.pending_offset)
15261 flush_pending_unwind ();
15263 unwind.sp_restored = 0;
15265 if (unwind.opcode_count + length > unwind.opcode_alloc)
15267 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
15268 if (unwind.opcodes)
15269 unwind.opcodes = xrealloc (unwind.opcodes,
15270 unwind.opcode_alloc);
15272 unwind.opcodes = xmalloc (unwind.opcode_alloc);
15277 unwind.opcodes[unwind.opcode_count] = op & 0xff;
15279 unwind.opcode_count++;
15283 /* Add unwind opcodes to adjust the stack pointer. */
15286 add_unwind_adjustsp (offsetT offset)
15290 if (offset > 0x200)
15292 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
15297 /* Long form: 0xb2, uleb128. */
15298 /* This might not fit in a word so add the individual bytes,
15299 remembering the list is built in reverse order. */
15300 o = (valueT) ((offset - 0x204) >> 2);
15302 add_unwind_opcode (0, 1);
15304 /* Calculate the uleb128 encoding of the offset. */
15308 bytes[n] = o & 0x7f;
15314 /* Add the insn. */
15316 add_unwind_opcode (bytes[n - 1], 1);
15317 add_unwind_opcode (0xb2, 1);
15319 else if (offset > 0x100)
15321 /* Two short opcodes. */
15322 add_unwind_opcode (0x3f, 1);
15323 op = (offset - 0x104) >> 2;
15324 add_unwind_opcode (op, 1);
15326 else if (offset > 0)
15328 /* Short opcode. */
15329 op = (offset - 4) >> 2;
15330 add_unwind_opcode (op, 1);
15332 else if (offset < 0)
15335 while (offset > 0x100)
15337 add_unwind_opcode (0x7f, 1);
15340 op = ((offset - 4) >> 2) | 0x40;
15341 add_unwind_opcode (op, 1);
15345 /* Finish the list of unwind opcodes for this function. */
15347 finish_unwind_opcodes (void)
15351 if (unwind.fp_used)
15353 /* Adjust sp as necessary. */
15354 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
15355 flush_pending_unwind ();
15357 /* After restoring sp from the frame pointer. */
15358 op = 0x90 | unwind.fp_reg;
15359 add_unwind_opcode (op, 1);
15362 flush_pending_unwind ();
15366 /* Start an exception table entry. If idx is nonzero this is an index table
15370 start_unwind_section (const segT text_seg, int idx)
15372 const char * text_name;
15373 const char * prefix;
15374 const char * prefix_once;
15375 const char * group_name;
15379 size_t sec_name_len;
15386 prefix = ELF_STRING_ARM_unwind;
15387 prefix_once = ELF_STRING_ARM_unwind_once;
15388 type = SHT_ARM_EXIDX;
15392 prefix = ELF_STRING_ARM_unwind_info;
15393 prefix_once = ELF_STRING_ARM_unwind_info_once;
15394 type = SHT_PROGBITS;
15397 text_name = segment_name (text_seg);
15398 if (streq (text_name, ".text"))
15401 if (strncmp (text_name, ".gnu.linkonce.t.",
15402 strlen (".gnu.linkonce.t.")) == 0)
15404 prefix = prefix_once;
15405 text_name += strlen (".gnu.linkonce.t.");
15408 prefix_len = strlen (prefix);
15409 text_len = strlen (text_name);
15410 sec_name_len = prefix_len + text_len;
15411 sec_name = xmalloc (sec_name_len + 1);
15412 memcpy (sec_name, prefix, prefix_len);
15413 memcpy (sec_name + prefix_len, text_name, text_len);
15414 sec_name[prefix_len + text_len] = '\0';
15420 /* Handle COMDAT group. */
15421 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
15423 group_name = elf_group_name (text_seg);
15424 if (group_name == NULL)
15426 as_bad ("Group section `%s' has no group signature",
15427 segment_name (text_seg));
15428 ignore_rest_of_line ();
15431 flags |= SHF_GROUP;
15435 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
15437 /* Set the setion link for index tables. */
15439 elf_linked_to_section (now_seg) = text_seg;
15443 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
15444 personality routine data. Returns zero, or the index table value for
15445 and inline entry. */
15448 create_unwind_entry (int have_data)
15453 /* The current word of data. */
15455 /* The number of bytes left in this word. */
15458 finish_unwind_opcodes ();
15460 /* Remember the current text section. */
15461 unwind.saved_seg = now_seg;
15462 unwind.saved_subseg = now_subseg;
15464 start_unwind_section (now_seg, 0);
15466 if (unwind.personality_routine == NULL)
15468 if (unwind.personality_index == -2)
15471 as_bad (_("handerdata in cantunwind frame"));
15472 return 1; /* EXIDX_CANTUNWIND. */
15475 /* Use a default personality routine if none is specified. */
15476 if (unwind.personality_index == -1)
15478 if (unwind.opcode_count > 3)
15479 unwind.personality_index = 1;
15481 unwind.personality_index = 0;
15484 /* Space for the personality routine entry. */
15485 if (unwind.personality_index == 0)
15487 if (unwind.opcode_count > 3)
15488 as_bad (_("too many unwind opcodes for personality routine 0"));
15492 /* All the data is inline in the index table. */
15495 while (unwind.opcode_count > 0)
15497 unwind.opcode_count--;
15498 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
15502 /* Pad with "finish" opcodes. */
15504 data = (data << 8) | 0xb0;
15511 /* We get two opcodes "free" in the first word. */
15512 size = unwind.opcode_count - 2;
15515 /* An extra byte is required for the opcode count. */
15516 size = unwind.opcode_count + 1;
15518 size = (size + 3) >> 2;
15520 as_bad (_("too many unwind opcodes"));
15522 frag_align (2, 0, 0);
15523 record_alignment (now_seg, 2);
15524 unwind.table_entry = expr_build_dot ();
15526 /* Allocate the table entry. */
15527 ptr = frag_more ((size << 2) + 4);
15528 where = frag_now_fix () - ((size << 2) + 4);
15530 switch (unwind.personality_index)
15533 /* ??? Should this be a PLT generating relocation? */
15534 /* Custom personality routine. */
15535 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
15536 BFD_RELOC_ARM_PREL31);
15541 /* Set the first byte to the number of additional words. */
15546 /* ABI defined personality routines. */
15548 /* Three opcodes bytes are packed into the first word. */
15555 /* The size and first two opcode bytes go in the first word. */
15556 data = ((0x80 + unwind.personality_index) << 8) | size;
15561 /* Should never happen. */
15565 /* Pack the opcodes into words (MSB first), reversing the list at the same
15567 while (unwind.opcode_count > 0)
15571 md_number_to_chars (ptr, data, 4);
15576 unwind.opcode_count--;
15578 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
15581 /* Finish off the last word. */
15584 /* Pad with "finish" opcodes. */
15586 data = (data << 8) | 0xb0;
15588 md_number_to_chars (ptr, data, 4);
15593 /* Add an empty descriptor if there is no user-specified data. */
15594 ptr = frag_more (4);
15595 md_number_to_chars (ptr, 0, 4);
15601 /* Convert REGNAME to a DWARF-2 register number. */
15604 tc_arm_regname_to_dw2regnum (char *regname)
15606 int reg = arm_reg_parse (®name, REG_TYPE_RN);
15614 /* Initialize the DWARF-2 unwind information for this procedure. */
15617 tc_arm_frame_initial_instructions (void)
15619 cfi_add_CFA_def_cfa (REG_SP, 0);
15621 #endif /* OBJ_ELF */
15624 /* MD interface: Symbol and relocation handling. */
15626 /* Return the address within the segment that a PC-relative fixup is
15627 relative to. For ARM, PC-relative fixups applied to instructions
15628 are generally relative to the location of the fixup plus 8 bytes.
15629 Thumb branches are offset by 4, and Thumb loads relative to PC
15630 require special handling. */
15633 md_pcrel_from_section (fixS * fixP, segT seg)
15635 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
15637 /* If this is pc-relative and we are going to emit a relocation
15638 then we just want to put out any pipeline compensation that the linker
15639 will need. Otherwise we want to use the calculated base.
15640 For WinCE we skip the bias for externals as well, since this
15641 is how the MS ARM-CE assembler behaves and we want to be compatible. */
15643 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
15644 || (arm_force_relocation (fixP)
15646 && !S_IS_EXTERNAL (fixP->fx_addsy)
15651 switch (fixP->fx_r_type)
15653 /* PC relative addressing on the Thumb is slightly odd as the
15654 bottom two bits of the PC are forced to zero for the
15655 calculation. This happens *after* application of the
15656 pipeline offset. However, Thumb adrl already adjusts for
15657 this, so we need not do it again. */
15658 case BFD_RELOC_ARM_THUMB_ADD:
15661 case BFD_RELOC_ARM_THUMB_OFFSET:
15662 case BFD_RELOC_ARM_T32_OFFSET_IMM:
15663 case BFD_RELOC_ARM_T32_ADD_PC12:
15664 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
15665 return (base + 4) & ~3;
15667 /* Thumb branches are simply offset by +4. */
15668 case BFD_RELOC_THUMB_PCREL_BRANCH7:
15669 case BFD_RELOC_THUMB_PCREL_BRANCH9:
15670 case BFD_RELOC_THUMB_PCREL_BRANCH12:
15671 case BFD_RELOC_THUMB_PCREL_BRANCH20:
15672 case BFD_RELOC_THUMB_PCREL_BRANCH23:
15673 case BFD_RELOC_THUMB_PCREL_BRANCH25:
15674 case BFD_RELOC_THUMB_PCREL_BLX:
15677 /* ARM mode branches are offset by +8. However, the Windows CE
15678 loader expects the relocation not to take this into account. */
15679 case BFD_RELOC_ARM_PCREL_BRANCH:
15680 case BFD_RELOC_ARM_PCREL_CALL:
15681 case BFD_RELOC_ARM_PCREL_JUMP:
15682 case BFD_RELOC_ARM_PCREL_BLX:
15683 case BFD_RELOC_ARM_PLT32:
15685 /* When handling fixups immediately, because we have already
15686 discovered the value of a symbol, or the address of the frag involved
15687 we must account for the offset by +8, as the OS loader will never see the reloc.
15688 see fixup_segment() in write.c
15689 The S_IS_EXTERNAL test handles the case of global symbols.
15690 Those need the calculated base, not just the pipe compensation the linker will need. */
15692 && fixP->fx_addsy != NULL
15693 && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
15694 && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
15701 /* ARM mode loads relative to PC are also offset by +8. Unlike
15702 branches, the Windows CE loader *does* expect the relocation
15703 to take this into account. */
15704 case BFD_RELOC_ARM_OFFSET_IMM:
15705 case BFD_RELOC_ARM_OFFSET_IMM8:
15706 case BFD_RELOC_ARM_HWLITERAL:
15707 case BFD_RELOC_ARM_LITERAL:
15708 case BFD_RELOC_ARM_CP_OFF_IMM:
15712 /* Other PC-relative relocations are un-offset. */
15718 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
15719 Otherwise we have no need to default values of symbols. */
15722 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
15725 if (name[0] == '_' && name[1] == 'G'
15726 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
15730 if (symbol_find (name))
15731 as_bad ("GOT already in the symbol table");
15733 GOT_symbol = symbol_new (name, undefined_section,
15734 (valueT) 0, & zero_address_frag);
15744 /* Subroutine of md_apply_fix. Check to see if an immediate can be
15745 computed as two separate immediate values, added together. We
15746 already know that this value cannot be computed by just one ARM
15749 static unsigned int
15750 validate_immediate_twopart (unsigned int val,
15751 unsigned int * highpart)
15756 for (i = 0; i < 32; i += 2)
15757 if (((a = rotate_left (val, i)) & 0xff) != 0)
15763 * highpart = (a >> 8) | ((i + 24) << 7);
15765 else if (a & 0xff0000)
15767 if (a & 0xff000000)
15769 * highpart = (a >> 16) | ((i + 16) << 7);
15773 assert (a & 0xff000000);
15774 * highpart = (a >> 24) | ((i + 8) << 7);
15777 return (a & 0xff) | (i << 7);
15784 validate_offset_imm (unsigned int val, int hwse)
15786 if ((hwse && val > 255) || val > 4095)
15791 /* Subroutine of md_apply_fix. Do those data_ops which can take a
15792 negative immediate constant by altering the instruction. A bit of
15797 by inverting the second operand, and
15800 by negating the second operand. */
15803 negate_data_op (unsigned long * instruction,
15804 unsigned long value)
15807 unsigned long negated, inverted;
15809 negated = encode_arm_immediate (-value);
15810 inverted = encode_arm_immediate (~value);
15812 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
15815 /* First negates. */
15816 case OPCODE_SUB: /* ADD <-> SUB */
15817 new_inst = OPCODE_ADD;
15822 new_inst = OPCODE_SUB;
15826 case OPCODE_CMP: /* CMP <-> CMN */
15827 new_inst = OPCODE_CMN;
15832 new_inst = OPCODE_CMP;
15836 /* Now Inverted ops. */
15837 case OPCODE_MOV: /* MOV <-> MVN */
15838 new_inst = OPCODE_MVN;
15843 new_inst = OPCODE_MOV;
15847 case OPCODE_AND: /* AND <-> BIC */
15848 new_inst = OPCODE_BIC;
15853 new_inst = OPCODE_AND;
15857 case OPCODE_ADC: /* ADC <-> SBC */
15858 new_inst = OPCODE_SBC;
15863 new_inst = OPCODE_ADC;
15867 /* We cannot do anything. */
15872 if (value == (unsigned) FAIL)
15875 *instruction &= OPCODE_MASK;
15876 *instruction |= new_inst << DATA_OP_SHIFT;
15880 /* Like negate_data_op, but for Thumb-2. */
15882 static unsigned int
15883 thumb32_negate_data_op (offsetT *instruction, offsetT value)
15887 offsetT negated, inverted;
15889 negated = encode_thumb32_immediate (-value);
15890 inverted = encode_thumb32_immediate (~value);
15892 rd = (*instruction >> 8) & 0xf;
15893 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
15896 /* ADD <-> SUB. Includes CMP <-> CMN. */
15897 case T2_OPCODE_SUB:
15898 new_inst = T2_OPCODE_ADD;
15902 case T2_OPCODE_ADD:
15903 new_inst = T2_OPCODE_SUB;
15907 /* ORR <-> ORN. Includes MOV <-> MVN. */
15908 case T2_OPCODE_ORR:
15909 new_inst = T2_OPCODE_ORN;
15913 case T2_OPCODE_ORN:
15914 new_inst = T2_OPCODE_ORR;
15918 /* AND <-> BIC. TST has no inverted equivalent. */
15919 case T2_OPCODE_AND:
15920 new_inst = T2_OPCODE_BIC;
15927 case T2_OPCODE_BIC:
15928 new_inst = T2_OPCODE_AND;
15933 case T2_OPCODE_ADC:
15934 new_inst = T2_OPCODE_SBC;
15938 case T2_OPCODE_SBC:
15939 new_inst = T2_OPCODE_ADC;
15943 /* We cannot do anything. */
15951 *instruction &= T2_OPCODE_MASK;
15952 *instruction |= new_inst << T2_DATA_OP_SHIFT;
15956 /* Read a 32-bit thumb instruction from buf. */
15957 static unsigned long
15958 get_thumb32_insn (char * buf)
15960 unsigned long insn;
15961 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
15962 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
15968 /* We usually want to set the low bit on the address of thumb function
15969 symbols. In particular .word foo - . should have the low bit set.
15970 Generic code tries to fold the difference of two symbols to
15971 a constant. Prevent this and force a relocation when the first symbols
15972 is a thumb function. */
15974 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
15976 if (op == O_subtract
15977 && l->X_op == O_symbol
15978 && r->X_op == O_symbol
15979 && THUMB_IS_FUNC (l->X_add_symbol))
15981 l->X_op = O_subtract;
15982 l->X_op_symbol = r->X_add_symbol;
15983 l->X_add_number -= r->X_add_number;
15986 /* Process as normal. */
15991 md_apply_fix (fixS * fixP,
15995 offsetT value = * valP;
15997 unsigned int newimm;
15998 unsigned long temp;
16000 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
16002 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
16004 /* Note whether this will delete the relocation. */
16005 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
16008 /* On a 64-bit host, silently truncate 'value' to 32 bits for
16009 consistency with the behavior on 32-bit hosts. Remember value
16011 value &= 0xffffffff;
16012 value ^= 0x80000000;
16013 value -= 0x80000000;
16016 fixP->fx_addnumber = value;
16018 /* Same treatment for fixP->fx_offset. */
16019 fixP->fx_offset &= 0xffffffff;
16020 fixP->fx_offset ^= 0x80000000;
16021 fixP->fx_offset -= 0x80000000;
16023 switch (fixP->fx_r_type)
16025 case BFD_RELOC_NONE:
16026 /* This will need to go in the object file. */
16030 case BFD_RELOC_ARM_IMMEDIATE:
16031 /* We claim that this fixup has been processed here,
16032 even if in fact we generate an error because we do
16033 not have a reloc for it, so tc_gen_reloc will reject it. */
16037 && ! S_IS_DEFINED (fixP->fx_addsy))
16039 as_bad_where (fixP->fx_file, fixP->fx_line,
16040 _("undefined symbol %s used as an immediate value"),
16041 S_GET_NAME (fixP->fx_addsy));
16045 newimm = encode_arm_immediate (value);
16046 temp = md_chars_to_number (buf, INSN_SIZE);
16048 /* If the instruction will fail, see if we can fix things up by
16049 changing the opcode. */
16050 if (newimm == (unsigned int) FAIL
16051 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
16053 as_bad_where (fixP->fx_file, fixP->fx_line,
16054 _("invalid constant (%lx) after fixup"),
16055 (unsigned long) value);
16059 newimm |= (temp & 0xfffff000);
16060 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
16063 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
16065 unsigned int highpart = 0;
16066 unsigned int newinsn = 0xe1a00000; /* nop. */
16068 newimm = encode_arm_immediate (value);
16069 temp = md_chars_to_number (buf, INSN_SIZE);
16071 /* If the instruction will fail, see if we can fix things up by
16072 changing the opcode. */
16073 if (newimm == (unsigned int) FAIL
16074 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
16076 /* No ? OK - try using two ADD instructions to generate
16078 newimm = validate_immediate_twopart (value, & highpart);
16080 /* Yes - then make sure that the second instruction is
16082 if (newimm != (unsigned int) FAIL)
16084 /* Still No ? Try using a negated value. */
16085 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
16086 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
16087 /* Otherwise - give up. */
16090 as_bad_where (fixP->fx_file, fixP->fx_line,
16091 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
16096 /* Replace the first operand in the 2nd instruction (which
16097 is the PC) with the destination register. We have
16098 already added in the PC in the first instruction and we
16099 do not want to do it again. */
16100 newinsn &= ~ 0xf0000;
16101 newinsn |= ((newinsn & 0x0f000) << 4);
16104 newimm |= (temp & 0xfffff000);
16105 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
16107 highpart |= (newinsn & 0xfffff000);
16108 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
16112 case BFD_RELOC_ARM_OFFSET_IMM:
16113 if (!fixP->fx_done && seg->use_rela_p)
16116 case BFD_RELOC_ARM_LITERAL:
16122 if (validate_offset_imm (value, 0) == FAIL)
16124 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
16125 as_bad_where (fixP->fx_file, fixP->fx_line,
16126 _("invalid literal constant: pool needs to be closer"));
16128 as_bad_where (fixP->fx_file, fixP->fx_line,
16129 _("bad immediate value for offset (%ld)"),
16134 newval = md_chars_to_number (buf, INSN_SIZE);
16135 newval &= 0xff7ff000;
16136 newval |= value | (sign ? INDEX_UP : 0);
16137 md_number_to_chars (buf, newval, INSN_SIZE);
16140 case BFD_RELOC_ARM_OFFSET_IMM8:
16141 case BFD_RELOC_ARM_HWLITERAL:
16147 if (validate_offset_imm (value, 1) == FAIL)
16149 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
16150 as_bad_where (fixP->fx_file, fixP->fx_line,
16151 _("invalid literal constant: pool needs to be closer"));
16153 as_bad (_("bad immediate value for half-word offset (%ld)"),
16158 newval = md_chars_to_number (buf, INSN_SIZE);
16159 newval &= 0xff7ff0f0;
16160 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
16161 md_number_to_chars (buf, newval, INSN_SIZE);
16164 case BFD_RELOC_ARM_T32_OFFSET_U8:
16165 if (value < 0 || value > 1020 || value % 4 != 0)
16166 as_bad_where (fixP->fx_file, fixP->fx_line,
16167 _("bad immediate value for offset (%ld)"), (long) value);
16170 newval = md_chars_to_number (buf+2, THUMB_SIZE);
16172 md_number_to_chars (buf+2, newval, THUMB_SIZE);
16175 case BFD_RELOC_ARM_T32_OFFSET_IMM:
16176 /* This is a complicated relocation used for all varieties of Thumb32
16177 load/store instruction with immediate offset:
16179 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
16180 *4, optional writeback(W)
16181 (doubleword load/store)
16183 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
16184 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
16185 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
16186 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
16187 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
16189 Uppercase letters indicate bits that are already encoded at
16190 this point. Lowercase letters are our problem. For the
16191 second block of instructions, the secondary opcode nybble
16192 (bits 8..11) is present, and bit 23 is zero, even if this is
16193 a PC-relative operation. */
16194 newval = md_chars_to_number (buf, THUMB_SIZE);
16196 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
16198 if ((newval & 0xf0000000) == 0xe0000000)
16200 /* Doubleword load/store: 8-bit offset, scaled by 4. */
16202 newval |= (1 << 23);
16205 if (value % 4 != 0)
16207 as_bad_where (fixP->fx_file, fixP->fx_line,
16208 _("offset not a multiple of 4"));
16214 as_bad_where (fixP->fx_file, fixP->fx_line,
16215 _("offset out of range"));
16220 else if ((newval & 0x000f0000) == 0x000f0000)
16222 /* PC-relative, 12-bit offset. */
16224 newval |= (1 << 23);
16229 as_bad_where (fixP->fx_file, fixP->fx_line,
16230 _("offset out of range"));
16235 else if ((newval & 0x00000100) == 0x00000100)
16237 /* Writeback: 8-bit, +/- offset. */
16239 newval |= (1 << 9);
16244 as_bad_where (fixP->fx_file, fixP->fx_line,
16245 _("offset out of range"));
16250 else if ((newval & 0x00000f00) == 0x00000e00)
16252 /* T-instruction: positive 8-bit offset. */
16253 if (value < 0 || value > 0xff)
16255 as_bad_where (fixP->fx_file, fixP->fx_line,
16256 _("offset out of range"));
16264 /* Positive 12-bit or negative 8-bit offset. */
16268 newval |= (1 << 23);
16278 as_bad_where (fixP->fx_file, fixP->fx_line,
16279 _("offset out of range"));
16286 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
16287 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
16290 case BFD_RELOC_ARM_SHIFT_IMM:
16291 newval = md_chars_to_number (buf, INSN_SIZE);
16292 if (((unsigned long) value) > 32
16294 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
16296 as_bad_where (fixP->fx_file, fixP->fx_line,
16297 _("shift expression is too large"));
16302 /* Shifts of zero must be done as lsl. */
16304 else if (value == 32)
16306 newval &= 0xfffff07f;
16307 newval |= (value & 0x1f) << 7;
16308 md_number_to_chars (buf, newval, INSN_SIZE);
16311 case BFD_RELOC_ARM_T32_IMMEDIATE:
16312 case BFD_RELOC_ARM_T32_IMM12:
16313 case BFD_RELOC_ARM_T32_ADD_PC12:
16314 /* We claim that this fixup has been processed here,
16315 even if in fact we generate an error because we do
16316 not have a reloc for it, so tc_gen_reloc will reject it. */
16320 && ! S_IS_DEFINED (fixP->fx_addsy))
16322 as_bad_where (fixP->fx_file, fixP->fx_line,
16323 _("undefined symbol %s used as an immediate value"),
16324 S_GET_NAME (fixP->fx_addsy));
16328 newval = md_chars_to_number (buf, THUMB_SIZE);
16330 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
16332 /* FUTURE: Implement analogue of negate_data_op for T32. */
16333 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE)
16335 newimm = encode_thumb32_immediate (value);
16336 if (newimm == (unsigned int) FAIL)
16337 newimm = thumb32_negate_data_op (&newval, value);
16341 /* 12 bit immediate for addw/subw. */
16345 newval ^= 0x00a00000;
16348 newimm = (unsigned int) FAIL;
16353 if (newimm == (unsigned int)FAIL)
16355 as_bad_where (fixP->fx_file, fixP->fx_line,
16356 _("invalid constant (%lx) after fixup"),
16357 (unsigned long) value);
16361 newval |= (newimm & 0x800) << 15;
16362 newval |= (newimm & 0x700) << 4;
16363 newval |= (newimm & 0x0ff);
16365 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
16366 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
16369 case BFD_RELOC_ARM_SMC:
16370 if (((unsigned long) value) > 0xffff)
16371 as_bad_where (fixP->fx_file, fixP->fx_line,
16372 _("invalid smc expression"));
16373 newval = md_chars_to_number (buf, INSN_SIZE);
16374 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
16375 md_number_to_chars (buf, newval, INSN_SIZE);
16378 case BFD_RELOC_ARM_SWI:
16379 if (fixP->tc_fix_data != 0)
16381 if (((unsigned long) value) > 0xff)
16382 as_bad_where (fixP->fx_file, fixP->fx_line,
16383 _("invalid swi expression"));
16384 newval = md_chars_to_number (buf, THUMB_SIZE);
16386 md_number_to_chars (buf, newval, THUMB_SIZE);
16390 if (((unsigned long) value) > 0x00ffffff)
16391 as_bad_where (fixP->fx_file, fixP->fx_line,
16392 _("invalid swi expression"));
16393 newval = md_chars_to_number (buf, INSN_SIZE);
16395 md_number_to_chars (buf, newval, INSN_SIZE);
16399 case BFD_RELOC_ARM_MULTI:
16400 if (((unsigned long) value) > 0xffff)
16401 as_bad_where (fixP->fx_file, fixP->fx_line,
16402 _("invalid expression in load/store multiple"));
16403 newval = value | md_chars_to_number (buf, INSN_SIZE);
16404 md_number_to_chars (buf, newval, INSN_SIZE);
16408 case BFD_RELOC_ARM_PCREL_CALL:
16409 newval = md_chars_to_number (buf, INSN_SIZE);
16410 if ((newval & 0xf0000000) == 0xf0000000)
16414 goto arm_branch_common;
16416 case BFD_RELOC_ARM_PCREL_JUMP:
16417 case BFD_RELOC_ARM_PLT32:
16419 case BFD_RELOC_ARM_PCREL_BRANCH:
16421 goto arm_branch_common;
16423 case BFD_RELOC_ARM_PCREL_BLX:
16426 /* We are going to store value (shifted right by two) in the
16427 instruction, in a 24 bit, signed field. Bits 26 through 32 either
16428 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
16429 also be be clear. */
16431 as_bad_where (fixP->fx_file, fixP->fx_line,
16432 _("misaligned branch destination"));
16433 if ((value & (offsetT)0xfe000000) != (offsetT)0
16434 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
16435 as_bad_where (fixP->fx_file, fixP->fx_line,
16436 _("branch out of range"));
16438 if (fixP->fx_done || !seg->use_rela_p)
16440 newval = md_chars_to_number (buf, INSN_SIZE);
16441 newval |= (value >> 2) & 0x00ffffff;
16442 /* Set the H bit on BLX instructions. */
16446 newval |= 0x01000000;
16448 newval &= ~0x01000000;
16450 md_number_to_chars (buf, newval, INSN_SIZE);
16454 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CZB */
16455 /* CZB can only branch forward. */
16457 as_bad_where (fixP->fx_file, fixP->fx_line,
16458 _("branch out of range"));
16460 if (fixP->fx_done || !seg->use_rela_p)
16462 newval = md_chars_to_number (buf, THUMB_SIZE);
16463 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
16464 md_number_to_chars (buf, newval, THUMB_SIZE);
16468 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
16469 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
16470 as_bad_where (fixP->fx_file, fixP->fx_line,
16471 _("branch out of range"));
16473 if (fixP->fx_done || !seg->use_rela_p)
16475 newval = md_chars_to_number (buf, THUMB_SIZE);
16476 newval |= (value & 0x1ff) >> 1;
16477 md_number_to_chars (buf, newval, THUMB_SIZE);
16481 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
16482 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
16483 as_bad_where (fixP->fx_file, fixP->fx_line,
16484 _("branch out of range"));
16486 if (fixP->fx_done || !seg->use_rela_p)
16488 newval = md_chars_to_number (buf, THUMB_SIZE);
16489 newval |= (value & 0xfff) >> 1;
16490 md_number_to_chars (buf, newval, THUMB_SIZE);
16494 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16495 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
16496 as_bad_where (fixP->fx_file, fixP->fx_line,
16497 _("conditional branch out of range"));
16499 if (fixP->fx_done || !seg->use_rela_p)
16502 addressT S, J1, J2, lo, hi;
16504 S = (value & 0x00100000) >> 20;
16505 J2 = (value & 0x00080000) >> 19;
16506 J1 = (value & 0x00040000) >> 18;
16507 hi = (value & 0x0003f000) >> 12;
16508 lo = (value & 0x00000ffe) >> 1;
16510 newval = md_chars_to_number (buf, THUMB_SIZE);
16511 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16512 newval |= (S << 10) | hi;
16513 newval2 |= (J1 << 13) | (J2 << 11) | lo;
16514 md_number_to_chars (buf, newval, THUMB_SIZE);
16515 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16519 case BFD_RELOC_THUMB_PCREL_BLX:
16520 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16521 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
16522 as_bad_where (fixP->fx_file, fixP->fx_line,
16523 _("branch out of range"));
16525 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
16526 /* For a BLX instruction, make sure that the relocation is rounded up
16527 to a word boundary. This follows the semantics of the instruction
16528 which specifies that bit 1 of the target address will come from bit
16529 1 of the base address. */
16530 value = (value + 1) & ~ 1;
16532 if (fixP->fx_done || !seg->use_rela_p)
16536 newval = md_chars_to_number (buf, THUMB_SIZE);
16537 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16538 newval |= (value & 0x7fffff) >> 12;
16539 newval2 |= (value & 0xfff) >> 1;
16540 md_number_to_chars (buf, newval, THUMB_SIZE);
16541 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16545 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16546 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
16547 as_bad_where (fixP->fx_file, fixP->fx_line,
16548 _("branch out of range"));
16550 if (fixP->fx_done || !seg->use_rela_p)
16553 addressT S, I1, I2, lo, hi;
16555 S = (value & 0x01000000) >> 24;
16556 I1 = (value & 0x00800000) >> 23;
16557 I2 = (value & 0x00400000) >> 22;
16558 hi = (value & 0x003ff000) >> 12;
16559 lo = (value & 0x00000ffe) >> 1;
16564 newval = md_chars_to_number (buf, THUMB_SIZE);
16565 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
16566 newval |= (S << 10) | hi;
16567 newval2 |= (I1 << 13) | (I2 << 11) | lo;
16568 md_number_to_chars (buf, newval, THUMB_SIZE);
16569 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
16574 if (fixP->fx_done || !seg->use_rela_p)
16575 md_number_to_chars (buf, value, 1);
16579 if (fixP->fx_done || !seg->use_rela_p)
16580 md_number_to_chars (buf, value, 2);
16584 case BFD_RELOC_ARM_TLS_GD32:
16585 case BFD_RELOC_ARM_TLS_LE32:
16586 case BFD_RELOC_ARM_TLS_IE32:
16587 case BFD_RELOC_ARM_TLS_LDM32:
16588 case BFD_RELOC_ARM_TLS_LDO32:
16589 S_SET_THREAD_LOCAL (fixP->fx_addsy);
16592 case BFD_RELOC_ARM_GOT32:
16593 case BFD_RELOC_ARM_GOTOFF:
16594 case BFD_RELOC_ARM_TARGET2:
16595 if (fixP->fx_done || !seg->use_rela_p)
16596 md_number_to_chars (buf, 0, 4);
16600 case BFD_RELOC_RVA:
16602 case BFD_RELOC_ARM_TARGET1:
16603 case BFD_RELOC_ARM_ROSEGREL32:
16604 case BFD_RELOC_ARM_SBREL32:
16605 case BFD_RELOC_32_PCREL:
16606 if (fixP->fx_done || !seg->use_rela_p)
16608 /* For WinCE we only do this for pcrel fixups. */
16609 if (fixP->fx_done || fixP->fx_pcrel)
16611 md_number_to_chars (buf, value, 4);
16615 case BFD_RELOC_ARM_PREL31:
16616 if (fixP->fx_done || !seg->use_rela_p)
16618 newval = md_chars_to_number (buf, 4) & 0x80000000;
16619 if ((value ^ (value >> 1)) & 0x40000000)
16621 as_bad_where (fixP->fx_file, fixP->fx_line,
16622 _("rel31 relocation overflow"));
16624 newval |= value & 0x7fffffff;
16625 md_number_to_chars (buf, newval, 4);
16630 case BFD_RELOC_ARM_CP_OFF_IMM:
16631 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
16632 if (value < -1023 || value > 1023 || (value & 3))
16633 as_bad_where (fixP->fx_file, fixP->fx_line,
16634 _("co-processor offset out of range"));
16639 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
16640 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
16641 newval = md_chars_to_number (buf, INSN_SIZE);
16643 newval = get_thumb32_insn (buf);
16644 newval &= 0xff7fff00;
16645 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
16647 newval &= ~WRITE_BACK;
16648 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
16649 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
16650 md_number_to_chars (buf, newval, INSN_SIZE);
16652 put_thumb32_insn (buf, newval);
16655 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
16656 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
16657 if (value < -255 || value > 255)
16658 as_bad_where (fixP->fx_file, fixP->fx_line,
16659 _("co-processor offset out of range"));
16661 goto cp_off_common;
16663 case BFD_RELOC_ARM_THUMB_OFFSET:
16664 newval = md_chars_to_number (buf, THUMB_SIZE);
16665 /* Exactly what ranges, and where the offset is inserted depends
16666 on the type of instruction, we can establish this from the
16668 switch (newval >> 12)
16670 case 4: /* PC load. */
16671 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
16672 forced to zero for these loads; md_pcrel_from has already
16673 compensated for this. */
16675 as_bad_where (fixP->fx_file, fixP->fx_line,
16676 _("invalid offset, target not word aligned (0x%08lX)"),
16677 (((unsigned long) fixP->fx_frag->fr_address
16678 + (unsigned long) fixP->fx_where) & ~3)
16679 + (unsigned long) value);
16681 if (value & ~0x3fc)
16682 as_bad_where (fixP->fx_file, fixP->fx_line,
16683 _("invalid offset, value too big (0x%08lX)"),
16686 newval |= value >> 2;
16689 case 9: /* SP load/store. */
16690 if (value & ~0x3fc)
16691 as_bad_where (fixP->fx_file, fixP->fx_line,
16692 _("invalid offset, value too big (0x%08lX)"),
16694 newval |= value >> 2;
16697 case 6: /* Word load/store. */
16699 as_bad_where (fixP->fx_file, fixP->fx_line,
16700 _("invalid offset, value too big (0x%08lX)"),
16702 newval |= value << 4; /* 6 - 2. */
16705 case 7: /* Byte load/store. */
16707 as_bad_where (fixP->fx_file, fixP->fx_line,
16708 _("invalid offset, value too big (0x%08lX)"),
16710 newval |= value << 6;
16713 case 8: /* Halfword load/store. */
16715 as_bad_where (fixP->fx_file, fixP->fx_line,
16716 _("invalid offset, value too big (0x%08lX)"),
16718 newval |= value << 5; /* 6 - 1. */
16722 as_bad_where (fixP->fx_file, fixP->fx_line,
16723 "Unable to process relocation for thumb opcode: %lx",
16724 (unsigned long) newval);
16727 md_number_to_chars (buf, newval, THUMB_SIZE);
16730 case BFD_RELOC_ARM_THUMB_ADD:
16731 /* This is a complicated relocation, since we use it for all of
16732 the following immediate relocations:
16736 9bit ADD/SUB SP word-aligned
16737 10bit ADD PC/SP word-aligned
16739 The type of instruction being processed is encoded in the
16746 newval = md_chars_to_number (buf, THUMB_SIZE);
16748 int rd = (newval >> 4) & 0xf;
16749 int rs = newval & 0xf;
16750 int subtract = !!(newval & 0x8000);
16752 /* Check for HI regs, only very restricted cases allowed:
16753 Adjusting SP, and using PC or SP to get an address. */
16754 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
16755 || (rs > 7 && rs != REG_SP && rs != REG_PC))
16756 as_bad_where (fixP->fx_file, fixP->fx_line,
16757 _("invalid Hi register with immediate"));
16759 /* If value is negative, choose the opposite instruction. */
16763 subtract = !subtract;
16765 as_bad_where (fixP->fx_file, fixP->fx_line,
16766 _("immediate value out of range"));
16771 if (value & ~0x1fc)
16772 as_bad_where (fixP->fx_file, fixP->fx_line,
16773 _("invalid immediate for stack address calculation"));
16774 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
16775 newval |= value >> 2;
16777 else if (rs == REG_PC || rs == REG_SP)
16779 if (subtract || value & ~0x3fc)
16780 as_bad_where (fixP->fx_file, fixP->fx_line,
16781 _("invalid immediate for address calculation (value = 0x%08lX)"),
16782 (unsigned long) value);
16783 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
16785 newval |= value >> 2;
16790 as_bad_where (fixP->fx_file, fixP->fx_line,
16791 _("immediate value out of range"));
16792 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
16793 newval |= (rd << 8) | value;
16798 as_bad_where (fixP->fx_file, fixP->fx_line,
16799 _("immediate value out of range"));
16800 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
16801 newval |= rd | (rs << 3) | (value << 6);
16804 md_number_to_chars (buf, newval, THUMB_SIZE);
16807 case BFD_RELOC_ARM_THUMB_IMM:
16808 newval = md_chars_to_number (buf, THUMB_SIZE);
16809 if (value < 0 || value > 255)
16810 as_bad_where (fixP->fx_file, fixP->fx_line,
16811 _("invalid immediate: %ld is too large"),
16814 md_number_to_chars (buf, newval, THUMB_SIZE);
16817 case BFD_RELOC_ARM_THUMB_SHIFT:
16818 /* 5bit shift value (0..32). LSL cannot take 32. */
16819 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
16820 temp = newval & 0xf800;
16821 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
16822 as_bad_where (fixP->fx_file, fixP->fx_line,
16823 _("invalid shift value: %ld"), (long) value);
16824 /* Shifts of zero must be encoded as LSL. */
16826 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
16827 /* Shifts of 32 are encoded as zero. */
16828 else if (value == 32)
16830 newval |= value << 6;
16831 md_number_to_chars (buf, newval, THUMB_SIZE);
16834 case BFD_RELOC_VTABLE_INHERIT:
16835 case BFD_RELOC_VTABLE_ENTRY:
16839 case BFD_RELOC_ARM_MOVW:
16840 case BFD_RELOC_ARM_MOVT:
16841 case BFD_RELOC_ARM_THUMB_MOVW:
16842 case BFD_RELOC_ARM_THUMB_MOVT:
16843 if (fixP->fx_done || !seg->use_rela_p)
16845 /* REL format relocations are limited to a 16-bit addend. */
16846 if (!fixP->fx_done)
16848 if (value < -0x1000 || value > 0xffff)
16849 as_bad_where (fixP->fx_file, fixP->fx_line,
16850 _("offset too big"));
16852 else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
16853 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
16858 if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
16859 || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
16861 newval = get_thumb32_insn (buf);
16862 newval &= 0xfbf08f00;
16863 newval |= (value & 0xf000) << 4;
16864 newval |= (value & 0x0800) << 15;
16865 newval |= (value & 0x0700) << 4;
16866 newval |= (value & 0x00ff);
16867 put_thumb32_insn (buf, newval);
16871 newval = md_chars_to_number (buf, 4);
16872 newval &= 0xfff0f000;
16873 newval |= value & 0x0fff;
16874 newval |= (value & 0xf000) << 4;
16875 md_number_to_chars (buf, newval, 4);
16880 case BFD_RELOC_UNUSED:
16882 as_bad_where (fixP->fx_file, fixP->fx_line,
16883 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
16887 /* Translate internal representation of relocation info to BFD target
16891 tc_gen_reloc (asection *section, fixS *fixp)
16894 bfd_reloc_code_real_type code;
16896 reloc = xmalloc (sizeof (arelent));
16898 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
16899 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
16900 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
16902 if (fixp->fx_pcrel)
16904 if (section->use_rela_p)
16905 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
16907 fixp->fx_offset = reloc->address;
16909 reloc->addend = fixp->fx_offset;
16911 switch (fixp->fx_r_type)
16914 if (fixp->fx_pcrel)
16916 code = BFD_RELOC_8_PCREL;
16921 if (fixp->fx_pcrel)
16923 code = BFD_RELOC_16_PCREL;
16928 if (fixp->fx_pcrel)
16930 code = BFD_RELOC_32_PCREL;
16934 case BFD_RELOC_ARM_MOVW:
16935 if (fixp->fx_pcrel)
16937 code = BFD_RELOC_ARM_MOVW_PCREL;
16941 case BFD_RELOC_ARM_MOVT:
16942 if (fixp->fx_pcrel)
16944 code = BFD_RELOC_ARM_MOVT_PCREL;
16948 case BFD_RELOC_ARM_THUMB_MOVW:
16949 if (fixp->fx_pcrel)
16951 code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
16955 case BFD_RELOC_ARM_THUMB_MOVT:
16956 if (fixp->fx_pcrel)
16958 code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
16962 case BFD_RELOC_NONE:
16963 case BFD_RELOC_ARM_PCREL_BRANCH:
16964 case BFD_RELOC_ARM_PCREL_BLX:
16965 case BFD_RELOC_RVA:
16966 case BFD_RELOC_THUMB_PCREL_BRANCH7:
16967 case BFD_RELOC_THUMB_PCREL_BRANCH9:
16968 case BFD_RELOC_THUMB_PCREL_BRANCH12:
16969 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16970 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16971 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16972 case BFD_RELOC_THUMB_PCREL_BLX:
16973 case BFD_RELOC_VTABLE_ENTRY:
16974 case BFD_RELOC_VTABLE_INHERIT:
16975 code = fixp->fx_r_type;
16978 case BFD_RELOC_ARM_LITERAL:
16979 case BFD_RELOC_ARM_HWLITERAL:
16980 /* If this is called then the a literal has
16981 been referenced across a section boundary. */
16982 as_bad_where (fixp->fx_file, fixp->fx_line,
16983 _("literal referenced across section boundary"));
16987 case BFD_RELOC_ARM_GOT32:
16988 case BFD_RELOC_ARM_GOTOFF:
16989 case BFD_RELOC_ARM_PLT32:
16990 case BFD_RELOC_ARM_TARGET1:
16991 case BFD_RELOC_ARM_ROSEGREL32:
16992 case BFD_RELOC_ARM_SBREL32:
16993 case BFD_RELOC_ARM_PREL31:
16994 case BFD_RELOC_ARM_TARGET2:
16995 case BFD_RELOC_ARM_TLS_LE32:
16996 case BFD_RELOC_ARM_TLS_LDO32:
16997 case BFD_RELOC_ARM_PCREL_CALL:
16998 case BFD_RELOC_ARM_PCREL_JUMP:
16999 code = fixp->fx_r_type;
17002 case BFD_RELOC_ARM_TLS_GD32:
17003 case BFD_RELOC_ARM_TLS_IE32:
17004 case BFD_RELOC_ARM_TLS_LDM32:
17005 /* BFD will include the symbol's address in the addend.
17006 But we don't want that, so subtract it out again here. */
17007 if (!S_IS_COMMON (fixp->fx_addsy))
17008 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
17009 code = fixp->fx_r_type;
17013 case BFD_RELOC_ARM_IMMEDIATE:
17014 as_bad_where (fixp->fx_file, fixp->fx_line,
17015 _("internal relocation (type: IMMEDIATE) not fixed up"));
17018 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
17019 as_bad_where (fixp->fx_file, fixp->fx_line,
17020 _("ADRL used for a symbol not defined in the same file"));
17023 case BFD_RELOC_ARM_OFFSET_IMM:
17024 if (section->use_rela_p)
17026 code = fixp->fx_r_type;
17030 if (fixp->fx_addsy != NULL
17031 && !S_IS_DEFINED (fixp->fx_addsy)
17032 && S_IS_LOCAL (fixp->fx_addsy))
17034 as_bad_where (fixp->fx_file, fixp->fx_line,
17035 _("undefined local label `%s'"),
17036 S_GET_NAME (fixp->fx_addsy));
17040 as_bad_where (fixp->fx_file, fixp->fx_line,
17041 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
17048 switch (fixp->fx_r_type)
17050 case BFD_RELOC_NONE: type = "NONE"; break;
17051 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
17052 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
17053 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
17054 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
17055 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
17056 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
17057 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
17058 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
17059 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
17060 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
17061 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
17062 default: type = _("<unknown>"); break;
17064 as_bad_where (fixp->fx_file, fixp->fx_line,
17065 _("cannot represent %s relocation in this object file format"),
17072 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
17074 && fixp->fx_addsy == GOT_symbol)
17076 code = BFD_RELOC_ARM_GOTPC;
17077 reloc->addend = fixp->fx_offset = reloc->address;
17081 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
17083 if (reloc->howto == NULL)
17085 as_bad_where (fixp->fx_file, fixp->fx_line,
17086 _("cannot represent %s relocation in this object file format"),
17087 bfd_get_reloc_code_name (code));
17091 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
17092 vtable entry to be used in the relocation's section offset. */
17093 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
17094 reloc->address = fixp->fx_offset;
17099 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
17102 cons_fix_new_arm (fragS * frag,
17107 bfd_reloc_code_real_type type;
17111 FIXME: @@ Should look at CPU word size. */
17115 type = BFD_RELOC_8;
17118 type = BFD_RELOC_16;
17122 type = BFD_RELOC_32;
17125 type = BFD_RELOC_64;
17129 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
17132 #if defined OBJ_COFF || defined OBJ_ELF
17134 arm_validate_fix (fixS * fixP)
17136 /* If the destination of the branch is a defined symbol which does not have
17137 the THUMB_FUNC attribute, then we must be calling a function which has
17138 the (interfacearm) attribute. We look for the Thumb entry point to that
17139 function and change the branch to refer to that function instead. */
17140 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
17141 && fixP->fx_addsy != NULL
17142 && S_IS_DEFINED (fixP->fx_addsy)
17143 && ! THUMB_IS_FUNC (fixP->fx_addsy))
17145 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
17151 arm_force_relocation (struct fix * fixp)
17153 #if defined (OBJ_COFF) && defined (TE_PE)
17154 if (fixp->fx_r_type == BFD_RELOC_RVA)
17158 /* Resolve these relocations even if the symbol is extern or weak. */
17159 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
17160 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
17161 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
17162 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
17163 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
17164 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
17167 return generic_force_reloc (fixp);
17172 arm_fix_adjustable (fixS * fixP)
17174 /* This is a little hack to help the gas/arm/adrl.s test. It prevents
17175 local labels from being added to the output symbol table when they
17176 are used with the ADRL pseudo op. The ADRL relocation should always
17177 be resolved before the binbary is emitted, so it is safe to say that
17178 it is adjustable. */
17179 if (fixP->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE)
17182 /* This is a hack for the gas/all/redef2.s test. This test causes symbols
17183 to be cloned, and without this test relocs would still be generated
17184 against the original, pre-cloned symbol. Such symbols would not appear
17185 in the symbol table however, and so a valid reloc could not be
17186 generated. So check to see if the fixup is against a symbol which has
17187 been removed from the symbol chain, and if it is, then allow it to be
17188 adjusted into a reloc against a section symbol. */
17189 if (fixP->fx_addsy != NULL
17190 && ! S_IS_LOCAL (fixP->fx_addsy)
17191 && symbol_next (fixP->fx_addsy) == NULL
17192 && symbol_next (fixP->fx_addsy) == symbol_previous (fixP->fx_addsy))
17200 /* Relocations against function names must be left unadjusted,
17201 so that the linker can use this information to generate interworking
17202 stubs. The MIPS version of this function
17203 also prevents relocations that are mips-16 specific, but I do not
17204 know why it does this.
17207 There is one other problem that ought to be addressed here, but
17208 which currently is not: Taking the address of a label (rather
17209 than a function) and then later jumping to that address. Such
17210 addresses also ought to have their bottom bit set (assuming that
17211 they reside in Thumb code), but at the moment they will not. */
17214 arm_fix_adjustable (fixS * fixP)
17216 if (fixP->fx_addsy == NULL)
17219 /* Preserve relocations against symbols with function type. */
17220 if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
17223 if (THUMB_IS_FUNC (fixP->fx_addsy)
17224 && fixP->fx_subsy == NULL)
17227 /* We need the symbol name for the VTABLE entries. */
17228 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
17229 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
17232 /* Don't allow symbols to be discarded on GOT related relocs. */
17233 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
17234 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
17235 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
17236 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
17237 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
17238 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
17239 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
17240 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
17241 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
17248 elf32_arm_target_format (void)
17251 return (target_big_endian
17252 ? "elf32-bigarm-symbian"
17253 : "elf32-littlearm-symbian");
17254 #elif defined (TE_VXWORKS)
17255 return (target_big_endian
17256 ? "elf32-bigarm-vxworks"
17257 : "elf32-littlearm-vxworks");
17259 if (target_big_endian)
17260 return "elf32-bigarm";
17262 return "elf32-littlearm";
17267 armelf_frob_symbol (symbolS * symp,
17270 elf_frob_symbol (symp, puntp);
17274 /* MD interface: Finalization. */
17276 /* A good place to do this, although this was probably not intended
17277 for this kind of use. We need to dump the literal pool before
17278 references are made to a null symbol pointer. */
17283 literal_pool * pool;
17285 for (pool = list_of_pools; pool; pool = pool->next)
17287 /* Put it at the end of the relevent section. */
17288 subseg_set (pool->section, pool->sub_section);
17290 arm_elf_change_section ();
17296 /* Adjust the symbol table. This marks Thumb symbols as distinct from
17300 arm_adjust_symtab (void)
17305 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
17307 if (ARM_IS_THUMB (sym))
17309 if (THUMB_IS_FUNC (sym))
17311 /* Mark the symbol as a Thumb function. */
17312 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
17313 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
17314 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
17316 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
17317 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
17319 as_bad (_("%s: unexpected function type: %d"),
17320 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
17322 else switch (S_GET_STORAGE_CLASS (sym))
17325 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
17328 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
17331 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
17339 if (ARM_IS_INTERWORK (sym))
17340 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
17347 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
17349 if (ARM_IS_THUMB (sym))
17351 elf_symbol_type * elf_sym;
17353 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
17354 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
17356 if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
17357 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
17359 /* If it's a .thumb_func, declare it as so,
17360 otherwise tag label as .code 16. */
17361 if (THUMB_IS_FUNC (sym))
17362 elf_sym->internal_elf_sym.st_info =
17363 ELF_ST_INFO (bind, STT_ARM_TFUNC);
17365 elf_sym->internal_elf_sym.st_info =
17366 ELF_ST_INFO (bind, STT_ARM_16BIT);
17373 /* MD interface: Initialization. */
17376 set_constant_flonums (void)
17380 for (i = 0; i < NUM_FLOAT_VALS; i++)
17381 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
17391 if ( (arm_ops_hsh = hash_new ()) == NULL
17392 || (arm_cond_hsh = hash_new ()) == NULL
17393 || (arm_shift_hsh = hash_new ()) == NULL
17394 || (arm_psr_hsh = hash_new ()) == NULL
17395 || (arm_v7m_psr_hsh = hash_new ()) == NULL
17396 || (arm_reg_hsh = hash_new ()) == NULL
17397 || (arm_reloc_hsh = hash_new ()) == NULL
17398 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
17399 as_fatal (_("virtual memory exhausted"));
17401 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
17402 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
17403 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
17404 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
17405 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
17406 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
17407 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
17408 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
17409 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
17410 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
17411 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
17412 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
17414 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
17416 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
17417 (PTR) (barrier_opt_names + i));
17419 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
17420 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
17423 set_constant_flonums ();
17425 /* Set the cpu variant based on the command-line options. We prefer
17426 -mcpu= over -march= if both are set (as for GCC); and we prefer
17427 -mfpu= over any other way of setting the floating point unit.
17428 Use of legacy options with new options are faulted. */
17431 if (mcpu_cpu_opt || march_cpu_opt)
17432 as_bad (_("use of old and new-style options to set CPU type"));
17434 mcpu_cpu_opt = legacy_cpu;
17436 else if (!mcpu_cpu_opt)
17437 mcpu_cpu_opt = march_cpu_opt;
17442 as_bad (_("use of old and new-style options to set FPU type"));
17444 mfpu_opt = legacy_fpu;
17446 else if (!mfpu_opt)
17448 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
17449 /* Some environments specify a default FPU. If they don't, infer it
17450 from the processor. */
17452 mfpu_opt = mcpu_fpu_opt;
17454 mfpu_opt = march_fpu_opt;
17456 mfpu_opt = &fpu_default;
17463 mfpu_opt = &fpu_default;
17464 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
17465 mfpu_opt = &fpu_arch_vfp_v2;
17467 mfpu_opt = &fpu_arch_fpa;
17473 mcpu_cpu_opt = &cpu_default;
17474 selected_cpu = cpu_default;
17478 selected_cpu = *mcpu_cpu_opt;
17480 mcpu_cpu_opt = &arm_arch_any;
17483 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
17485 arm_arch_used = thumb_arch_used = arm_arch_none;
17487 #if defined OBJ_COFF || defined OBJ_ELF
17489 unsigned int flags = 0;
17491 #if defined OBJ_ELF
17492 flags = meabi_flags;
17494 switch (meabi_flags)
17496 case EF_ARM_EABI_UNKNOWN:
17498 /* Set the flags in the private structure. */
17499 if (uses_apcs_26) flags |= F_APCS26;
17500 if (support_interwork) flags |= F_INTERWORK;
17501 if (uses_apcs_float) flags |= F_APCS_FLOAT;
17502 if (pic_code) flags |= F_PIC;
17503 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
17504 flags |= F_SOFT_FLOAT;
17506 switch (mfloat_abi_opt)
17508 case ARM_FLOAT_ABI_SOFT:
17509 case ARM_FLOAT_ABI_SOFTFP:
17510 flags |= F_SOFT_FLOAT;
17513 case ARM_FLOAT_ABI_HARD:
17514 if (flags & F_SOFT_FLOAT)
17515 as_bad (_("hard-float conflicts with specified fpu"));
17519 /* Using pure-endian doubles (even if soft-float). */
17520 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
17521 flags |= F_VFP_FLOAT;
17523 #if defined OBJ_ELF
17524 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
17525 flags |= EF_ARM_MAVERICK_FLOAT;
17528 case EF_ARM_EABI_VER4:
17529 case EF_ARM_EABI_VER5:
17530 /* No additional flags to set. */
17537 bfd_set_private_flags (stdoutput, flags);
17539 /* We have run out flags in the COFF header to encode the
17540 status of ATPCS support, so instead we create a dummy,
17541 empty, debug section called .arm.atpcs. */
17546 sec = bfd_make_section (stdoutput, ".arm.atpcs");
17550 bfd_set_section_flags
17551 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
17552 bfd_set_section_size (stdoutput, sec, 0);
17553 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
17559 /* Record the CPU type as well. */
17560 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
17561 mach = bfd_mach_arm_iWMMXt;
17562 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
17563 mach = bfd_mach_arm_XScale;
17564 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
17565 mach = bfd_mach_arm_ep9312;
17566 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
17567 mach = bfd_mach_arm_5TE;
17568 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
17570 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
17571 mach = bfd_mach_arm_5T;
17573 mach = bfd_mach_arm_5;
17575 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
17577 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
17578 mach = bfd_mach_arm_4T;
17580 mach = bfd_mach_arm_4;
17582 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
17583 mach = bfd_mach_arm_3M;
17584 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
17585 mach = bfd_mach_arm_3;
17586 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
17587 mach = bfd_mach_arm_2a;
17588 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
17589 mach = bfd_mach_arm_2;
17591 mach = bfd_mach_arm_unknown;
17593 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
17596 /* Command line processing. */
17599 Invocation line includes a switch not recognized by the base assembler.
17600 See if it's a processor-specific option.
17602 This routine is somewhat complicated by the need for backwards
17603 compatibility (since older releases of gcc can't be changed).
17604 The new options try to make the interface as compatible as
17607 New options (supported) are:
17609 -mcpu=<cpu name> Assemble for selected processor
17610 -march=<architecture name> Assemble for selected architecture
17611 -mfpu=<fpu architecture> Assemble for selected FPU.
17612 -EB/-mbig-endian Big-endian
17613 -EL/-mlittle-endian Little-endian
17614 -k Generate PIC code
17615 -mthumb Start in Thumb mode
17616 -mthumb-interwork Code supports ARM/Thumb interworking
17618 For now we will also provide support for:
17620 -mapcs-32 32-bit Program counter
17621 -mapcs-26 26-bit Program counter
17622 -macps-float Floats passed in FP registers
17623 -mapcs-reentrant Reentrant code
17625 (sometime these will probably be replaced with -mapcs=<list of options>
17626 and -matpcs=<list of options>)
17628 The remaining options are only supported for back-wards compatibility.
17629 Cpu variants, the arm part is optional:
17630 -m[arm]1 Currently not supported.
17631 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
17632 -m[arm]3 Arm 3 processor
17633 -m[arm]6[xx], Arm 6 processors
17634 -m[arm]7[xx][t][[d]m] Arm 7 processors
17635 -m[arm]8[10] Arm 8 processors
17636 -m[arm]9[20][tdmi] Arm 9 processors
17637 -mstrongarm[110[0]] StrongARM processors
17638 -mxscale XScale processors
17639 -m[arm]v[2345[t[e]]] Arm architectures
17640 -mall All (except the ARM1)
17642 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
17643 -mfpe-old (No float load/store multiples)
17644 -mvfpxd VFP Single precision
17646 -mno-fpu Disable all floating point instructions
17648 The following CPU names are recognized:
17649 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
17650 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
17651 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
17652 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
17653 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
17654 arm10t arm10e, arm1020t, arm1020e, arm10200e,
17655 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
17659 const char * md_shortopts = "m:k";
17661 #ifdef ARM_BI_ENDIAN
17662 #define OPTION_EB (OPTION_MD_BASE + 0)
17663 #define OPTION_EL (OPTION_MD_BASE + 1)
17665 #if TARGET_BYTES_BIG_ENDIAN
17666 #define OPTION_EB (OPTION_MD_BASE + 0)
17668 #define OPTION_EL (OPTION_MD_BASE + 1)
17672 struct option md_longopts[] =
17675 {"EB", no_argument, NULL, OPTION_EB},
17678 {"EL", no_argument, NULL, OPTION_EL},
17680 {NULL, no_argument, NULL, 0}
17683 size_t md_longopts_size = sizeof (md_longopts);
17685 struct arm_option_table
17687 char *option; /* Option name to match. */
17688 char *help; /* Help information. */
17689 int *var; /* Variable to change. */
17690 int value; /* What to change it to. */
17691 char *deprecated; /* If non-null, print this message. */
17694 struct arm_option_table arm_opts[] =
17696 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
17697 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
17698 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
17699 &support_interwork, 1, NULL},
17700 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
17701 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
17702 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
17704 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
17705 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
17706 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
17707 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
17710 /* These are recognized by the assembler, but have no affect on code. */
17711 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
17712 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
17713 {NULL, NULL, NULL, 0, NULL}
17716 struct arm_legacy_option_table
17718 char *option; /* Option name to match. */
17719 const arm_feature_set **var; /* Variable to change. */
17720 const arm_feature_set value; /* What to change it to. */
17721 char *deprecated; /* If non-null, print this message. */
17724 const struct arm_legacy_option_table arm_legacy_opts[] =
17726 /* DON'T add any new processors to this list -- we want the whole list
17727 to go away... Add them to the processors table instead. */
17728 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
17729 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
17730 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
17731 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
17732 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
17733 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
17734 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
17735 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
17736 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
17737 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
17738 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
17739 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
17740 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
17741 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
17742 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
17743 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
17744 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
17745 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
17746 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
17747 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
17748 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
17749 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
17750 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
17751 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
17752 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
17753 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
17754 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
17755 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
17756 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
17757 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
17758 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
17759 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
17760 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
17761 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
17762 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
17763 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
17764 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
17765 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
17766 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
17767 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
17768 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
17769 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
17770 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
17771 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
17772 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
17773 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
17774 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17775 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17776 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17777 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
17778 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
17779 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
17780 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
17781 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
17782 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
17783 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
17784 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
17785 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
17786 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
17787 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
17788 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
17789 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
17790 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
17791 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
17792 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
17793 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
17794 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
17795 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
17796 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
17797 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
17798 N_("use -mcpu=strongarm110")},
17799 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
17800 N_("use -mcpu=strongarm1100")},
17801 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
17802 N_("use -mcpu=strongarm1110")},
17803 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
17804 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
17805 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
17807 /* Architecture variants -- don't add any more to this list either. */
17808 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
17809 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
17810 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
17811 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
17812 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
17813 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
17814 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
17815 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
17816 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
17817 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
17818 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
17819 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
17820 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
17821 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
17822 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
17823 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
17824 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
17825 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
17827 /* Floating point variants -- don't add any more to this list either. */
17828 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
17829 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
17830 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
17831 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
17832 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
17834 {NULL, NULL, ARM_ARCH_NONE, NULL}
17837 struct arm_cpu_option_table
17840 const arm_feature_set value;
17841 /* For some CPUs we assume an FPU unless the user explicitly sets
17843 const arm_feature_set default_fpu;
17844 /* The canonical name of the CPU, or NULL to use NAME converted to upper
17846 const char *canonical_name;
17849 /* This list should, at a minimum, contain all the cpu names
17850 recognized by GCC. */
17851 static const struct arm_cpu_option_table arm_cpus[] =
17853 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
17854 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
17855 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
17856 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
17857 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
17858 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17859 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17860 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17861 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17862 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17863 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17864 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17865 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17866 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17867 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17868 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17869 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17870 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17871 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17872 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17873 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17874 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17875 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17876 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17877 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17878 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17879 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17880 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17881 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17882 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17883 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17884 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17885 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17886 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17887 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17888 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17889 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17890 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17891 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17892 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
17893 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17894 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17895 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17896 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17897 /* For V5 or later processors we default to using VFP; but the user
17898 should really set the FPU type explicitly. */
17899 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17900 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17901 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
17902 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
17903 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
17904 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17905 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
17906 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17907 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17908 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
17909 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17910 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17911 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17912 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17913 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17914 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
17915 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17916 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17917 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17918 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
17919 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
17920 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
17921 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
17922 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
17923 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
17924 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
17925 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
17926 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
17927 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
17928 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
17929 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
17930 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
17931 | FPU_NEON_EXT_V1),
17933 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
17934 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
17935 /* ??? XSCALE is really an architecture. */
17936 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
17937 /* ??? iwmmxt is not a processor. */
17938 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
17939 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
17941 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
17942 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
17945 struct arm_arch_option_table
17948 const arm_feature_set value;
17949 const arm_feature_set default_fpu;
17952 /* This list should, at a minimum, contain all the architecture names
17953 recognized by GCC. */
17954 static const struct arm_arch_option_table arm_archs[] =
17956 {"all", ARM_ANY, FPU_ARCH_FPA},
17957 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
17958 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
17959 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
17960 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
17961 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
17962 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
17963 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
17964 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
17965 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
17966 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
17967 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
17968 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
17969 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
17970 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
17971 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
17972 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
17973 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
17974 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
17975 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
17976 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
17977 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
17978 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
17979 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
17980 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
17981 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
17982 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
17983 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
17984 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
17985 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
17986 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
17987 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
17988 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
17991 /* ISA extensions in the co-processor space. */
17992 struct arm_option_cpu_value_table
17995 const arm_feature_set value;
17998 static const struct arm_option_cpu_value_table arm_extensions[] =
18000 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
18001 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
18002 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
18003 {NULL, ARM_ARCH_NONE}
18006 /* This list should, at a minimum, contain all the fpu names
18007 recognized by GCC. */
18008 static const struct arm_option_cpu_value_table arm_fpus[] =
18010 {"softfpa", FPU_NONE},
18011 {"fpe", FPU_ARCH_FPE},
18012 {"fpe2", FPU_ARCH_FPE},
18013 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
18014 {"fpa", FPU_ARCH_FPA},
18015 {"fpa10", FPU_ARCH_FPA},
18016 {"fpa11", FPU_ARCH_FPA},
18017 {"arm7500fe", FPU_ARCH_FPA},
18018 {"softvfp", FPU_ARCH_VFP},
18019 {"softvfp+vfp", FPU_ARCH_VFP_V2},
18020 {"vfp", FPU_ARCH_VFP_V2},
18021 {"vfp9", FPU_ARCH_VFP_V2},
18022 {"vfp3", FPU_ARCH_VFP_V3},
18023 {"vfp10", FPU_ARCH_VFP_V2},
18024 {"vfp10-r0", FPU_ARCH_VFP_V1},
18025 {"vfpxd", FPU_ARCH_VFP_V1xD},
18026 {"arm1020t", FPU_ARCH_VFP_V1},
18027 {"arm1020e", FPU_ARCH_VFP_V2},
18028 {"arm1136jfs", FPU_ARCH_VFP_V2},
18029 {"arm1136jf-s", FPU_ARCH_VFP_V2},
18030 {"maverick", FPU_ARCH_MAVERICK},
18031 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
18032 {NULL, ARM_ARCH_NONE}
18035 struct arm_option_value_table
18041 static const struct arm_option_value_table arm_float_abis[] =
18043 {"hard", ARM_FLOAT_ABI_HARD},
18044 {"softfp", ARM_FLOAT_ABI_SOFTFP},
18045 {"soft", ARM_FLOAT_ABI_SOFT},
18050 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
18051 static const struct arm_option_value_table arm_eabis[] =
18053 {"gnu", EF_ARM_EABI_UNKNOWN},
18054 {"4", EF_ARM_EABI_VER4},
18055 {"5", EF_ARM_EABI_VER5},
18060 struct arm_long_option_table
18062 char * option; /* Substring to match. */
18063 char * help; /* Help information. */
18064 int (* func) (char * subopt); /* Function to decode sub-option. */
18065 char * deprecated; /* If non-null, print this message. */
18069 arm_parse_extension (char * str, const arm_feature_set **opt_p)
18071 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
18073 /* Copy the feature set, so that we can modify it. */
18074 *ext_set = **opt_p;
18077 while (str != NULL && *str != 0)
18079 const struct arm_option_cpu_value_table * opt;
18085 as_bad (_("invalid architectural extension"));
18090 ext = strchr (str, '+');
18093 optlen = ext - str;
18095 optlen = strlen (str);
18099 as_bad (_("missing architectural extension"));
18103 for (opt = arm_extensions; opt->name != NULL; opt++)
18104 if (strncmp (opt->name, str, optlen) == 0)
18106 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
18110 if (opt->name == NULL)
18112 as_bad (_("unknown architectural extnsion `%s'"), str);
18123 arm_parse_cpu (char * str)
18125 const struct arm_cpu_option_table * opt;
18126 char * ext = strchr (str, '+');
18130 optlen = ext - str;
18132 optlen = strlen (str);
18136 as_bad (_("missing cpu name `%s'"), str);
18140 for (opt = arm_cpus; opt->name != NULL; opt++)
18141 if (strncmp (opt->name, str, optlen) == 0)
18143 mcpu_cpu_opt = &opt->value;
18144 mcpu_fpu_opt = &opt->default_fpu;
18145 if (opt->canonical_name)
18146 strcpy(selected_cpu_name, opt->canonical_name);
18150 for (i = 0; i < optlen; i++)
18151 selected_cpu_name[i] = TOUPPER (opt->name[i]);
18152 selected_cpu_name[i] = 0;
18156 return arm_parse_extension (ext, &mcpu_cpu_opt);
18161 as_bad (_("unknown cpu `%s'"), str);
18166 arm_parse_arch (char * str)
18168 const struct arm_arch_option_table *opt;
18169 char *ext = strchr (str, '+');
18173 optlen = ext - str;
18175 optlen = strlen (str);
18179 as_bad (_("missing architecture name `%s'"), str);
18183 for (opt = arm_archs; opt->name != NULL; opt++)
18184 if (streq (opt->name, str))
18186 march_cpu_opt = &opt->value;
18187 march_fpu_opt = &opt->default_fpu;
18188 strcpy(selected_cpu_name, opt->name);
18191 return arm_parse_extension (ext, &march_cpu_opt);
18196 as_bad (_("unknown architecture `%s'\n"), str);
18201 arm_parse_fpu (char * str)
18203 const struct arm_option_cpu_value_table * opt;
18205 for (opt = arm_fpus; opt->name != NULL; opt++)
18206 if (streq (opt->name, str))
18208 mfpu_opt = &opt->value;
18212 as_bad (_("unknown floating point format `%s'\n"), str);
18217 arm_parse_float_abi (char * str)
18219 const struct arm_option_value_table * opt;
18221 for (opt = arm_float_abis; opt->name != NULL; opt++)
18222 if (streq (opt->name, str))
18224 mfloat_abi_opt = opt->value;
18228 as_bad (_("unknown floating point abi `%s'\n"), str);
18234 arm_parse_eabi (char * str)
18236 const struct arm_option_value_table *opt;
18238 for (opt = arm_eabis; opt->name != NULL; opt++)
18239 if (streq (opt->name, str))
18241 meabi_flags = opt->value;
18244 as_bad (_("unknown EABI `%s'\n"), str);
18249 struct arm_long_option_table arm_long_opts[] =
18251 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
18252 arm_parse_cpu, NULL},
18253 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
18254 arm_parse_arch, NULL},
18255 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
18256 arm_parse_fpu, NULL},
18257 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
18258 arm_parse_float_abi, NULL},
18260 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
18261 arm_parse_eabi, NULL},
18263 {NULL, NULL, 0, NULL}
18267 md_parse_option (int c, char * arg)
18269 struct arm_option_table *opt;
18270 const struct arm_legacy_option_table *fopt;
18271 struct arm_long_option_table *lopt;
18277 target_big_endian = 1;
18283 target_big_endian = 0;
18288 /* Listing option. Just ignore these, we don't support additional
18293 for (opt = arm_opts; opt->option != NULL; opt++)
18295 if (c == opt->option[0]
18296 && ((arg == NULL && opt->option[1] == 0)
18297 || streq (arg, opt->option + 1)))
18299 #if WARN_DEPRECATED
18300 /* If the option is deprecated, tell the user. */
18301 if (opt->deprecated != NULL)
18302 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
18303 arg ? arg : "", _(opt->deprecated));
18306 if (opt->var != NULL)
18307 *opt->var = opt->value;
18313 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
18315 if (c == fopt->option[0]
18316 && ((arg == NULL && fopt->option[1] == 0)
18317 || streq (arg, fopt->option + 1)))
18319 #if WARN_DEPRECATED
18320 /* If the option is deprecated, tell the user. */
18321 if (fopt->deprecated != NULL)
18322 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
18323 arg ? arg : "", _(fopt->deprecated));
18326 if (fopt->var != NULL)
18327 *fopt->var = &fopt->value;
18333 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
18335 /* These options are expected to have an argument. */
18336 if (c == lopt->option[0]
18338 && strncmp (arg, lopt->option + 1,
18339 strlen (lopt->option + 1)) == 0)
18341 #if WARN_DEPRECATED
18342 /* If the option is deprecated, tell the user. */
18343 if (lopt->deprecated != NULL)
18344 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
18345 _(lopt->deprecated));
18348 /* Call the sup-option parser. */
18349 return lopt->func (arg + strlen (lopt->option) - 1);
18360 md_show_usage (FILE * fp)
18362 struct arm_option_table *opt;
18363 struct arm_long_option_table *lopt;
18365 fprintf (fp, _(" ARM-specific assembler options:\n"));
18367 for (opt = arm_opts; opt->option != NULL; opt++)
18368 if (opt->help != NULL)
18369 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
18371 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
18372 if (lopt->help != NULL)
18373 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
18377 -EB assemble code for a big-endian cpu\n"));
18382 -EL assemble code for a little-endian cpu\n"));
18391 arm_feature_set flags;
18392 } cpu_arch_ver_table;
18394 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
18395 least features first. */
18396 static const cpu_arch_ver_table cpu_arch_ver[] =
18401 {4, ARM_ARCH_V5TE},
18402 {5, ARM_ARCH_V5TEJ},
18406 {9, ARM_ARCH_V6T2},
18407 {10, ARM_ARCH_V7A},
18408 {10, ARM_ARCH_V7R},
18409 {10, ARM_ARCH_V7M},
18413 /* Set the public EABI object attributes. */
18415 aeabi_set_public_attributes (void)
18418 arm_feature_set flags;
18419 arm_feature_set tmp;
18420 const cpu_arch_ver_table *p;
18422 /* Choose the architecture based on the capabilities of the requested cpu
18423 (if any) and/or the instructions actually used. */
18424 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
18425 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
18426 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
18430 for (p = cpu_arch_ver; p->val; p++)
18432 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
18435 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
18439 /* Tag_CPU_name. */
18440 if (selected_cpu_name[0])
18444 p = selected_cpu_name;
18445 if (strncmp(p, "armv", 4) == 0)
18450 for (i = 0; p[i]; i++)
18451 p[i] = TOUPPER (p[i]);
18453 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
18455 /* Tag_CPU_arch. */
18456 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
18457 /* Tag_CPU_arch_profile. */
18458 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
18459 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
18460 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
18461 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
18462 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
18463 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
18464 /* Tag_ARM_ISA_use. */
18465 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
18466 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
18467 /* Tag_THUMB_ISA_use. */
18468 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
18469 elf32_arm_add_eabi_attr_int (stdoutput, 9,
18470 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
18471 /* Tag_VFP_arch. */
18472 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
18473 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
18474 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
18475 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
18476 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
18477 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
18478 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
18479 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
18480 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
18481 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
18482 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
18483 /* Tag_WMMX_arch. */
18484 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
18485 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
18486 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
18487 /* Tag_NEON_arch. */
18488 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
18489 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
18490 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
18493 /* Add the .ARM.attributes section. */
18502 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
18505 aeabi_set_public_attributes ();
18506 size = elf32_arm_eabi_attr_size (stdoutput);
18507 s = subseg_new (".ARM.attributes", 0);
18508 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
18509 addr = frag_now_fix ();
18510 p = frag_more (size);
18511 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
18513 #endif /* OBJ_ELF */
18516 /* Parse a .cpu directive. */
18519 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
18521 const struct arm_cpu_option_table *opt;
18525 name = input_line_pointer;
18526 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18527 input_line_pointer++;
18528 saved_char = *input_line_pointer;
18529 *input_line_pointer = 0;
18531 /* Skip the first "all" entry. */
18532 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
18533 if (streq (opt->name, name))
18535 mcpu_cpu_opt = &opt->value;
18536 selected_cpu = opt->value;
18537 if (opt->canonical_name)
18538 strcpy(selected_cpu_name, opt->canonical_name);
18542 for (i = 0; opt->name[i]; i++)
18543 selected_cpu_name[i] = TOUPPER (opt->name[i]);
18544 selected_cpu_name[i] = 0;
18546 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18547 *input_line_pointer = saved_char;
18548 demand_empty_rest_of_line ();
18551 as_bad (_("unknown cpu `%s'"), name);
18552 *input_line_pointer = saved_char;
18553 ignore_rest_of_line ();
18557 /* Parse a .arch directive. */
18560 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
18562 const struct arm_arch_option_table *opt;
18566 name = input_line_pointer;
18567 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18568 input_line_pointer++;
18569 saved_char = *input_line_pointer;
18570 *input_line_pointer = 0;
18572 /* Skip the first "all" entry. */
18573 for (opt = arm_archs + 1; opt->name != NULL; opt++)
18574 if (streq (opt->name, name))
18576 mcpu_cpu_opt = &opt->value;
18577 selected_cpu = opt->value;
18578 strcpy(selected_cpu_name, opt->name);
18579 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18580 *input_line_pointer = saved_char;
18581 demand_empty_rest_of_line ();
18585 as_bad (_("unknown architecture `%s'\n"), name);
18586 *input_line_pointer = saved_char;
18587 ignore_rest_of_line ();
18591 /* Parse a .fpu directive. */
18594 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
18596 const struct arm_option_cpu_value_table *opt;
18600 name = input_line_pointer;
18601 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
18602 input_line_pointer++;
18603 saved_char = *input_line_pointer;
18604 *input_line_pointer = 0;
18606 for (opt = arm_fpus; opt->name != NULL; opt++)
18607 if (streq (opt->name, name))
18609 mfpu_opt = &opt->value;
18610 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
18611 *input_line_pointer = saved_char;
18612 demand_empty_rest_of_line ();
18616 as_bad (_("unknown floating point format `%s'\n"), name);
18617 *input_line_pointer = saved_char;
18618 ignore_rest_of_line ();