1 /* tc-arm.c -- Assemble for the ARM
2 Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
4 Free Software Foundation, Inc.
5 Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
6 Modified by David Taylor (dtaylor@armltd.co.uk)
7 Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
8 Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
9 Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
11 This file is part of GAS, the GNU Assembler.
13 GAS is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
18 GAS is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with GAS; see the file COPYING. If not, write to the Free
25 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
32 #include "safe-ctype.h"
34 /* Need TARGET_CPU. */
41 #include "opcode/arm.h"
45 #include "dwarf2dbg.h"
46 #include "dw2gencfi.h"
/* XXX Set this to 1 after the next binutils release.  */
/* Nonzero enables warnings about deprecated instruction forms.  */
#define WARN_DEPRECATED 0

/* Must be at least the size of the largest unwind opcode (currently two).  */
#define ARM_OPCODE_CHUNK_SIZE 8

/* This structure holds the unwinding state.  */
/* NOTE(review): the struct's opening and several members are elided in
   this extract.  */
symbolS * table_entry;
symbolS * personality_routine;
int personality_index;
/* The segment containing the function.  */
/* Opcodes generated from this function.  */
unsigned char * opcodes;
/* The number of bytes pushed to the stack.  */
/* We don't add stack adjustment opcodes immediately so that we can merge
   multiple adjustments.  We can also omit the final adjustment
   when using a frame pointer.  */
offsetT pending_offset;
/* These two fields are set by both unwind_movsp and unwind_setfp.  They
   hold the reg+offset to use when restoring sp from a frame pointer.  */
/* Nonzero if an unwind_setfp directive has been seen.  */
/* Nonzero if the last opcode restores sp from fp_reg.  */
unsigned sp_restored:1;

/* Bit N indicates that an R_ARM_NONE relocation has been output for
   __aeabi_unwind_cpp_prN already if set.  This enables dependencies to be
   emitted only once per section, to save unnecessary bloat.  */
static unsigned int marked_pr_dependency = 0;
/* Types of processor to assemble for.  */
#if defined __XSCALE__
#define CPU_DEFAULT ARM_ARCH_XSCALE
#if defined __thumb__
#define CPU_DEFAULT ARM_ARCH_V5T
/* Per-target default FPU: FPA for legacy targets, VFP for EABI-style
   ones.  The selecting conditionals are partially elided here.  */
# define FPU_DEFAULT FPU_ARCH_FPA
# elif defined (TE_NetBSD)
# define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, but VFP order.  */
/* Legacy a.out format.  */
# define FPU_DEFAULT FPU_ARCH_FPA /* Soft-float, but FPA order.  */
# elif defined (TE_VXWORKS)
# define FPU_DEFAULT FPU_ARCH_VFP /* Soft-float, VFP order.  */
/* For backwards compatibility, default to FPA.  */
# define FPU_DEFAULT FPU_ARCH_FPA
#endif /* ifndef FPU_DEFAULT */
#define streq(a, b) (strcmp (a, b) == 0)

/* The active CPU/FPU feature set, and the features actually used by
   the assembled code (tracked separately for ARM and Thumb).  */
static arm_feature_set cpu_variant;
static arm_feature_set arm_arch_used;
static arm_feature_set thumb_arch_used;

/* Flags stored in private area of BFD structure.  */
static int uses_apcs_26 = FALSE;
static int atpcs = FALSE;
static int support_interwork = FALSE;
static int uses_apcs_float = FALSE;
static int pic_code = FALSE;

/* Variables that we set while parsing command-line options.  Once all
   options have been read we re-process these values to set the real
   assembly flags.  */
static const arm_feature_set *legacy_cpu = NULL;
static const arm_feature_set *legacy_fpu = NULL;

static const arm_feature_set *mcpu_cpu_opt = NULL;
static const arm_feature_set *mcpu_fpu_opt = NULL;
static const arm_feature_set *march_cpu_opt = NULL;
static const arm_feature_set *march_fpu_opt = NULL;
static const arm_feature_set *mfpu_opt = NULL;

/* Constants for known architecture features.  */
static const arm_feature_set fpu_default = FPU_DEFAULT;
static const arm_feature_set fpu_arch_vfp_v1 = FPU_ARCH_VFP_V1;
static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
static const arm_feature_set fpu_arch_vfp_v3 = FPU_ARCH_VFP_V3;
static const arm_feature_set fpu_arch_neon_v1 = FPU_ARCH_NEON_V1;
static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;

static const arm_feature_set cpu_default = CPU_DEFAULT;

/* Feature sets naming each individual architecture extension.  */
static const arm_feature_set arm_ext_v1 = ARM_FEATURE (ARM_EXT_V1, 0);
171 static const arm_feature_set arm_ext_v2 = ARM_FEATURE (ARM_EXT_V1, 0);
static const arm_feature_set arm_ext_v2s = ARM_FEATURE (ARM_EXT_V2S, 0);
static const arm_feature_set arm_ext_v3 = ARM_FEATURE (ARM_EXT_V3, 0);
static const arm_feature_set arm_ext_v3m = ARM_FEATURE (ARM_EXT_V3M, 0);
static const arm_feature_set arm_ext_v4 = ARM_FEATURE (ARM_EXT_V4, 0);
static const arm_feature_set arm_ext_v4t = ARM_FEATURE (ARM_EXT_V4T, 0);
static const arm_feature_set arm_ext_v5 = ARM_FEATURE (ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v4t_5 =
  ARM_FEATURE (ARM_EXT_V4T | ARM_EXT_V5, 0);
static const arm_feature_set arm_ext_v5t = ARM_FEATURE (ARM_EXT_V5T, 0);
static const arm_feature_set arm_ext_v5e = ARM_FEATURE (ARM_EXT_V5E, 0);
static const arm_feature_set arm_ext_v5exp = ARM_FEATURE (ARM_EXT_V5ExP, 0);
static const arm_feature_set arm_ext_v5j = ARM_FEATURE (ARM_EXT_V5J, 0);
static const arm_feature_set arm_ext_v6 = ARM_FEATURE (ARM_EXT_V6, 0);
static const arm_feature_set arm_ext_v6k = ARM_FEATURE (ARM_EXT_V6K, 0);
static const arm_feature_set arm_ext_v6z = ARM_FEATURE (ARM_EXT_V6Z, 0);
static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE (ARM_EXT_V6T2, 0);
static const arm_feature_set arm_ext_v6_notm = ARM_FEATURE (ARM_EXT_V6_NOTM, 0);
static const arm_feature_set arm_ext_div = ARM_FEATURE (ARM_EXT_DIV, 0);
static const arm_feature_set arm_ext_v7 = ARM_FEATURE (ARM_EXT_V7, 0);
static const arm_feature_set arm_ext_v7a = ARM_FEATURE (ARM_EXT_V7A, 0);
static const arm_feature_set arm_ext_v7r = ARM_FEATURE (ARM_EXT_V7R, 0);
static const arm_feature_set arm_ext_v7m = ARM_FEATURE (ARM_EXT_V7M, 0);

/* Aggregate architecture masks.  */
static const arm_feature_set arm_arch_any = ARM_ANY;
static const arm_feature_set arm_arch_full = ARM_FEATURE (-1, -1);
static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;

/* Coprocessor and FPU extension feature sets.  */
static const arm_feature_set arm_cext_iwmmxt =
  ARM_FEATURE (0, ARM_CEXT_IWMMXT);
static const arm_feature_set arm_cext_xscale =
  ARM_FEATURE (0, ARM_CEXT_XSCALE);
static const arm_feature_set arm_cext_maverick =
  ARM_FEATURE (0, ARM_CEXT_MAVERICK);
static const arm_feature_set fpu_fpa_ext_v1 = ARM_FEATURE (0, FPU_FPA_EXT_V1);
static const arm_feature_set fpu_fpa_ext_v2 = ARM_FEATURE (0, FPU_FPA_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v1xd =
  ARM_FEATURE (0, FPU_VFP_EXT_V1xD);
static const arm_feature_set fpu_vfp_ext_v1 = ARM_FEATURE (0, FPU_VFP_EXT_V1);
static const arm_feature_set fpu_vfp_ext_v2 = ARM_FEATURE (0, FPU_VFP_EXT_V2);
static const arm_feature_set fpu_vfp_ext_v3 = ARM_FEATURE (0, FPU_VFP_EXT_V3);
static const arm_feature_set fpu_neon_ext_v1 = ARM_FEATURE (0, FPU_NEON_EXT_V1);
static const arm_feature_set fpu_vfp_v3_or_neon_ext =
  ARM_FEATURE (0, FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
/* -mfloat-abi selection; -1 means "not specified".  */
static int mfloat_abi_opt = -1;
/* Record user cpu selection for object attributes.  */
static arm_feature_set selected_cpu = ARM_ARCH_NONE;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[16];
/* EABI flags.  The two definitions below are alternatives chosen by
   preprocessor conditionals that are elided in this extract.  */
static int meabi_flags = EABI_DEFAULT;
static int meabi_flags = EF_ARM_EABI_UNKNOWN;

/* Pre-defined "_GLOBAL_OFFSET_TABLE_" */
symbolS * GOT_symbol;

/* 0: assemble for ARM,
   1: assemble for Thumb,
   2: assemble for Thumb even though target CPU does not support thumb
      instructions.  */
static int thumb_mode = 0;

/* If unified_syntax is true, we are processing the new unified
   ARM/Thumb syntax.  Important differences from the old ARM mode:

   - Immediate operands do not require a # prefix.
   - Conditional affixes always appear at the end of the
     instruction.  (For backward compatibility, those instructions
     that formerly had them in the middle, continue to accept them
     there.)
   - The IT instruction may appear, and if it does is validated
     against subsequent conditional affixes.  It does not generate
     machine code.

   Important differences from the old Thumb mode:

   - Immediate operands do not require a # prefix.
   - Most of the V6T2 instructions are only available in unified mode.
   - The .N and .W suffixes are recognized and honored (it is an error
     if they cannot be honored).
   - All instructions set the flags if and only if they have an 's' affix.
   - Conditional affixes may be used.  They are validated against
     preceding IT instructions.  Unlike ARM mode, you cannot use a
     conditional affix except in the scope of an IT instruction.  */

static bfd_boolean unified_syntax = FALSE;
/* NOTE(review): the enclosing struct declarations (neon_type_el,
   neon_type, arm_it and its operand array) are partially elided in
   this extract; only scattered members are visible.  */
enum neon_el_type type;
#define NEON_MAX_TYPE_ELS 4
struct neon_type_el el[NEON_MAX_TYPE_ELS];
unsigned long instruction;
struct neon_type vectype;
/* Set to the opcode if the instruction needs relaxation.
   Zero if the instruction is not relaxed.  */
bfd_reloc_code_real_type type;
/* Per-operand parse flags.  */
unsigned present : 1;  /* Operand present.  */
unsigned isreg : 1;  /* Operand was a register.  */
unsigned immisreg : 1;  /* .imm field is a second register.  */
unsigned isscalar : 1;  /* Operand is a (Neon) scalar.  */
unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
/* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
   instructions.  This allows us to disambiguate ARM <-> vector insns.  */
unsigned regisimm : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
unsigned isquad : 1;  /* Operand is Neon quad-precision register.  */
unsigned hasreloc : 1;  /* Operand has relocation suffix.  */
unsigned writeback : 1;  /* Operand has trailing !  */
unsigned preind : 1;  /* Preindexed address.  */
unsigned postind : 1;  /* Postindexed address.  */
unsigned negative : 1;  /* Index register was negated.  */
unsigned shifted : 1;  /* Shift applied to operation.  */
unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */

/* The instruction currently being assembled.  */
static struct arm_it inst;

#define NUM_FLOAT_VALS 8

/* FPA immediate float constants (terminated by 0).  */
const char * fp_const[] =
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0

/* Number of littlenums required to hold an extended precision number.  */
#define MAX_LITTLENUMS 6

LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
/* Coprocessor transfer control bits.  */
#define CP_T_X 0x00008000
#define CP_T_Y 0x00400000

#define CONDS_BIT 0x00100000
#define LOAD_BIT 0x00100000

#define DOUBLE_LOAD_FLAG 0x00000001

/* NOTE(review): the struct/enum wrappers around the following members
   are elided in this extract.  */
const char * template;

#define COND_ALWAYS 0xE

const char *template;

struct asm_barrier_opt
  const char *template;

/* The bit that distinguishes CPSR and SPSR.  */
#define SPSR_BIT (1 << 22)

/* The individual PSR flag bits.  */
#define PSR_c (1 << 16)
#define PSR_x (1 << 17)
#define PSR_s (1 << 18)
#define PSR_f (1 << 19)

bfd_reloc_code_real_type reloc;

/* Enumerators for VFP register operand positions and load/store-multiple
   addressing modes (enum headers elided).  */
VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn

VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
/* ARM register categories.  This includes coprocessor numbers and various
   architecture extensions' registers.  */

/* Structure for a hash table entry for a register.  */
unsigned char number;
unsigned char builtin;

/* Diagnostics used when we don't get a register of the expected type.
   Indexed by enum arm_reg_type.  */
const char *const reg_expected_msgs[] =
  N_("ARM register expected"),
  N_("bad or missing co-processor number"),
  N_("co-processor register expected"),
  N_("FPA register expected"),
  N_("VFP single precision register expected"),
  N_("VFP/Neon double precision register expected"),
  N_("Neon quad precision register expected"),
  N_("Neon double or quad precision register expected"),
  N_("VFP system register expected"),
  N_("Maverick MVF register expected"),
  N_("Maverick MVD register expected"),
  N_("Maverick MVFX register expected"),
  N_("Maverick MVDX register expected"),
  N_("Maverick MVAX register expected"),
  N_("Maverick DSPSC register expected"),
  N_("iWMMXt data register expected"),
  N_("iWMMXt control register expected"),
  N_("iWMMXt scalar register expected"),
  N_("XScale accumulator register expected"),
/* Some well known registers that we refer to directly elsewhere.  */

/* ARM instructions take 4 bytes in the object file, Thumb instructions
   take 2.  */

/* NOTE(review): the enclosing struct asm_opcode declaration is
   partially elided; only its members are visible below.  */

/* Basic string to match.  */
const char *template;

/* Parameters to instruction.  */
unsigned char operands[8];

/* Conditional tag - see opcode_lookup.  */
unsigned int tag : 4;

/* Basic instruction code.  */
unsigned int avalue : 28;

/* Thumb-format instruction code.  */

/* Which architecture variant provides this instruction.  */
const arm_feature_set *avariant;
const arm_feature_set *tvariant;

/* Function to call to encode instruction in ARM format.  */
void (* aencode) (void);

/* Function to call to encode instruction in Thumb format.  */
void (* tencode) (void);
/* Defines for various bits that we will want to toggle.  */
#define INST_IMMEDIATE 0x02000000
#define OFFSET_REG 0x02000000
#define HWOFFSET_IMM 0x00400000
#define SHIFT_BY_REG 0x00000010
#define PRE_INDEX 0x01000000
#define INDEX_UP 0x00800000
#define WRITE_BACK 0x00200000
#define LDM_TYPE_2_OR_3 0x00400000

#define LITERAL_MASK 0xf000f000
#define OPCODE_MASK 0xfe1fffff
#define V4_STR_BIT 0x00000020

#define DATA_OP_SHIFT 21

#define T2_OPCODE_MASK 0xfe1fffff
#define T2_DATA_OP_SHIFT 21

/* Codes to distinguish the arithmetic instructions.  */
#define OPCODE_CMP 10
#define OPCODE_CMN 11
#define OPCODE_ORR 12
#define OPCODE_MOV 13
#define OPCODE_BIC 14
#define OPCODE_MVN 15

/* Thumb-2 data-processing opcode field values.  */
#define T2_OPCODE_AND 0
#define T2_OPCODE_BIC 1
#define T2_OPCODE_ORR 2
#define T2_OPCODE_ORN 3
#define T2_OPCODE_EOR 4
#define T2_OPCODE_ADD 8
#define T2_OPCODE_ADC 10
#define T2_OPCODE_SBC 11
#define T2_OPCODE_SUB 13
#define T2_OPCODE_RSB 14

/* 16-bit Thumb instruction templates.  */
#define T_OPCODE_MUL 0x4340
#define T_OPCODE_TST 0x4200
#define T_OPCODE_CMN 0x42c0
#define T_OPCODE_NEG 0x4240
#define T_OPCODE_MVN 0x43c0

#define T_OPCODE_ADD_R3 0x1800
#define T_OPCODE_SUB_R3 0x1a00
#define T_OPCODE_ADD_HI 0x4400
#define T_OPCODE_ADD_ST 0xb000
#define T_OPCODE_SUB_ST 0xb080
#define T_OPCODE_ADD_SP 0xa800
#define T_OPCODE_ADD_PC 0xa000
#define T_OPCODE_ADD_I8 0x3000
#define T_OPCODE_SUB_I8 0x3800
#define T_OPCODE_ADD_I3 0x1c00
#define T_OPCODE_SUB_I3 0x1e00

#define T_OPCODE_ASR_R 0x4100
#define T_OPCODE_LSL_R 0x4080
#define T_OPCODE_LSR_R 0x40c0
#define T_OPCODE_ROR_R 0x41c0
#define T_OPCODE_ASR_I 0x1000
#define T_OPCODE_LSL_I 0x0000
#define T_OPCODE_LSR_I 0x0800

#define T_OPCODE_MOV_I8 0x2000
#define T_OPCODE_CMP_I8 0x2800
#define T_OPCODE_CMP_LR 0x4280
#define T_OPCODE_MOV_HR 0x4600
#define T_OPCODE_CMP_HR 0x4500

#define T_OPCODE_LDR_PC 0x4800
#define T_OPCODE_LDR_SP 0x9800
#define T_OPCODE_STR_SP 0x9000
#define T_OPCODE_LDR_IW 0x6800
#define T_OPCODE_STR_IW 0x6000
#define T_OPCODE_LDR_IH 0x8800
#define T_OPCODE_STR_IH 0x8000
#define T_OPCODE_LDR_IB 0x7800
#define T_OPCODE_STR_IB 0x7000
#define T_OPCODE_LDR_RW 0x5800
#define T_OPCODE_STR_RW 0x5000
#define T_OPCODE_LDR_RH 0x5a00
#define T_OPCODE_STR_RH 0x5200
#define T_OPCODE_LDR_RB 0x5c00
#define T_OPCODE_STR_RB 0x5400

#define T_OPCODE_PUSH 0xb400
#define T_OPCODE_POP 0xbc00

#define T_OPCODE_BRANCH 0xe000

#define THUMB_SIZE 2 /* Size of thumb instruction.  */
#define THUMB_PP_PC_LR 0x0100
#define THUMB_LOAD_BIT 0x0800
#define THUMB2_LOAD_BIT 0x00100000

/* Common diagnostic strings.  */
#define BAD_ARGS _("bad arguments to instruction")
#define BAD_PC _("r15 not allowed here")
#define BAD_COND _("instruction cannot be conditional")
#define BAD_OVERLAP _("registers may not be the same")
#define BAD_HIREG _("lo register required")
#define BAD_THUMB32 _("instruction not supported in Thumb16 mode")
/* NOTE(review): the trailing ';' below ends up at each use site.  It is
   harmless in 'inst.error = BAD_ADDR_MODE;' but would break a braceless
   if/else — confirm all uses before removing it.  */
#define BAD_ADDR_MODE _("instruction does not accept this addressing mode");
#define BAD_BRANCH _("branch must be last instruction in IT block")
#define BAD_NOT_IT _("instruction not allowed in IT block")
/* Hash tables for opcode, operand-affix and register lookup.  */
static struct hash_control *arm_ops_hsh;
static struct hash_control *arm_cond_hsh;
static struct hash_control *arm_shift_hsh;
static struct hash_control *arm_psr_hsh;
static struct hash_control *arm_v7m_psr_hsh;
static struct hash_control *arm_reg_hsh;
static struct hash_control *arm_reloc_hsh;
static struct hash_control *arm_barrier_opt_hsh;

/* Stuff needed to resolve the label ambiguity
   (explanatory text elided in this extract).  */

symbolS * last_label_seen;
static int label_is_thumb_function_name = FALSE;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
/* NOTE(review): some members and the typedef's closing are elided.  */
typedef struct literal_pool
  expressionS literals [MAX_LITERAL_POOL_SIZE];
  unsigned int next_free_entry;
  struct literal_pool * next;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;
/* State variables for IT block handling.  */
/* Mask of conditions remaining in the current IT block.  This holds
   multiple significant bits, so it is a plain int (matching current_cc
   below), not a boolean type.  */
static int current_it_mask = 0;
static int current_cc;
/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.  */
const char comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.  */
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

const char line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.  */
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
const char FLT_CHARS[] = "rRsSfFdDxXeEpP";

/* Prefix characters that indicate the start of an immediate
   operand.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* NOTE(review): this skips at most ONE space.  It appears to rely on
   the input having been whitespace-compressed beforehand — confirm
   before changing the 'if' to a loop.  */
#define skip_whitespace(str) do { if (*(str) == ' ') ++(str); } while (0)

/* Advance *STR past character C if present; body elided here.  */
skip_past_char (char ** str, char c)

#define skip_past_comma(str) skip_past_char (str, ',')
/* Arithmetic expressions (possibly involving symbols).  */

/* Return TRUE if anything in the expression is a bignum.  */
/* NOTE(review): several lines of this function are elided in this
   extract (braces and return statements are missing).  */
walk_no_bignums (symbolS * sp)
  if (symbol_get_value_expression (sp)->X_op == O_big)
  if (symbol_get_value_expression (sp)->X_add_symbol)
    return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
            || (symbol_get_value_expression (sp)->X_op_symbol
                && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));

/* Nonzero while my_get_expression is active; md_operand uses this to
   flag bad expressions instead of reporting them directly.  */
static int in_my_get_expression = 0;

/* Third argument to my_get_expression.  */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack.  Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
/* Parse an expression from *STR into EP.  PREFIX_MODE is one of the
   GE_* constants above and controls '#'/'$' prefix handling.  On error
   inst.error is set.  NOTE(review): many lines of this function are
   elided in this extract.  */
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
  /* In unified syntax, all prefixes are optional.  */
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
    case GE_NO_PREFIX: break;
      if (!is_immediate_prefix (**str))
          inst.error = _("immediate expression requires a # prefix");
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))

  memset (ep, 0, sizeof (expressionS));

  /* Redirect the gas expression parser at our string, remembering the
     old input position so it can be restored on every exit path.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = 1;
  seg = expression (ep);
  in_my_get_expression = 0;

  if (ep->X_op == O_illegal)
      /* We found a bad expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
        inst.error = _("bad expression");

  if (seg != absolute_section
      && seg != text_section
      && seg != data_section
      && seg != bss_section
      && seg != undefined_section)
      inst.error = _("bad segment");
      *str = input_line_pointer;
      input_line_pointer = save_in;

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.  Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
          && (walk_no_bignums (ep->X_add_symbol)
              && walk_no_bignums (ep->X_op_symbol))))
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;

  *str = input_line_pointer;
  input_line_pointer = save_in;
/* Turn a string in input_line_pointer into a floating point constant
   of type TYPE, and store the appropriate bytes in *LITP.  The number
   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
   returned, or NULL on OK.

   Note that fp constants aren't represented in the normal way on the ARM.
   In big endian mode, things are as expected.  However, in little endian
   mode fp constants are big-endian word-wise, and little-endian byte-wise
   within the words.  For example, (double) 1.1 in big endian mode is
   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
   the byte sequence 99 99 f1 3f 9a 99 99 99.

   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
/* NOTE(review): parts of this function are elided in this extract.  */
md_atof (int type, char * litP, int * sizeP)
  LITTLENUM_TYPE words[MAX_LITTLENUMS];

    return _("bad call to MD_ATOF()");

  t = atof_ieee (input_line_pointer, type, words);
    input_line_pointer = t;

  if (target_big_endian)
      for (i = 0; i < prec; i++)
          md_number_to_chars (litP, (valueT) words[i], 2);
      /* Pure-endian FPUs keep words in order even when little-endian.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
        for (i = prec - 1; i >= 0; i--)
            md_number_to_chars (litP, (valueT) words[i], 2);
          /* For a 4 byte float the order of elements in `words' is 1 0.
             For an 8 byte float the order is 1 0 3 2.  */
          for (i = 0; i < prec; i += 2)
              md_number_to_chars (litP, (valueT) words[i + 1], 2);
              md_number_to_chars (litP + 2, (valueT) words[i], 2);

/* We handle all bad expressions here, so that we can report the faulty
   instruction in the error message.  */
/* Called back by the expression parser; flags the expression illegal
   only while my_get_expression is active.  */
md_operand (expressionS * expr)
  if (in_my_get_expression)
    expr->X_op = O_illegal;
/* Immediate values.  */

/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   *val receives the number.  */
/* NOTE(review): parts of this function are elided in this extract.  */
immediate_for_directive (int *val)
  exp.X_op = O_illegal;
  /* An optional '#'/'$' prefix is skipped before parsing.  */
  if (is_immediate_prefix (*input_line_pointer))
      input_line_pointer++;
  if (exp.X_op != O_constant)
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();
  *val = exp.X_add_number;
/* Register parsing.  */

/* Generic register parser.  CCP points to what should be the
   beginning of a register name.  If it is indeed a valid register
   name, advance CCP over it and return the reg_entry structure;
   otherwise return NULL.  Does not issue diagnostics.  */
/* NOTE(review): parts of this function are elided in this extract.  */

static struct reg_entry *
arm_reg_parse_multi (char **ccp)
  struct reg_entry *reg;

#ifdef REGISTER_PREFIX
  if (*start != REGISTER_PREFIX)
#ifdef OPTIONAL_REGISTER_PREFIX
  if (*start == OPTIONAL_REGISTER_PREFIX)

  if (!ISALPHA (*p) || !is_name_beginner (*p))
  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');

  reg = (struct reg_entry *) hash_find_n (arm_reg_hsh, start, p - start);

/* As above, but the register must be of type TYPE, and the return
   value is the register number or FAIL.
   If RTYPE is non-zero, return the (possibly restricted) type of the
   register (e.g. Neon double or quad reg when either has been requested).  */
/* NOTE(review): parts of this function are elided in this extract.  */
arm_reg_parse (char **ccp, enum arm_reg_type type, enum arm_reg_type *rtype)
  struct reg_entry *reg = arm_reg_parse_multi (ccp);

  /* Undo polymorphism for Neon D and Q registers.  */
  if (reg && type == REG_TYPE_NDQ
      && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))

  if (reg && reg->type == type)

  /* Alternative syntaxes are accepted for a few register classes.  */
    /* Generic coprocessor register names are allowed for these.  */
    if (reg && reg->type == REG_TYPE_CN)
      /* For backward compatibility, a bare number is valid here.  */
        unsigned long processor = strtoul (start, ccp, 10);
        if (*ccp != start && processor <= 15)
    case REG_TYPE_MMXWC:
      /* WC includes WCG.  ??? I'm not sure this is true for all
         instructions that take WC registers.  */
      if (reg && reg->type == REG_TYPE_MMXWCG)
/* Parse a Neon scalar.  Most of the time when we're parsing a scalar, we don't
   have enough information to be able to do a good job bounds-checking.  So, we
   just do easy checks here, and do further checks later.  */
/* NOTE(review): parts of this function are elided in this extract.
   On success returns (regno * 8) + element index.  */
parse_scalar (char **ccp, int elsize)
  if ((regno = arm_reg_parse (&str, REG_TYPE_VFD, NULL)) == FAIL)

  if (skip_past_char (&str, '[') == FAIL)

  my_get_expression (&exp, &str, GE_NO_PREFIX);
  if (exp.X_op != O_constant)
      inst.error = _("constant expression required");
  elno = exp.X_add_number;

  /* A D register holds 64 bits, so 64/elsize elements of size ELSIZE.  */
  if (elno >= 64 / elsize)
      inst.error = _("scalar index out of range");

  if (skip_past_char (&str, ']') == FAIL)

  /* Parsed scalar successfully.  Skip over it.  */
  return (regno * 8) + elno;
/* Parse an ARM register list.  Returns the bitmask, or FAIL.  */
/* NOTE(review): large parts of this function (brace handling, the
   mask-expression branch structure) are elided in this extract.  */
parse_reg_list (char ** strp)
  char * str = * strp;

  /* We come back here if we get ranges concatenated by '+' or '|'.  */
          if ((reg = arm_reg_parse (&str, REG_TYPE_RN, NULL)) == FAIL)
              inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
                  inst.error = _("bad range in register list");
              /* Fill in all registers between the range endpoints,
                 warning about duplicates.  */
              for (i = cur_reg + 1; i < reg; i++)
                  if (range & (1 << i))
                    (_("Warning: duplicated register (r%d) in register list"),
          if (range & (1 << reg))
            as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
          else if (reg <= cur_reg)
            as_tsktsk (_("Warning: register range not in ascending order"));
      while (skip_past_comma (&str) != FAIL
             || (in_range = 1, *str++ == '-'));
          inst.error = _("missing `}'");

      /* Alternative syntax: an expression giving the mask directly.  */
      if (my_get_expression (&expr, &str, GE_NO_PREFIX))
      if (expr.X_op == O_constant)
          if (expr.X_add_number
              != (expr.X_add_number & 0x0000ffff))
              inst.error = _("invalid register mask");
          if ((range & expr.X_add_number) != 0)
              int regno = range & expr.X_add_number;
              regno = (1 << regno) - 1;
              (_("Warning: duplicated register (r%d) in register list"),
          range |= expr.X_add_number;
          if (inst.reloc.type != 0)
              inst.error = _("expression too complex");
          memcpy (&inst.reloc.exp, &expr, sizeof (expressionS));
          inst.reloc.type = BFD_RELOC_ARM_MULTI;
          inst.reloc.pc_rel = 0;
      if (*str == '|' || *str == '+')
  while (another_range);
1214 /* Types of registers in a list. */
1223 /* Parse a VFP register list. If the string is invalid return FAIL.
1224 Otherwise return the number of registers, and set PBASE to the first
1225 register. Parses registers of type ETYPE.
1226 If REGLIST_NEON_D is used, several syntax enhancements are enabled:
1227 - Q registers can be used to specify pairs of D registers
1228 - { } can be omitted from around a singleton register list
1229 FIXME: This is not implemented, as it would require backtracking in
1232 This could be done (the meaning isn't really ambiguous), but doesn't
1233 fit in well with the current parsing framework.
1234 - 32 D registers may be used (also true for VFPv3). */
1237 parse_vfp_reg_list (char **str, unsigned int *pbase, enum reg_list_els etype)
1241 enum arm_reg_type regtype = 0;
1245 unsigned long mask = 0;
1250 inst.error = _("expecting {");
1259 regtype = REG_TYPE_VFS;
1264 regtype = REG_TYPE_VFD;
1265 /* VFPv3 allows 32 D registers. */
1266 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
1270 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
1273 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
1280 case REGLIST_NEON_D:
1281 regtype = REG_TYPE_NDQ;
1286 base_reg = max_regs;
1290 int setmask = 1, addregs = 1;
1291 new_base = arm_reg_parse (str, regtype, ®type);
1292 if (new_base == FAIL)
1294 inst.error = gettext (reg_expected_msgs[regtype]);
1298 /* Note: a value of 2 * n is returned for the register Q<n>. */
1299 if (regtype == REG_TYPE_NQ)
1305 if (new_base < base_reg)
1306 base_reg = new_base;
1308 if (mask & (setmask << new_base))
1310 inst.error = _("invalid register list");
1314 if ((mask >> new_base) != 0 && ! warned)
1316 as_tsktsk (_("register list not in ascending order"));
1320 mask |= setmask << new_base;
1323 if (**str == '-') /* We have the start of a range expression */
1329 if ((high_range = arm_reg_parse (str, regtype, NULL)) == FAIL)
1331 inst.error = gettext (reg_expected_msgs[regtype]);
1335 if (regtype == REG_TYPE_NQ)
1336 high_range = high_range + 1;
1338 if (high_range <= new_base)
1340 inst.error = _("register range not in ascending order");
1344 for (new_base += addregs; new_base <= high_range; new_base += addregs)
1346 if (mask & (setmask << new_base))
1348 inst.error = _("invalid register list");
1352 mask |= setmask << new_base;
1357 while (skip_past_comma (str) != FAIL);
1361 /* Sanity check -- should have raised a parse error above. */
1362 if (count == 0 || count > max_regs)
1367 /* Final test -- the registers must be consecutive. */
1369 for (i = 0; i < count; i++)
1371 if ((mask & (1u << i)) == 0)
1373 inst.error = _("non-contiguous register range");
/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
   The base register is put in *PBASE.
   The lane (or one of the #defined constants below) is placed in bits [3:0] of
   the return value.
   The register stride (minus one) is put in bit 4 of the return value.
   Bits [6:5] encode the list length (minus one).  */

#define NEON_ALL_LANES 15
#define NEON_INTERLEAVE_LANES 14
#define NEON_LANE(X) ((X) & 0xf)
#define NEON_REG_STRIDE(X) (((X) & (1 << 4)) ? 2 : 1)
#define NEON_REGLIST_LENGTH(X) ((((X) >> 5) & 3) + 1)

/* NOTE(review): parts of this function are elided in this extract.  */
parse_neon_el_struct_list (char **str, unsigned *pbase)
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  /* Translated with _() at each use site below.  */
  const char *const incr_error = "register stride must be 1 or 2";
  const char *const type_error = "mismatched element/structure types in list";

  if (skip_past_char (&ptr, '{') == SUCCESS)
      int getreg = arm_reg_parse (&ptr, rtype, &rtype);
          inst.error = _(reg_expected_msgs[rtype]);
      if (rtype == REG_TYPE_NQ)
      else if (reg_incr == -1)
          reg_incr = getreg - base_reg;
          if (reg_incr < 1 || reg_incr > 2)
              inst.error = _(incr_error);
      else if (getreg != base_reg + reg_incr * count)
          inst.error = _(incr_error);

      /* Handle Dn-Dm or Qn-Qm syntax.  Can only be used with non-indexed list
         forms.  */
          int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
            lane = NEON_INTERLEAVE_LANES;
          else if (lane != NEON_INTERLEAVE_LANES)
              inst.error = _(type_error);
          else if (reg_incr != 1)
              inst.error = _("don't use Rn-Rm syntax with non-unit stride");
          hireg = arm_reg_parse (&ptr, rtype, NULL);
              inst.error = _(reg_expected_msgs[rtype]);
          count += hireg + dregs - getreg;

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)

      if (skip_past_char (&ptr, '[') == SUCCESS)
          if (skip_past_char (&ptr, ']') == SUCCESS)
              lane = NEON_ALL_LANES;
            else if (lane != NEON_ALL_LANES)
                inst.error = _(type_error);
              my_get_expression (&exp, &ptr, GE_NO_PREFIX);
              if (exp.X_op != O_constant)
                  inst.error = _("constant expression required");
                lane = exp.X_add_number;
              else if (lane != exp.X_add_number)
                  inst.error = _(type_error);
              if (skip_past_char (&ptr, ']') == FAIL)
                  inst.error = _("expected ]");
      else if (lane == -1)
        lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
          inst.error = _(type_error);
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x].  We must be interleaving structures.  */
    lane = NEON_INTERLEAVE_LANES;

  if (lane == -1 || base_reg == -1 || count < 1 || count > 4
      || (count > 1 && reg_incr == -1))
      inst.error = _("error parsing element/structure list");

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
      inst.error = _("expected }");

  /* Pack lane, stride and length into the documented return format.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
1555 /* Parse an explicit relocation suffix on an expression. This is
1556 either nothing, or a word in parentheses. Note that if !OBJ_ELF,
1557 arm_reloc_hsh contains no entries, so this function can only
1558 succeed if there is no () after the word. Returns -1 on error,
1559 BFD_RELOC_UNUSED if there wasn't any suffix. */
/* NB: excerpted listing — several body lines are elided.  */
1561 parse_reloc (char **str)
1563 struct reloc_entry *r;
/* No '(' suffix at all: report "no explicit relocation".  */
1567 return BFD_RELOC_UNUSED;
/* Scan the word inside the parentheses up to ')' or ','.  */
1572 while (*q && *q != ')' && *q != ',')
/* Look the word up (length-bounded, so no copy is needed).  */
1577 if ((r = hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
1584 /* Directives: register aliases. */
/* Record an alias STR for register NUMBER of the given TYPE in
   arm_reg_hsh.  Redefinition of a built-in register, or of an existing
   alias with a different meaning, is warned about and ignored.
   NB: excerpted listing — some body lines are elided.  */
1587 insert_reg_alias (char *str, int number, int type)
1589 struct reg_entry *new;
1592 if ((new = hash_find (arm_reg_hsh, str)) != 0)
1595 as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
1597 /* Only warn about a redefinition if it's not defined as the
/* Re-aliasing to the identical number/type is silently accepted.  */
1599 else if (new->number != number || new->type != type)
1600 as_warn (_("ignoring redefinition of register alias '%s'"), str);
/* The hash table keeps pointers, so both the name and the entry must be
   heap copies that outlive this call.  */
1605 name = xstrdup (str);
1606 new = xmalloc (sizeof (struct reg_entry));
1609 new->number = number;
1611 new->builtin = FALSE;
1613 if (hash_insert (arm_reg_hsh, name, (PTR) new))
1617 /* Look for the .req directive. This is of the form:
1619 new_register_name .req existing_register_name
1621 If we find one, or if it looks sufficiently like one that we want to
1622 handle any error here, return non-zero. Otherwise return zero. */
/* NB: excerpted listing — some body lines are elided.  */
1625 create_register_alias (char * newname, char *p)
1627 struct reg_entry *old;
1628 char *oldname, *nbuf;
1631 /* The input scrubber ensures that whitespace after the mnemonic is
1632 collapsed to single spaces. */
/* Not a .req line at all — bail out so the caller can try other parses.  */
1634 if (strncmp (oldname, " .req ", 6) != 0)
1638 if (*oldname == '\0')
1641 old = hash_find (arm_reg_hsh, oldname);
1644 as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1648 /* If TC_CASE_SENSITIVE is defined, then newname already points to
1649 the desired alias name, and p points to its end. If not, then
1650 the desired alias name is in the global original_case_string. */
1651 #ifdef TC_CASE_SENSITIVE
1654 newname = original_case_string;
1655 nlen = strlen (newname);
/* NOTE(review): alloca'd scratch copy of the alias name — fine for short
   names, but unbounded input would grow the stack; verify callers limit
   symbol length.  */
1658 nbuf = alloca (nlen + 1);
1659 memcpy (nbuf, newname, nlen);
1662 /* Create aliases under the new name as stated; an all-lowercase
1663 version of the new name; and an all-uppercase version of the new
1665 insert_reg_alias (nbuf, old->number, old->type);
/* Lowercase pass; only insert if it actually differs from the original.  */
1667 for (p = nbuf; *p; p++)
1670 if (strncmp (nbuf, newname, nlen))
1671 insert_reg_alias (nbuf, old->number, old->type);
/* Uppercase pass, same dedup rule.  */
1673 for (p = nbuf; *p; p++)
1676 if (strncmp (nbuf, newname, nlen))
1677 insert_reg_alias (nbuf, old->number, old->type);
1682 /* Should never be called, as .req goes between the alias and the
1683 register name, not at the beginning of the line. */
/* Reaching here means .req appeared in directive position — a syntax
   error by construction.  */
1685 s_req (int a ATTRIBUTE_UNUSED)
1687 as_bad (_("invalid syntax for .req directive"));
1690 /* The .unreq directive deletes an alias which was previously defined
1691 by .req. For example:
/* NB: excerpted listing — some body lines are elided.  */
1697 s_unreq (int a ATTRIBUTE_UNUSED)
/* Grab the alias name: everything up to whitespace/end-of-line.  */
1702 name = input_line_pointer;
1704 while (*input_line_pointer != 0
1705 && *input_line_pointer != ' '
1706 && *input_line_pointer != '\n')
1707 ++input_line_pointer;
/* Temporarily NUL-terminate the name in place; restored below.  */
1709 saved_char = *input_line_pointer;
1710 *input_line_pointer = 0;
1713 as_bad (_("invalid syntax for .unreq directive"));
1716 struct reg_entry *reg = hash_find (arm_reg_hsh, name);
1719 as_bad (_("unknown register alias '%s'"), name);
/* Built-in registers cannot be undefined, only user aliases.  */
1720 else if (reg->builtin)
1721 as_warn (_("ignoring attempt to undefine built-in register '%s'"),
/* Remove the alias and free the name that insert_reg_alias strdup'ed.  */
1725 hash_delete (arm_reg_hsh, name);
1726 free ((char *) reg->name);
1731 *input_line_pointer = saved_char;
1732 demand_empty_rest_of_line ();
1735 /* Directives: Instruction set selection. */
1738 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
1739 (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
1740 Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
1741 and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped. */
/* Last mapping state emitted for the current section.  */
1743 static enum mstate mapstate = MAP_UNDEFINED;
/* Emit a mapping symbol ($a/$t/$d) at the current output location when the
   ARM/Thumb/data STATE changes.  NB: excerpted listing — the switch over
   STATE and several lines are elided.  */
1746 mapping_state (enum mstate state)
1749 const char * symname;
1752 if (mapstate == state)
1753 /* The mapping symbol has already been emitted.
1754 There is nothing else to do. */
/* All three mapping-symbol kinds are untyped, per the comment above.  */
1763 type = BSF_NO_FLAGS;
1767 type = BSF_NO_FLAGS;
1771 type = BSF_NO_FLAGS;
/* Remember the state per-segment as well as in the file-scope cache.  */
1779 seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1781 symbolP = symbol_new (symname, now_seg, (valueT) frag_now_fix (), frag_now);
1782 symbol_table_insert (symbolP);
1783 symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
/* ARM state: clear the Thumb annotations on the symbol.  */
1788 THUMB_SET_FUNC (symbolP, 0);
1789 ARM_SET_THUMB (symbolP, 0);
1790 ARM_SET_INTERWORK (symbolP, support_interwork);
/* Thumb state: set them.  */
1794 THUMB_SET_FUNC (symbolP, 1);
1795 ARM_SET_THUMB (symbolP, 1);
1796 ARM_SET_INTERWORK (symbolP, support_interwork);
/* Non-ELF builds: mapping symbols are a no-op.  */
1805 #define mapping_state(x) /* nothing */
1808 /* Find the real, Thumb encoded start of a Thumb function. */
/* NB: excerpted listing — some body lines are elided.  */
1811 find_real_start (symbolS * symbolP)
1814 const char * name = S_GET_NAME (symbolP);
1815 symbolS * new_target;
1817 /* This definition must agree with the one in gcc/config/arm/thumb.c. */
1818 #define STUB_NAME ".real_start_of"
1823 /* The compiler may generate BL instructions to local labels because
1824 it needs to perform a branch to a far away location. These labels
1825 do not have a corresponding ".real_start_of" label. We check
1826 both for S_IS_LOCAL and for a leading dot, to give a way to bypass
1827 the ".real_start_of" convention for nonlocal branches. */
1828 if (S_IS_LOCAL (symbolP) || name[0] == '.')
1831 real_start = ACONCAT ((STUB_NAME, name, NULL));
1832 new_target = symbol_find (real_start);
1834 if (new_target == NULL)
/* NOTE(review): this diagnostic is not wrapped in _() for translation,
   unlike the surrounding messages — looks like an oversight; confirm
   before changing, as it alters the gettext message catalogue.  */
1836 as_warn ("Failed to find real start of function: %s\n", name);
/* Fall back to the original symbol rather than failing the branch.  */
1837 new_target = symbolP;
/* Switch the assembler to 16-bit (Thumb) or 32-bit (ARM) instruction
   encoding.  WIDTH must be 16 or 32; anything else is diagnosed.
   NB: excerpted listing — the switch structure is elided.  */
1844 opcode_select (int width)
/* Thumb requires an ARMv4T-or-later target.  */
1851 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
1852 as_bad (_("selected processor does not support THUMB opcodes"));
1855 /* No need to force the alignment, since we will have been
1856 coming from ARM mode, which is word-aligned. */
1857 record_alignment (now_seg, 1);
1859 mapping_state (MAP_THUMB);
1865 if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
1866 as_bad (_("selected processor does not support ARM opcodes"));
/* Entering ARM mode from Thumb: force word alignment.  */
1871 frag_align (2, 0, 0);
1873 record_alignment (now_seg, 1);
1875 mapping_state (MAP_ARM);
1879 as_bad (_("invalid instruction size selected (%d)"), width);
/* Handle the .arm directive: switch to 32-bit ARM encoding.  */
1884 s_arm (int ignore ATTRIBUTE_UNUSED)
1887 demand_empty_rest_of_line ();
/* Handle the .thumb directive: switch to 16-bit Thumb encoding.  */
1891 s_thumb (int ignore ATTRIBUTE_UNUSED)
1894 demand_empty_rest_of_line ();
/* Handle the .code directive: ".code 16" selects Thumb, ".code 32"
   selects ARM; other operands are rejected.  */
1898 s_code (int unused ATTRIBUTE_UNUSED)
1902 temp = get_absolute_expression ();
1907 opcode_select (temp);
1911 as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
/* Handle the .force_thumb directive.  */
1916 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
1918 /* If we are not already in thumb mode go into it, EVEN if
1919 the target processor does not support thumb instructions.
1920 This is used by gcc/config/arm/lib1funcs.asm for example
1921 to compile interworking support functions even if the
1922 target processor should not support interworking. */
1926 record_alignment (now_seg, 1);
1929 demand_empty_rest_of_line ();
/* Handle the .thumb_func directive: mark the next label as the start of
   a Thumb function, for interworking purposes.  */
1933 s_thumb_func (int ignore ATTRIBUTE_UNUSED)
1937 /* The following label is the name/address of the start of a Thumb function.
1938 We need to know this for the interworking support. */
1939 label_is_thumb_function_name = TRUE;
1942 /* Perform a .set directive, but also mark the alias as
1943 being a thumb function. */
/* NB: excerpted listing — a number of body lines are elided.  */
1946 s_thumb_set (int equiv)
1948 /* XXX the following is a duplicate of the code for s_set() in read.c
1949 We cannot just call that code as we need to get at the symbol that
1956 /* Especial apologies for the random logic:
1957 This just grew, and could be parsed much more simply!
/* Parse "name , expression" off the input line.  */
1959 name = input_line_pointer;
1960 delim = get_symbol_end ();
1961 end_name = input_line_pointer;
1964 if (*input_line_pointer != ',')
1967 as_bad (_("expected comma after name \"%s\""), name);
1969 ignore_rest_of_line ();
1973 input_line_pointer++;
1976 if (name[0] == '.' && name[1] == '\0')
1978 /* XXX - this should not happen to .thumb_set. */
/* Create the symbol if neither the symbol table nor the backend knows it.  */
1982 if ((symbolP = symbol_find (name)) == NULL
1983 && (symbolP = md_undefined_symbol (name)) == NULL)
1986 /* When doing symbol listings, play games with dummy fragments living
1987 outside the normal fragment chain to record the file and line info
1989 if (listing & LISTING_SYMBOLS)
1991 extern struct list_info_struct * listing_tail;
1992 fragS * dummy_frag = xmalloc (sizeof (fragS));
1994 memset (dummy_frag, 0, sizeof (fragS));
1995 dummy_frag->fr_type = rs_fill;
1996 dummy_frag->line = listing_tail;
1997 symbolP = symbol_new (name, undefined_section, 0, dummy_frag);
1998 dummy_frag->fr_symbol = symbolP;
2002 symbolP = symbol_new (name, undefined_section, 0, &zero_address_frag);
2005 /* "set" symbols are local unless otherwise specified. */
2006 SF_SET_LOCAL (symbolP);
2007 #endif /* OBJ_COFF */
2008 } /* Make a new symbol. */
2010 symbol_table_insert (symbolP);
/* A defined non-register symbol may not be redefined.  */
2015 && S_IS_DEFINED (symbolP)
2016 && S_GET_SEGMENT (symbolP) != reg_section)
2017 as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));
/* Assign the expression value, as plain .set would.  */
2019 pseudo_set (symbolP);
2021 demand_empty_rest_of_line ();
2023 /* XXX Now we come to the Thumb specific bit of code. */
2025 THUMB_SET_FUNC (symbolP, 1);
2026 ARM_SET_THUMB (symbolP, 1);
2027 #if defined OBJ_ELF || defined OBJ_COFF
2028 ARM_SET_INTERWORK (symbolP, support_interwork);
2032 /* Directives: Mode selection. */
2034 /* .syntax [unified|divided] - choose the new unified syntax
2035 (same for Arm and Thumb encoding, modulo slight differences in what
2036 can be represented) or the old divergent syntax for each mode. */
2038 s_syntax (int unused ATTRIBUTE_UNUSED)
2042 name = input_line_pointer;
2043 delim = get_symbol_end ();
/* Keyword comparison is case-insensitive.  */
2045 if (!strcasecmp (name, "unified"))
2046 unified_syntax = TRUE;
2047 else if (!strcasecmp (name, "divided"))
2048 unified_syntax = FALSE;
2051 as_bad (_("unrecognized syntax mode \"%s\""), name);
/* get_symbol_end wrote a NUL over the delimiter; restore it.  */
2054 *input_line_pointer = delim;
2055 demand_empty_rest_of_line ();
2058 /* Directives: sectioning and alignment. */
2060 /* Same as s_align_ptwo but align 0 => align 2. */
2063 s_align (int unused ATTRIBUTE_UNUSED)
2067 long max_alignment = 15;
/* Clamp out-of-range alignment requests with a diagnostic.  */
2069 temp = get_absolute_expression ();
2070 if (temp > max_alignment)
2071 as_bad (_("alignment too large: %d assumed"), temp = max_alignment);
2074 as_bad (_("alignment negative. 0 assumed."));
/* Optional second operand: fill value.  */
2078 if (*input_line_pointer == ',')
2080 input_line_pointer++;
2081 temp_fill = get_absolute_expression ();
2089 /* Only make a frag if we HAVE to. */
2090 if (temp && !need_pass_2)
2091 frag_align (temp, (int) temp_fill, 0);
2092 demand_empty_rest_of_line ();
2094 record_alignment (now_seg, temp);
/* Handle the .bss directive: switch output to the BSS section.  */
2098 s_bss (int ignore ATTRIBUTE_UNUSED)
2100 /* We don't support putting frags in the BSS segment, we fake it by
2101 marking in_bss, then looking at s_skip for clues. */
2102 subseg_set (bss_section, 0);
2103 demand_empty_rest_of_line ();
2104 mapping_state (MAP_DATA);
/* Handle the .even directive: align to a 2-byte boundary.  */
2108 s_even (int ignore ATTRIBUTE_UNUSED)
2110 /* Never make frag if expect extra pass. */
2112 frag_align (1, 0, 0);
2114 record_alignment (now_seg, 1);
2116 demand_empty_rest_of_line ();
2119 /* Directives: Literal pools. */
/* Return the literal pool for the current (section, subsection) pair,
   or NULL if none exists yet.  */
2121 static literal_pool *
2122 find_literal_pool (void)
2124 literal_pool * pool;
2126 for (pool = list_of_pools; pool != NULL; pool = pool->next)
2128 if (pool->section == now_seg
2129 && pool->sub_section == now_subseg)
/* Like find_literal_pool, but create (and register) a fresh pool for the
   current section/subsection if there is none.  */
2136 static literal_pool *
2137 find_or_make_literal_pool (void)
2139 /* Next literal pool ID number. */
2140 static unsigned int latest_pool_num = 1;
2141 literal_pool * pool;
2143 pool = find_literal_pool ();
2147 /* Create a new pool. */
2148 pool = xmalloc (sizeof (* pool));
2152 pool->next_free_entry = 0;
2153 pool->section = now_seg;
2154 pool->sub_section = now_subseg;
2155 pool->next = list_of_pools;
2156 pool->symbol = NULL;
2158 /* Add it to the list. */
2159 list_of_pools = pool;
2162 /* New pools, and emptied pools, will have a NULL symbol. */
2163 if (pool->symbol == NULL)
/* The pool's address is not known yet, so anchor it with a fake label
   that s_ltorg relocates when the pool is emitted.  */
2165 pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
2166 (valueT) 0, &zero_address_frag);
2167 pool->id = latest_pool_num ++;
2174 /* Add the literal in the global 'inst'
2175 structure to the relevent literal pool. */
/* NB: excerpted listing — some body lines are elided.  */
2178 add_to_lit_pool (void)
2180 literal_pool * pool;
2183 pool = find_or_make_literal_pool ();
2185 /* Check if this literal value is already in the pool. */
/* Constants match on value and signedness ...  */
2186 for (entry = 0; entry < pool->next_free_entry; entry ++)
2188 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2189 && (inst.reloc.exp.X_op == O_constant)
2190 && (pool->literals[entry].X_add_number
2191 == inst.reloc.exp.X_add_number)
2192 && (pool->literals[entry].X_unsigned
2193 == inst.reloc.exp.X_unsigned))
/* ... symbolic expressions match on symbol, op-symbol and addend.  */
2196 if ((pool->literals[entry].X_op == inst.reloc.exp.X_op)
2197 && (inst.reloc.exp.X_op == O_symbol)
2198 && (pool->literals[entry].X_add_number
2199 == inst.reloc.exp.X_add_number)
2200 && (pool->literals[entry].X_add_symbol
2201 == inst.reloc.exp.X_add_symbol)
2202 && (pool->literals[entry].X_op_symbol
2203 == inst.reloc.exp.X_op_symbol))
2207 /* Do we need to create a new entry? */
2208 if (entry == pool->next_free_entry)
2210 if (entry >= MAX_LITERAL_POOL_SIZE)
2212 inst.error = _("literal pool overflow");
2216 pool->literals[entry] = inst.reloc.exp;
2217 pool->next_free_entry += 1;
/* Rewrite the instruction's operand as pool-symbol + byte offset
   (4 bytes per pool slot) so the fixup resolves to the pool entry.  */
2220 inst.reloc.exp.X_op = O_symbol;
2221 inst.reloc.exp.X_add_number = ((int) entry) * 4;
2222 inst.reloc.exp.X_add_symbol = pool->symbol;
2227 /* Can't use symbol_new here, so have to create a symbol and then at
2228 a later date assign it a value. Thats what these functions do. */
/* NB: excerpted listing — some body lines are elided.  */
2231 symbol_locate (symbolS * symbolP,
2232 const char * name, /* It is copied, the caller can modify. */
2233 segT segment, /* Segment identifier (SEG_<something>). */
2234 valueT valu, /* Symbol value. */
2235 fragS * frag) /* Associated fragment. */
2237 unsigned int name_length;
2238 char * preserved_copy_of_name;
/* Copy NAME (including its NUL) into the notes obstack so the symbol owns
   a stable string.  */
2240 name_length = strlen (name) + 1; /* +1 for \0. */
2241 obstack_grow (&notes, name, name_length);
2242 preserved_copy_of_name = obstack_finish (&notes);
2244 #ifdef tc_canonicalize_symbol_name
2245 preserved_copy_of_name =
2246 tc_canonicalize_symbol_name (preserved_copy_of_name);
2249 S_SET_NAME (symbolP, preserved_copy_of_name);
2251 S_SET_SEGMENT (symbolP, segment);
2252 S_SET_VALUE (symbolP, valu);
2253 symbol_clear_list_pointers (symbolP);
2255 symbol_set_frag (symbolP, frag);
2257 /* Link to end of symbol chain. */
2259 extern int symbol_table_frozen;
2261 if (symbol_table_frozen)
2265 symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
/* Give the object format and target a chance to adjust the new symbol.  */
2267 obj_symbol_new_hook (symbolP);
2269 #ifdef tc_symbol_new_hook
2270 tc_symbol_new_hook (symbolP);
2274 verify_symbol_chain (symbol_rootP, symbol_lastP);
2275 #endif /* DEBUG_SYMS */
/* Handle the .ltorg directive: dump the current literal pool at this
   point in the output, then mark the pool empty.
   NB: excerpted listing — some body lines are elided.  */
2280 s_ltorg (int ignored ATTRIBUTE_UNUSED)
2283 literal_pool * pool;
/* Nothing to emit if there is no pool, or it has no entries.  */
2286 pool = find_literal_pool ();
2288 || pool->symbol == NULL
2289 || pool->next_free_entry == 0)
2292 mapping_state (MAP_DATA);
2294 /* Align pool as you have word accesses.
2295 Only make a frag if we have to. */
2297 frag_align (2, 0, 0);
2299 record_alignment (now_seg, 2);
/* The \002 byte makes the pool label unforgeable from assembly input.  */
2301 sprintf (sym_name, "$$lit_\002%x", pool->id);
/* Pin the pool's placeholder symbol to the current location.  */
2303 symbol_locate (pool->symbol, sym_name, now_seg,
2304 (valueT) frag_now_fix (), frag_now);
2305 symbol_table_insert (pool->symbol);
2307 ARM_SET_THUMB (pool->symbol, thumb_mode);
2309 #if defined OBJ_COFF || defined OBJ_ELF
2310 ARM_SET_INTERWORK (pool->symbol, support_interwork);
2313 for (entry = 0; entry < pool->next_free_entry; entry ++)
2314 /* First output the expression in the instruction to the pool. */
2315 emit_expr (&(pool->literals[entry]), 4); /* .word */
2317 /* Mark the pool as empty. */
2318 pool->next_free_entry = 0;
2319 pool->symbol = NULL;
2323 /* Forward declarations for functions below, in the MD interface
2325 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
2326 static valueT create_unwind_entry (int);
2327 static void start_unwind_section (const segT, int);
2328 static void add_unwind_opcode (valueT, int);
2329 static void flush_pending_unwind (void);
2331 /* Directives: Data. */
/* Handle .word / .short style data directives (NBYTES per item), allowing
   an optional "(reloc)" suffix on each symbolic expression.
   NB: excerpted listing — some body lines are elided.  */
2334 s_arm_elf_cons (int nbytes)
2338 #ifdef md_flush_pending_output
2339 md_flush_pending_output ();
2342 if (is_it_end_of_statement ())
2344 demand_empty_rest_of_line ();
2348 #ifdef md_cons_align
2349 md_cons_align (nbytes);
2352 mapping_state (MAP_DATA);
2356 char *base = input_line_pointer;
/* Plain (non-symbolic) expressions need no relocation handling.  */
2360 if (exp.X_op != O_symbol)
2361 emit_expr (&exp, (unsigned int) nbytes);
2364 char *before_reloc = input_line_pointer;
2365 reloc = parse_reloc (&input_line_pointer);
2368 as_bad (_("unrecognized relocation suffix"));
2369 ignore_rest_of_line ();
2372 else if (reloc == BFD_RELOC_UNUSED)
2373 emit_expr (&exp, (unsigned int) nbytes);
2376 reloc_howto_type *howto = bfd_reloc_type_lookup (stdoutput, reloc);
2377 int size = bfd_get_reloc_size (howto);
2379 if (reloc == BFD_RELOC_ARM_PLT32)
2381 as_bad (_("(plt) is only valid on branch targets"));
2382 reloc = BFD_RELOC_UNUSED;
2387 as_bad (_("%s relocations do not fit in %d bytes"),
2388 howto->name, nbytes);
2391 /* We've parsed an expression stopping at O_symbol.
2392 But there may be more expression left now that we
2393 have parsed the relocation marker. Parse it again.
2394 XXX Surely there is a cleaner way to do this. */
/* Splice the "(reloc)" text out of the input buffer and re-parse the
   remainder of the expression in place.  */
2395 char *p = input_line_pointer;
2397 char *save_buf = alloca (input_line_pointer - base);
2398 memcpy (save_buf, base, input_line_pointer - base);
2399 memmove (base + (input_line_pointer - before_reloc),
2400 base, before_reloc - base);
2402 input_line_pointer = base + (input_line_pointer-before_reloc);
2404 memcpy (base, save_buf, p - base);
/* Small relocations are placed in the low-address end of the field
   (offset = nbytes - size).  NOTE(review): looks endian-sensitive —
   confirm against the full source for big-endian handling.  */
2406 offset = nbytes - size;
2407 p = frag_more ((int) nbytes);
2408 fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
2409 size, &exp, 0, reloc);
2414 while (*input_line_pointer++ == ',');
2416 /* Put terminator back into stream. */
2417 input_line_pointer --;
2418 demand_empty_rest_of_line ();
2422 /* Parse a .rel31 directive. */
/* Emits a 4-byte field whose top bit comes from the first operand (0 or 1)
   and whose low 31 bits are a PREL31 relocation against the expression.
   NB: excerpted listing — some body lines are elided.  */
2425 s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
2432 if (*input_line_pointer == '1')
2433 highbit = 0x80000000;
2434 else if (*input_line_pointer != '0')
2435 as_bad (_("expected 0 or 1"));
2437 input_line_pointer++;
2438 if (*input_line_pointer != ',')
2439 as_bad (_("missing comma"));
2440 input_line_pointer++;
2442 #ifdef md_flush_pending_output
2443 md_flush_pending_output ();
2446 #ifdef md_cons_align
2450 mapping_state (MAP_DATA);
/* Pre-fill the word with just the high bit; the fixup supplies bits 30:0.  */
2455 md_number_to_chars (p, highbit, 4);
2456 fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
2457 BFD_RELOC_ARM_PREL31);
2459 demand_empty_rest_of_line ();
2462 /* Directives: AEABI stack-unwind tables. */
2464 /* Parse an unwind_fnstart directive. Simply records the current location. */
2467 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
2469 demand_empty_rest_of_line ();
2470 /* Mark the start of the function. */
2471 unwind.proc_start = expr_build_dot ();
2473 /* Reset the rest of the unwind info. */
2474 unwind.opcode_count = 0;
2475 unwind.table_entry = NULL;
2476 unwind.personality_routine = NULL;
/* -1 = no personality index chosen yet (see cantunwind, which uses -2).  */
2477 unwind.personality_index = -1;
2478 unwind.frame_size = 0;
2479 unwind.fp_offset = 0;
2482 unwind.sp_restored = 0;
2486 /* Parse a handlerdata directive.  Creates the exception handling table entry
2487 for the function. */
2490 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
2492   demand_empty_rest_of_line ();
/* Only one .handlerdata is permitted per function — a second one would
   have clobbered the entry created below.  (Fixed diagnostic typo:
   "dupicate" -> "duplicate".)  */
2493   if (unwind.table_entry)
2494     as_bad (_("duplicate .handlerdata directive"));
/* Build the EH table entry now; the argument presumably flags that
   handler data follows — confirm against create_unwind_entry.  */
2496   create_unwind_entry (1);
2499 /* Parse an unwind_fnend directive. Generates the index table entry. */
/* NB: excerpted listing — some body lines are elided.  */
2502 s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
2508 demand_empty_rest_of_line ();
2510 /* Add eh table entry. */
/* If .handlerdata already created a table entry, reuse it; otherwise
   build one now (0 = no handler data).  */
2511 if (unwind.table_entry == NULL)
2512 val = create_unwind_entry (0);
2516 /* Add index table entry. This is two words. */
2517 start_unwind_section (unwind.saved_seg, 1);
2518 frag_align (2, 0, 0);
2519 record_alignment (now_seg, 2);
2521 ptr = frag_more (8);
2522 where = frag_now_fix () - 8;
2524 /* Self relative offset of the function start. */
2525 fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
2526 BFD_RELOC_ARM_PREL31);
2528 /* Indicate dependency on EHABI-defined personality routines to the
2529 linker, if it hasn't been done already. */
2530 if (unwind.personality_index >= 0 && unwind.personality_index < 3
2531 && !(marked_pr_dependency & (1 << unwind.personality_index)))
2533 static const char *const name[] = {
2534 "__aeabi_unwind_cpp_pr0",
2535 "__aeabi_unwind_cpp_pr1",
2536 "__aeabi_unwind_cpp_pr2"
/* A zero-size BFD_RELOC_NONE fixup records the reference without
   emitting any bytes.  */
2538 symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
2539 fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
2540 marked_pr_dependency |= 1 << unwind.personality_index;
2541 seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
2542 = marked_pr_dependency;
2546 /* Inline exception table entry. */
2547 md_number_to_chars (ptr + 4, val, 4);
2549 /* Self relative offset of the table entry. */
2550 fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
2551 BFD_RELOC_ARM_PREL31);
2553 /* Restore the original section. */
2554 subseg_set (unwind.saved_seg, unwind.saved_subseg);
2558 /* Parse an unwind_cantunwind directive. */
2561 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
2563 demand_empty_rest_of_line ();
/* cantunwind is mutually exclusive with any personality routine.  */
2564 if (unwind.personality_routine || unwind.personality_index != -1)
2565 as_bad (_("personality routine specified for cantunwind frame"));
/* -2 marks the frame as EXIDX_CANTUNWIND.  */
2567 unwind.personality_index = -2;
2571 /* Parse a personalityindex directive. */
2574 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
/* Reject a second personality specification of either kind.  */
2578 if (unwind.personality_routine || unwind.personality_index != -1)
2579 as_bad (_("duplicate .personalityindex directive"));
/* The index must be a constant in [0, 15].  */
2583 if (exp.X_op != O_constant
2584 || exp.X_add_number < 0 || exp.X_add_number > 15)
2586 as_bad (_("bad personality routine number"));
2587 ignore_rest_of_line ();
2591 unwind.personality_index = exp.X_add_number;
2593 demand_empty_rest_of_line ();
2597 /* Parse a personality directive. */
2600 s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
2604 if (unwind.personality_routine || unwind.personality_index != -1)
2605 as_bad (_("duplicate .personality directive"));
/* The operand is a symbol naming the personality routine.  */
2607 name = input_line_pointer;
2608 c = get_symbol_end ();
2609 p = input_line_pointer;
2610 unwind.personality_routine = symbol_find_or_make (name);
2612 demand_empty_rest_of_line ();
2616 /* Parse a directive saving core registers. */
/* NB: excerpted listing — some body lines are elided.  */
2619 s_arm_unwind_save_core (void)
2625 range = parse_reg_list (&input_line_pointer);
2628 as_bad (_("expected register list"));
2629 ignore_rest_of_line ();
2633 demand_empty_rest_of_line ();
2635 /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
2636 into .unwind_save {..., sp...}. We aren't bothered about the value of
2637 ip because it is clobbered by calls. */
/* 0x3000 = r12|r13 bits; 0x1000 = r12 only (saved), so rewrite r12 -> r13
   and drop the movsp opcode emitted earlier.  */
2638 if (unwind.sp_restored && unwind.fp_reg == 12
2639 && (range & 0x3000) == 0x1000)
2641 unwind.opcode_count--;
2642 unwind.sp_restored = 0;
2643 range = (range | 0x2000) & ~0x1000;
2644 unwind.pending_offset = 0;
2650 /* See if we can use the short opcodes. These pop a block of up to 8
2651 registers starting with r4, plus maybe r14. */
2652 for (n = 0; n < 8; n++)
2654 /* Break at the first non-saved register. */
2655 if ((range & (1 << (n + 4))) == 0)
2658 /* See if there are any other bits set. */
2659 if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
2661 /* Use the long form. */
2662 op = 0x8000 | ((range >> 4) & 0xfff);
2663 add_unwind_opcode (op, 2);
2667 /* Use the short form. */
2669 op = 0xa8; /* Pop r14. */
2671 op = 0xa0; /* Do not pop r14. */
2673 add_unwind_opcode (op, 1);
/* Low registers r0-r3 need a separate "pop under mask" opcode.  */
2680 op = 0xb100 | (range & 0xf);
2681 add_unwind_opcode (op, 2);
2684 /* Record the number of bytes pushed. */
2685 for (n = 0; n < 16; n++)
2687 if (range & (1 << n))
2688 unwind.frame_size += 4;
2693 /* Parse a directive saving FPA registers. */
/* REG is the first FPA register saved.  NB: excerpted listing — some
   body lines are elided.  */
2696 s_arm_unwind_save_fpa (int reg)
2702 /* Get Number of registers to transfer. */
2703 if (skip_past_comma (&input_line_pointer) != FAIL)
2706 exp.X_op = O_illegal;
2708 if (exp.X_op != O_constant)
2710 as_bad (_("expected , <constant>"));
2711 ignore_rest_of_line ();
2715 num_regs = exp.X_add_number;
2717 if (num_regs < 1 || num_regs > 4)
2719 as_bad (_("number of registers must be in the range [1:4]"));
2720 ignore_rest_of_line ();
2724 demand_empty_rest_of_line ();
/* Short form (one byte) vs. long form (reg encoded in the second byte).  */
2729 op = 0xb4 | (num_regs - 1);
2730 add_unwind_opcode (op, 1);
2735 op = 0xc800 | (reg << 4) | (num_regs - 1);
2736 add_unwind_opcode (op, 2);
/* FPA registers are 12 bytes each.  */
2738 unwind.frame_size += num_regs * 12;
2742 /* Parse a directive saving VFP registers. */
/* NB: excerpted listing — some body lines are elided.  */
2745 s_arm_unwind_save_vfp (void)
2751 count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D);
2754 as_bad (_("expected register list"));
2755 ignore_rest_of_line ();
2759 demand_empty_rest_of_line ();
/* Short one-byte form vs. long two-byte form with the base register.  */
2764 op = 0xb8 | (count - 1);
2765 add_unwind_opcode (op, 1);
2770 op = 0xb300 | (reg << 4) | (count - 1);
2771 add_unwind_opcode (op, 2);
/* 8 bytes per D register, plus 4 bytes (FSTMX format word).  */
2773 unwind.frame_size += count * 8 + 4;
2777 /* Parse a directive saving iWMMXt data registers. */
/* NB: excerpted listing — many body lines are elided, including the
   brace/loop structure.  */
2780 s_arm_unwind_save_mmxwr (void)
2788 if (*input_line_pointer == '{')
2789 input_line_pointer++;
2793 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR, NULL);
2797 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
2802 as_tsktsk (_("register list not in ascending order"));
/* Optional "wrN-wrM" range syntax.  */
2805 if (*input_line_pointer == '-')
2807 input_line_pointer++;
2808 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR, NULL);
2811 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWR]));
2814 else if (reg >= hi_reg)
2816 as_bad (_("bad register range"));
2819 for (; reg < hi_reg; reg++)
2823 while (skip_past_comma (&input_line_pointer) != FAIL);
2825 if (*input_line_pointer == '}')
2826 input_line_pointer++;
2828 demand_empty_rest_of_line ();
2830 /* Generate any deferred opcodes because we're going to be looking at
2832 flush_pending_unwind ();
/* Each iWMMXt data register is 8 bytes.  */
2834 for (i = 0; i < 16; i++)
2836 if (mask & (1 << i))
2837 unwind.frame_size += 8;
2840 /* Attempt to combine with a previous opcode. We do this because gcc
2841 likes to output separate unwind directives for a single block of
2843 if (unwind.opcode_count > 0)
2845 i = unwind.opcodes[unwind.opcode_count - 1];
2846 if ((i & 0xf8) == 0xc0)
2849 /* Only merge if the blocks are contiguous. */
2852 if ((mask & 0xfe00) == (1 << 9))
2854 mask |= ((1 << (i + 11)) - 1) & 0xfc00;
2855 unwind.opcode_count--;
2858 else if (i == 6 && unwind.opcode_count >= 2)
2860 i = unwind.opcodes[unwind.opcode_count - 2];
2864 op = 0xffff << (reg - 1);
2866 || ((mask & op) == (1u << (reg - 1))))
2868 op = (1 << (reg + i + 1)) - 1;
2869 op &= ~((1 << reg) - 1);
2871 unwind.opcode_count -= 2;
2878 /* We want to generate opcodes in the order the registers have been
2879 saved, ie. descending order. */
2880 for (reg = 15; reg >= -1; reg--)
2882 /* Save registers in blocks. */
2884 || !(mask & (1 << reg)))
2886 /* We found an unsaved reg. Generate opcodes to save the
2887 preceeding block. */
/* Short form for blocks starting at wr10, long form otherwise.  */
2893 op = 0xc0 | (hi_reg - 10);
2894 add_unwind_opcode (op, 1);
2899 op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
2900 add_unwind_opcode (op, 2);
2909 ignore_rest_of_line ();
/* Parse a directive saving iWMMXt control registers (wcgr).
   NB: excerpted listing — some body lines are elided.  */
2913 s_arm_unwind_save_mmxwcg (void)
2920 if (*input_line_pointer == '{')
2921 input_line_pointer++;
2925 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG, NULL);
2929 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
2935 as_tsktsk (_("register list not in ascending order"));
/* Optional "wcgrN-wcgrM" range syntax.  */
2938 if (*input_line_pointer == '-')
2940 input_line_pointer++;
2941 hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG, NULL);
2944 as_bad (_(reg_expected_msgs[REG_TYPE_MMXWCG]));
2947 else if (reg >= hi_reg)
2949 as_bad (_("bad register range"));
2952 for (; reg < hi_reg; reg++)
2956 while (skip_past_comma (&input_line_pointer) != FAIL);
2958 if (*input_line_pointer == '}')
2959 input_line_pointer++;
2961 demand_empty_rest_of_line ();
2963 /* Generate any deferred opcodes because we're going to be looking at
2965 flush_pending_unwind ();
/* Control registers are saved 4 bytes each.  */
2967 for (reg = 0; reg < 16; reg++)
2969 if (mask & (1 << reg))
2970 unwind.frame_size += 4;
2973 add_unwind_opcode (op, 2);
2976 ignore_rest_of_line ();
2980 /* Parse an unwind_save directive. */
/* Dispatches to the per-register-class handler based on the type of the
   first register named.  NB: excerpted listing — some lines are elided.  */
2983 s_arm_unwind_save (int ignored ATTRIBUTE_UNUSED)
2986 struct reg_entry *reg;
2987 bfd_boolean had_brace = FALSE;
2989 /* Figure out what sort of save we have. */
/* Peek without consuming, so the specific handler can re-parse the list.  */
2990 peek = input_line_pointer;
2998 reg = arm_reg_parse_multi (&peek);
3002 as_bad (_("register expected"));
3003 ignore_rest_of_line ();
/* FPA takes a single register + count, never a {list}.  */
3012 as_bad (_("FPA .unwind_save does not take a register list"));
3013 ignore_rest_of_line ();
3016 s_arm_unwind_save_fpa (reg->number);
3019 case REG_TYPE_RN: s_arm_unwind_save_core (); return;
3020 case REG_TYPE_VFD: s_arm_unwind_save_vfp (); return;
3021 case REG_TYPE_MMXWR: s_arm_unwind_save_mmxwr (); return;
3022 case REG_TYPE_MMXWCG: s_arm_unwind_save_mmxwcg (); return;
3025 as_bad (_(".unwind_save does not support this kind of register"));
3026 ignore_rest_of_line ();
3031 /* Parse an unwind_movsp directive. */
3034 s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
3039 reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN, NULL);
3042 as_bad (_(reg_expected_msgs[REG_TYPE_RN]));
3043 ignore_rest_of_line ();
3046 demand_empty_rest_of_line ();
/* movsp from SP or PC makes no sense.  */
3048 if (reg == REG_SP || reg == REG_PC)
3050 as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
/* A frame pointer must not already be in effect.  */
3054 if (unwind.fp_reg != REG_SP)
3055 as_bad (_("unexpected .unwind_movsp directive"));
3057 /* Generate opcode to restore the value. */
3059 add_unwind_opcode (op, 1);
3061 /* Record the information for later. */
3062 unwind.fp_reg = reg;
3063 unwind.fp_offset = unwind.frame_size;
3064 unwind.sp_restored = 1;
3067 /* Parse an unwind_pad directive. */
3070 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
3074 if (immediate_for_directive (&offset) == FAIL)
/* Stack adjustments are expressed in words by the unwind opcodes.  */
3079 as_bad (_("stack increment must be multiple of 4"));
3080 ignore_rest_of_line ();
3084 /* Don't generate any opcodes, just record the details for later. */
/* Deferred so consecutive adjustments merge into one opcode.  */
3085 unwind.frame_size += offset;
3086 unwind.pending_offset += offset;
3088 demand_empty_rest_of_line ();
3091 /* Parse an unwind_setfp directive. */
/* Parse a .unwind_setfp directive: "setfp <fp_reg>, <sp_reg>[, #offset]".
   Records FP_REG as the frame pointer and its offset from the virtual
   stack pointer; no opcodes are emitted here.  */
3094 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
3100 fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN, NULL);
3101 if (skip_past_comma (&input_line_pointer) == FAIL)
3104 sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN, NULL);
3106 if (fp_reg == FAIL || sp_reg == FAIL)
3108 as_bad (_("expected <reg>, <reg>"));
3109 ignore_rest_of_line ();
3113 /* Optional constant. */
3114 if (skip_past_comma (&input_line_pointer) != FAIL)
3116 if (immediate_for_directive (&offset) == FAIL)
3122 demand_empty_rest_of_line ();
/* NOTE(review): literal 13 here is the sp register number; other code in
   this file (e.g. s_arm_unwind_movsp) uses the REG_SP macro — consider
   using it here too for consistency.  */
3124 if (sp_reg != 13 && sp_reg != unwind.fp_reg)
/* NOTE(review): the two concatenated string literals below lack a space
   between "previous" and "unwind_movsp", so the diagnostic prints
   "previousunwind_movsp" — fix the literal (not done in this
   comment-only pass).  */
3126 as_bad (_("register must be either sp or set by a previous"
3127 "unwind_movsp directive"));
3131 /* Don't generate any opcodes, just record the information for later. */
3132 unwind.fp_reg = fp_reg;
3135 unwind.fp_offset = unwind.frame_size - offset;
3137 unwind.fp_offset -= offset;
3140 /* Parse an unwind_raw directive. */
/* Parse a .unwind_raw directive: "<offset>, <byte> [, <byte> ...]".
   OFFSET adjusts the virtual frame size; the comma-separated byte list
   is emitted verbatim as unwind opcodes (added in reverse order since
   add_unwind_opcode builds the table backwards).  */
3143 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
3146 /* This is an arbitrary limit. */
3147 unsigned char op[16];
3151 if (exp.X_op == O_constant
3152 && skip_past_comma (&input_line_pointer) != FAIL)
3154 unwind.frame_size += exp.X_add_number;
/* Force the error path below when the offset/comma pattern is absent.  */
3158 exp.X_op = O_illegal;
3160 if (exp.X_op != O_constant)
3162 as_bad (_("expected <offset>, <opcode>"));
3163 ignore_rest_of_line ();
3169 /* Parse the opcode. */
3174 as_bad (_("unwind opcode too long"));
3175 ignore_rest_of_line ();
/* Each raw opcode byte must fit in 0..255.  */
3177 if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
3179 as_bad (_("invalid unwind opcode"));
3180 ignore_rest_of_line ();
3183 op[count++] = exp.X_add_number;
3185 /* Parse the next byte. */
3186 if (skip_past_comma (&input_line_pointer) == FAIL)
3192 /* Add the opcode bytes in reverse order. */
3194 add_unwind_opcode (op[count], 1);
3196 demand_empty_rest_of_line ();
3200 /* Parse a .eabi_attribute directive. */
/* Parse a .eabi_attribute directive: "<tag>, <value>".  Depending on the
   tag, VALUE is an integer, a quoted string, or (for Tag_compatibility)
   an integer followed by a string.  The attribute is recorded in the
   output BFD via the elf32_arm_add_eabi_attr_* routines.  */
3203 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
3206 bfd_boolean is_string;
3213 if (exp.X_op != O_constant)
3216 tag = exp.X_add_number;
/* Tags 4, 5, 32 and odd tags above 32 take string values.  */
3217 if (tag == 4 || tag == 5 || tag == 32 || (tag > 32 && (tag & 1) != 0))
3222 if (skip_past_comma (&input_line_pointer) == FAIL)
3224 if (tag == 32 || !is_string)
3227 if (exp.X_op != O_constant)
3229 as_bad (_("expected numeric constant"));
3230 ignore_rest_of_line ();
3233 i = exp.X_add_number;
/* Tag_compatibility takes both an integer and a string, comma-separated.  */
3235 if (tag == Tag_compatibility
3236 && skip_past_comma (&input_line_pointer) == FAIL)
3238 as_bad (_("expected comma"));
3239 ignore_rest_of_line ();
/* Extract the quoted string value; it is temporarily NUL-terminated in
   place and the overwritten character restored afterwards.
   NOTE(review): "skip_whitespace(" lacks the space before '(' used by
   the GNU style elsewhere in this file.  */
3244 skip_whitespace(input_line_pointer);
3245 if (*input_line_pointer != '"')
3247 input_line_pointer++;
3248 s = input_line_pointer;
3249 while (*input_line_pointer && *input_line_pointer != '"')
3250 input_line_pointer++;
3251 if (*input_line_pointer != '"')
3253 saved_char = *input_line_pointer;
3254 *input_line_pointer = 0;
3262 if (tag == Tag_compatibility)
3263 elf32_arm_add_eabi_attr_compat (stdoutput, i, s);
3265 elf32_arm_add_eabi_attr_string (stdoutput, tag, s);
3267 elf32_arm_add_eabi_attr_int (stdoutput, tag, i);
/* Restore the character clobbered by the temporary NUL terminator.  */
3271 *input_line_pointer = saved_char;
3272 input_line_pointer++;
3274 demand_empty_rest_of_line ();
3277 as_bad (_("bad string constant"));
3278 ignore_rest_of_line ();
3281 as_bad (_("expected <tag> , <value>"));
3282 ignore_rest_of_line ();
3284 #endif /* OBJ_ELF */
3286 static void s_arm_arch (int);
3287 static void s_arm_cpu (int);
3288 static void s_arm_fpu (int);
3290 /* This table describes all the machine specific pseudo-ops the assembler
3291 has to support. The fields are:
3292 pseudo-op name without dot
3293 function to call to execute this pseudo-op
3294 Integer arg to pass to the function. */
/* Machine-specific pseudo-op table consumed by the generic gas core.
   Each entry: name (without the leading dot), handler, integer argument
   passed to the handler.  */
3296 const pseudo_typeS md_pseudo_table[] =
3298 /* Never called because '.req' does not start a line. */
3299 { "req", s_req, 0 },
3300 { "unreq", s_unreq, 0 },
3301 { "bss", s_bss, 0 },
3302 { "align", s_align, 0 },
3303 { "arm", s_arm, 0 },
3304 { "thumb", s_thumb, 0 },
3305 { "code", s_code, 0 },
3306 { "force_thumb", s_force_thumb, 0 },
3307 { "thumb_func", s_thumb_func, 0 },
3308 { "thumb_set", s_thumb_set, 0 },
3309 { "even", s_even, 0 },
3310 { "ltorg", s_ltorg, 0 },
/* .pool is an alias for .ltorg.  */
3311 { "pool", s_ltorg, 0 },
3312 { "syntax", s_syntax, 0 },
3313 { "cpu", s_arm_cpu, 0 },
3314 { "arch", s_arm_arch, 0 },
3315 { "fpu", s_arm_fpu, 0 },
/* ELF-specific directives: data, relocations and EHABI unwind tables.  */
3317 { "word", s_arm_elf_cons, 4 },
3318 { "long", s_arm_elf_cons, 4 },
3319 { "rel31", s_arm_rel31, 0 },
3320 { "fnstart", s_arm_unwind_fnstart, 0 },
3321 { "fnend", s_arm_unwind_fnend, 0 },
3322 { "cantunwind", s_arm_unwind_cantunwind, 0 },
3323 { "personality", s_arm_unwind_personality, 0 },
3324 { "personalityindex", s_arm_unwind_personalityindex, 0 },
3325 { "handlerdata", s_arm_unwind_handlerdata, 0 },
3326 { "save", s_arm_unwind_save, 0 },
3327 { "movsp", s_arm_unwind_movsp, 0 },
3328 { "pad", s_arm_unwind_pad, 0 },
3329 { "setfp", s_arm_unwind_setfp, 0 },
3330 { "unwind_raw", s_arm_unwind_raw, 0 },
3331 { "eabi_attribute", s_arm_eabi_attribute, 0 },
/* FPA extended-precision float pseudo-ops.  */
3335 { "extend", float_cons, 'x' },
3336 { "ldouble", float_cons, 'x' },
3337 { "packed", float_cons, 'p' },
3341 /* Parser functions used exclusively in instruction operands. */
3343 /* Generic immediate-value read function for use in insn parsing.
3344 STR points to the beginning of the immediate (the leading #);
3345 VAL receives the value; if the value is outside [MIN, MAX]
3346 issue an error. PREFIX_OPT is true if the immediate prefix is
/* Parse an immediate operand at *STR into *VAL, requiring a constant
   expression in [MIN, MAX].  PREFIX_OPT selects whether the '#' prefix
   is optional.  Sets inst.error and fails otherwise.  */
3350 parse_immediate (char **str, int *val, int min, int max,
3351 bfd_boolean prefix_opt)
3354 my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
3355 if (exp.X_op != O_constant)
3357 inst.error = _("constant expression required");
3361 if (exp.X_add_number < min || exp.X_add_number > max)
3363 inst.error = _("immediate value out of range");
3367 *val = exp.X_add_number;
3371 /* Less-generic immediate-value read function with the possibility of loading a
3372 big (64-bit) immediate, as required by Neon VMOV and VMVN immediate
3373 instructions. Puts the result directly in inst.operands[i]. */
/* Parse an immediate that may be up to 64 bits wide (Neon VMOV/VMVN).
   A plain constant goes into inst.operands[i].imm; a 33..64-bit bignum
   is split with the low 32 bits in .imm and the high 32 bits in .reg,
   with .regisimm set to mark the pair as one immediate.  */
3376 parse_big_immediate (char **str, int i)
3381 my_get_expression (&exp, &ptr, GE_OPT_PREFIX_BIG);
3383 if (exp.X_op == O_constant)
3384 inst.operands[i].imm = exp.X_add_number;
3385 else if (exp.X_op == O_big
3386 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number > 32
3387 && LITTLENUM_NUMBER_OF_BITS * exp.X_add_number <= 64)
3389 unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;
3390 /* Bignums have their least significant bits in
3391 generic_bignum[0]. Make sure we put 32 bits in imm and
3392 32 bits in reg, in a (hopefully) portable way. */
3393 assert (parts != 0);
3394 inst.operands[i].imm = 0;
3395 for (j = 0; j < parts; j++, idx++)
3396 inst.operands[i].imm |= generic_bignum[idx]
3397 << (LITTLENUM_NUMBER_OF_BITS * j);
3398 inst.operands[i].reg = 0;
3399 for (j = 0; j < parts; j++, idx++)
3400 inst.operands[i].reg |= generic_bignum[idx]
3401 << (LITTLENUM_NUMBER_OF_BITS * j);
3402 inst.operands[i].regisimm = 1;
3412 /* Returns the pseudo-register number of an FPA immediate constant,
3413 or FAIL if there isn't a valid constant here. */
/* Parse an FPA floating-point immediate at *STR.  Returns the index of
   the matching entry in fp_values[] (the pseudo-register number) or
   FAIL.  Three strategies are tried in turn: exact string match against
   fp_const[], a raw IEEE float parse, then a full expression parse.  */
3416 parse_fpa_immediate (char ** str)
3418 LITTLENUM_TYPE words[MAX_LITTLENUMS];
3424 /* First try and match exact strings, this is to guarantee
3425 that some formats will work even for cross assembly. */
3427 for (i = 0; fp_const[i]; i++)
3429 if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
3433 *str += strlen (fp_const[i]);
3434 if (is_end_of_line[(unsigned char) **str])
3440 /* Just because we didn't get a match doesn't mean that the constant
3441 isn't valid, just that it is in a format that we don't
3442 automatically recognize. Try parsing it with the standard
3443 expression routines. */
3445 memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));
3447 /* Look for a raw floating point number. */
3448 if ((save_in = atof_ieee (*str, 'x', words)) != NULL
3449 && is_end_of_line[(unsigned char) *save_in])
3451 for (i = 0; i < NUM_FLOAT_VALS; i++)
3453 for (j = 0; j < MAX_LITTLENUMS; j++)
3455 if (words[j] != fp_values[i][j])
/* j == MAX_LITTLENUMS iff every littlenum matched fp_values[i].  */
3459 if (j == MAX_LITTLENUMS)
3467 /* Try and parse a more complex expression, this will probably fail
3468 unless the code uses a floating point prefix (eg "0f"). */
/* expression() works on input_line_pointer, so swap it in and restore
   it on every exit path below.  */
3469 save_in = input_line_pointer;
3470 input_line_pointer = *str;
3471 if (expression (&exp) == absolute_section
3472 && exp.X_op == O_big
3473 && exp.X_add_number < 0)
3475 /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
3477 if (gen_to_words (words, 5, (long) 15) == 0)
3479 for (i = 0; i < NUM_FLOAT_VALS; i++)
3481 for (j = 0; j < MAX_LITTLENUMS; j++)
3483 if (words[j] != fp_values[i][j])
3487 if (j == MAX_LITTLENUMS)
3489 *str = input_line_pointer;
3490 input_line_pointer = save_in;
3497 *str = input_line_pointer;
3498 input_line_pointer = save_in;
3499 inst.error = _("invalid FPA immediate expression");
3503 /* Shift operands. */
/* Shift kinds, in the order of their ARM encoding (RRX is a pseudo-kind
   encoded as ROR #0).  */
3506 SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX
/* Maps a shift mnemonic (looked up in arm_shift_hsh) to its kind.  */
3509 struct asm_shift_name
3512 enum shift_kind kind;
3515 /* Third argument to parse_shift. */
3516 enum parse_shift_mode
3518 NO_SHIFT_RESTRICT, /* Any kind of shift is accepted. */
3519 SHIFT_IMMEDIATE, /* Shift operand must be an immediate. */
3520 SHIFT_LSL_OR_ASR_IMMEDIATE, /* Shift must be LSL or ASR immediate. */
3521 SHIFT_ASR_IMMEDIATE, /* Shift must be ASR immediate. */
3522 SHIFT_LSL_IMMEDIATE, /* Shift must be LSL immediate. */
3525 /* Parse a <shift> specifier on an ARM data processing instruction.
3526 This has three forms:
3528 (LSL|LSR|ASL|ASR|ROR) Rs
3529 (LSL|LSR|ASL|ASR|ROR) #imm
3532 Note that ASL is assimilated to LSL in the instruction encoding, and
3533 RRX to ROR #0 (which cannot be written as such). */
/* Parse a <shift> specifier for operand I, constrained by MODE.  The
   mnemonic is looked up in arm_shift_hsh; the shift amount may be a
   register (only in NO_SHIFT_RESTRICT mode) or an immediate expression
   left in inst.reloc.exp.  */
3536 parse_shift (char **str, int i, enum parse_shift_mode mode)
3538 const struct asm_shift_name *shift_name;
3539 enum shift_kind shift;
/* The mnemonic is the leading run of alphabetic characters.  */
3544 for (p = *str; ISALPHA (*p); p++)
3549 inst.error = _("shift expression expected");
3553 shift_name = hash_find_n (arm_shift_hsh, *str, p - *str);
3555 if (shift_name == NULL)
3557 inst.error = _("shift expression expected");
3561 shift = shift_name->kind;
/* Enforce the per-mode restrictions on which shift kinds are legal.  */
3565 case NO_SHIFT_RESTRICT:
3566 case SHIFT_IMMEDIATE: break;
3568 case SHIFT_LSL_OR_ASR_IMMEDIATE:
3569 if (shift != SHIFT_LSL && shift != SHIFT_ASR)
3571 inst.error = _("'LSL' or 'ASR' required");
3576 case SHIFT_LSL_IMMEDIATE:
3577 if (shift != SHIFT_LSL)
3579 inst.error = _("'LSL' required");
3584 case SHIFT_ASR_IMMEDIATE:
3585 if (shift != SHIFT_ASR)
3587 inst.error = _("'ASR' required");
/* RRX takes no shift amount; everything else needs a register or an
   immediate to follow.  */
3595 if (shift != SHIFT_RRX)
3597 /* Whitespace can appear here if the next thing is a bare digit. */
3598 skip_whitespace (p);
3600 if (mode == NO_SHIFT_RESTRICT
3601 && (reg = arm_reg_parse (&p, REG_TYPE_RN, NULL)) != FAIL)
3603 inst.operands[i].imm = reg;
3604 inst.operands[i].immisreg = 1;
3606 else if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
3609 inst.operands[i].shift_kind = shift;
3610 inst.operands[i].shifted = 1;
3615 /* Parse a <shifter_operand> for an ARM data processing instruction:
3618 #<immediate>, <rotate>
3622 where <shift> is defined by parse_shift above, and <rotate> is a
3623 multiple of 2 between 0 and 30. Validation of immediate operands
3624 is deferred to md_apply_fix. */
/* Parse a <shifter_operand>: either a register with an optional shift,
   or an immediate with an optional explicit rotation "#x, y".  The
   rotated form is pre-encoded here; md_apply_fix decodes it again.  */
3627 parse_shifter_operand (char **str, int i)
3632 if ((value = arm_reg_parse (str, REG_TYPE_RN, NULL)) != FAIL)
3634 inst.operands[i].reg = value;
3635 inst.operands[i].isreg = 1;
3637 /* parse_shift will override this if appropriate */
3638 inst.reloc.exp.X_op = O_constant;
3639 inst.reloc.exp.X_add_number = 0;
3641 if (skip_past_comma (str) == FAIL)
3644 /* Shift operation on register. */
3645 return parse_shift (str, i, NO_SHIFT_RESTRICT);
3648 if (my_get_expression (&inst.reloc.exp, str, GE_IMM_PREFIX))
3651 if (skip_past_comma (str) == SUCCESS)
3653 /* #x, y -- ie explicit rotation by Y. */
3654 if (my_get_expression (&expr, str, GE_NO_PREFIX))
3657 if (expr.X_op != O_constant || inst.reloc.exp.X_op != O_constant)
3659 inst.error = _("constant expression expected");
/* The rotation must be an even value in 0..30, rotating an 8-bit
   immediate (0..255).  */
3663 value = expr.X_add_number;
3664 if (value < 0 || value > 30 || value % 2 != 0)
3666 inst.error = _("invalid rotation");
3669 if (inst.reloc.exp.X_add_number < 0 || inst.reloc.exp.X_add_number > 255)
3671 inst.error = _("invalid constant");
3675 /* Convert to decoded value. md_apply_fix will put it back. */
3676 inst.reloc.exp.X_add_number
3677 = (((inst.reloc.exp.X_add_number << (32 - value))
3678 | (inst.reloc.exp.X_add_number >> value)) & 0xffffffff);
3681 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
3682 inst.reloc.pc_rel = 0;
3686 /* Parse all forms of an ARM address expression. Information is written
3687 to inst.operands[i] and/or inst.reloc.
3689 Preindexed addressing (.preind=1):
3691 [Rn, #offset] .reg=Rn .reloc.exp=offset
3692 [Rn, +/-Rm] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
3693 [Rn, +/-Rm, shift] .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
3694 .shift_kind=shift .reloc.exp=shift_imm
3696 These three may have a trailing ! which causes .writeback to be set also.
3698 Postindexed addressing (.postind=1, .writeback=1):
3700 [Rn], #offset .reg=Rn .reloc.exp=offset
3701 [Rn], +/-Rm .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
3702 [Rn], +/-Rm, shift .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
3703 .shift_kind=shift .reloc.exp=shift_imm
3705 Unindexed addressing (.preind=0, .postind=0):
3707 [Rn], {option} .reg=Rn .imm=option .immisreg=0
3711 [Rn]{!} shorthand for [Rn,#0]{!}
3712 =immediate .isreg=0 .reloc.exp=immediate
3713 label .reg=PC .reloc.pc_rel=1 .reloc.exp=label
3715 It is the caller's responsibility to check for addressing modes not
3716 supported by the instruction, and to set inst.reloc.type. */
/* Parse any ARM address expression into inst.operands[i]/inst.reloc.
   See the block comment above this function for the full set of forms
   (pre-indexed, post-indexed, unindexed, literal pool, bare label) and
   which operand fields each form fills in.  */
3719 parse_address (char **str, int i)
/* No '[' means either "=imm" (load-constant pseudo-op) or a bare
   label, which becomes a pc-relative reference.  */
3724 if (skip_past_char (&p, '[') == FAIL)
3726 if (skip_past_char (&p, '=') == FAIL)
3728 /* bare address - translate to PC-relative offset */
3729 inst.reloc.pc_rel = 1;
3730 inst.operands[i].reg = REG_PC;
3731 inst.operands[i].isreg = 1;
3732 inst.operands[i].preind = 1;
3734 /* else a load-constant pseudo op, no special treatment needed here */
3736 if (my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX))
/* '[' seen: a base register is mandatory.  */
3743 if ((reg = arm_reg_parse (&p, REG_TYPE_RN, NULL)) == FAIL)
3745 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
3748 inst.operands[i].reg = reg;
3749 inst.operands[i].isreg = 1;
/* Comma inside the brackets: pre-indexed offset (register, alignment
   specifier, or immediate).  */
3751 if (skip_past_comma (&p) == SUCCESS)
3753 inst.operands[i].preind = 1;
3756 else if (*p == '-') p++, inst.operands[i].negative = 1;
3758 if ((reg = arm_reg_parse (&p, REG_TYPE_RN, NULL)) != FAIL)
3760 inst.operands[i].imm = reg;
3761 inst.operands[i].immisreg = 1;
3763 if (skip_past_comma (&p) == SUCCESS)
3764 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
3767 else if (skip_past_char (&p, ':') == SUCCESS)
3769 /* FIXME: '@' should be used here, but it's filtered out by generic
3770 code before we get to see it here. This may be subject to
3773 my_get_expression (&exp, &p, GE_NO_PREFIX);
3774 if (exp.X_op != O_constant)
3776 inst.error = _("alignment must be constant");
3779 inst.operands[i].imm = exp.X_add_number << 8;
3780 inst.operands[i].immisalign = 1;
3781 /* Alignments are not pre-indexes. */
3782 inst.operands[i].preind = 0;
3786 if (inst.operands[i].negative)
3788 inst.operands[i].negative = 0;
3791 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
3796 if (skip_past_char (&p, ']') == FAIL)
3798 inst.error = _("']' expected");
/* Trailing '!' selects writeback; trailing ',' starts a post-indexed
   or unindexed ("{option}") addend.  */
3802 if (skip_past_char (&p, '!') == SUCCESS)
3803 inst.operands[i].writeback = 1;
3805 else if (skip_past_comma (&p) == SUCCESS)
3807 if (skip_past_char (&p, '{') == SUCCESS)
3809 /* [Rn], {expr} - unindexed, with option */
3810 if (parse_immediate (&p, &inst.operands[i].imm,
3811 0, 255, TRUE) == FAIL)
3814 if (skip_past_char (&p, '}') == FAIL)
3816 inst.error = _("'}' expected at end of 'option' field");
3819 if (inst.operands[i].preind)
3821 inst.error = _("cannot combine index with option");
3829 inst.operands[i].postind = 1;
3830 inst.operands[i].writeback = 1;
3832 if (inst.operands[i].preind)
3834 inst.error = _("cannot combine pre- and post-indexing");
3839 else if (*p == '-') p++, inst.operands[i].negative = 1;
3841 if ((reg = arm_reg_parse (&p, REG_TYPE_RN, NULL)) != FAIL)
3843 /* We might be using the immediate for alignment already. If we
3844 are, OR the register number into the low-order bits. */
3845 if (inst.operands[i].immisalign)
3846 inst.operands[i].imm |= reg;
3848 inst.operands[i].imm = reg;
3849 inst.operands[i].immisreg = 1;
3851 if (skip_past_comma (&p) == SUCCESS)
3852 if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
3857 if (inst.operands[i].negative)
3859 inst.operands[i].negative = 0;
3862 if (my_get_expression (&inst.reloc.exp, &p, GE_IMM_PREFIX))
3868 /* If at this point neither .preind nor .postind is set, we have a
3869 bare [Rn]{!}, which is shorthand for [Rn,#0]{!}. */
3870 if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
3872 inst.operands[i].preind = 1;
3873 inst.reloc.exp.X_op = O_constant;
3874 inst.reloc.exp.X_add_number = 0;
3880 /* Miscellaneous. */
3882 /* Parse a PSR flag operand. The value returned is FAIL on syntax error,
3883 or a bitmask suitable to be or-ed into the ARM msr instruction. */
/* Parse a PSR operand for msr/mrs: "CPSR"/"SPSR" (case-insensitive),
   optionally followed by a suffix naming the fields to write.  Returns
   FAIL on syntax error, otherwise a bitmask for the msr encoding.  */
3885 parse_psr (char **str)
3888 unsigned long psr_field;
3889 const struct asm_psr *psr;
3892 /* CPSR's and SPSR's can now be lowercase. This is just a convenience
3893 feature for ease of use and backwards compatibility. */
3895 if (strncasecmp (p, "SPSR", 4) == 0)
3896 psr_field = SPSR_BIT;
3897 else if (strncasecmp (p, "CPSR", 4) == 0)
/* Collect the alphanumeric/underscore suffix for hash lookup.  */
3904 while (ISALNUM (*p) || *p == '_');
/* v7-M special registers (e.g. IPSR) live in their own hash table.  */
3906 psr = hash_find_n (arm_v7m_psr_hsh, start, p - start);
3917 /* A suffix follows. */
3923 while (ISALNUM (*p) || *p == '_');
3925 psr = hash_find_n (arm_psr_hsh, start, p - start);
3929 psr_field |= psr->field;
3934 goto error; /* Garbage after "[CS]PSR". */
/* No suffix: default to writing both the control and flags fields.  */
3936 psr_field |= (PSR_c | PSR_f);
3942 inst.error = _("flag for {c}psr instruction expected");
3946 /* Parse the flags argument to CPSI[ED]. Returns FAIL on error, or a
3947 value suitable for splatting into the AIF field of the instruction. */
/* Parse the a/i/f flag string of CPSI[ED].  Accumulates one bit per
   flag (A=4, I=2, F=1) into VAL; at least one flag is required.
   Returns the AIF mask, or FAIL via the error paths.  */
3950 parse_cps_flags (char **str)
3959 case '\0': case ',':
3962 case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
3963 case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
3964 case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
3967 inst.error = _("unrecognized CPS flag");
3972 if (saw_a_flag == 0)
3974 inst.error = _("missing CPS flags");
3982 /* Parse an endian specifier ("BE" or "LE", case insensitive);
3983 returns 0 for big-endian, 1 for little-endian, FAIL for an error. */
/* Parse a "BE"/"LE" endianness specifier (case-insensitive); returns
   0 for big-endian, 1 for little-endian, FAIL on error.
   NOTE(review): strncasecmp returns 0 on a MATCH, so the bare
   `if (strncasecmp (...))` tests below are true on a MISMATCH — the
   branch logic looks inverted (the elided assignments are not visible
   here); verify against the full file and consider `== 0`.  */
3986 parse_endian_specifier (char **str)
3991 if (strncasecmp (s, "BE", 2))
3993 else if (strncasecmp (s, "LE", 2))
3997 inst.error = _("valid endian specifiers are be or le")
/* Reject trailing identifier characters after the two-letter token.  */
4001 if (ISALNUM (s[2]) || s[2] == '_')
4003 inst.error = _("valid endian specifiers are be or le")
4008 return little_endian;
4011 /* Parse a rotation specifier: ROR #0, #8, #16, #24. *val receives a
4012 value suitable for poking into the rotate field of an sxt or sxta
4013 instruction, or FAIL on error. */
/* Parse a rotation specifier "ROR #0|8|16|24" and return the 2-bit
   rotate-field encoding (0..3) for sxt/sxta instructions, or FAIL.  */
4016 parse_ror (char **str)
4021 if (strncasecmp (s, "ROR", 3) == 0)
4025 inst.error = _("missing rotation field after comma");
4029 if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
/* Map the byte rotation to its instruction encoding.  */
4034 case 0: *str = s; return 0x0;
4035 case 8: *str = s; return 0x1;
4036 case 16: *str = s; return 0x2;
4037 case 24: *str = s; return 0x3;
4040 inst.error = _("rotation can only be 0, 8, 16, or 24");
4045 /* Parse a conditional code (from conds[] below). The value returned is in the
4046 range 0 .. 14, or FAIL. */
/* Parse a condition-code mnemonic via arm_cond_hsh; the value returned
   is in the range 0..14, or FAIL with inst.error set.  */
4048 parse_cond (char **str)
4051 const struct asm_cond *c;
4054 while (ISALPHA (*q))
4057 c = hash_find_n (arm_cond_hsh, p, q - p)
4060 inst.error = _("condition required");
4068 /* Parse an option for a barrier instruction. Returns the encoding for the
/* Parse a barrier option mnemonic (dmb/dsb/isb operand) via
   arm_barrier_opt_hsh and return its encoding.  */
4071 parse_barrier (char **str)
4074 const struct asm_barrier_opt *o;
4077 while (ISALPHA (*q))
4080 o = hash_find_n (arm_barrier_opt_hsh, p, q - p);
4088 /* Parse the operands of a table branch instruction. Similar to a memory
/* Parse the operands of a table-branch instruction (tbb/tbh):
   "[<Rn>, <Rm>{, lsl #1}]".  Base goes in operands[0].reg, index in
   operands[0].imm; only a shift of "lsl #1" is accepted.  */
4091 parse_tb (char **str)
4096 if (skip_past_char (&p, '[') == FAIL)
4098 inst.error = _("'[' expected");
4102 if ((reg = arm_reg_parse (&p, REG_TYPE_RN, NULL)) == FAIL)
4104 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4107 inst.operands[0].reg = reg;
4109 if (skip_past_comma (&p) == FAIL)
4111 inst.error = _("',' expected");
4115 if ((reg = arm_reg_parse (&p, REG_TYPE_RN, NULL)) == FAIL)
4117 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4120 inst.operands[0].imm = reg;
4122 if (skip_past_comma (&p) == SUCCESS)
4124 if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
/* Only a shift amount of exactly 1 is valid for tbh.  */
4126 if (inst.reloc.exp.X_add_number != 1)
4128 inst.error = _("invalid shift");
4131 inst.operands[0].shifted = 1;
4134 if (skip_past_char (&p, ']') == FAIL)
4136 inst.error = _("']' expected");
4143 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
4144 information on the types the operands can take and how they are encoded.
4145 Note particularly the abuse of ".regisimm" to signify a Neon register.
4146 Up to three operands may be read; this function handles setting the
4147 ".present" field for each operand itself.
4148 Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
4149 else returns FAIL. */
/* Parse the operands of a Neon VMOV instruction (see do_neon_mov for
   the case numbering used in the comments below).  Distinguishes the
   forms by what the first operand parses as: a scalar (case 4), a
   Neon D/Q register (cases 0-3, 5), or an ARM core register
   (cases 6-7).  Sets .present on each operand it consumes and updates
   *WHICH_OPERAND.  Note ".regisimm" is abused here to mark a Neon
   register operand.  */
4152 parse_neon_mov (char **str, int *which_operand)
4154 int i = *which_operand, val;
4155 enum arm_reg_type rtype;
4158 if ((val = parse_scalar (&ptr, 8)) != FAIL)
4160 /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>. */
4161 inst.operands[i].reg = val;
4162 inst.operands[i].isscalar = 1;
4163 inst.operands[i++].present = 1;
4165 if (skip_past_comma (&ptr) == FAIL)
4168 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN, NULL)) == FAIL)
4171 inst.operands[i].reg = val;
4172 inst.operands[i].isreg = 1;
4173 inst.operands[i].present = 1;
4175 else if ((val = arm_reg_parse (&ptr, REG_TYPE_NDQ, &rtype)) != FAIL)
4177 /* Cases 0, 1, 2, 3, 5 (D only). */
4178 if (skip_past_comma (&ptr) == FAIL)
4181 inst.operands[i].reg = val;
4182 inst.operands[i].isreg = 1;
4183 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4184 inst.operands[i++].present = 1;
4186 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN, NULL)) != FAIL)
4188 /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>. */
4189 inst.operands[i-1].regisimm = 1;
4190 inst.operands[i].reg = val;
4191 inst.operands[i].isreg = 1;
4192 inst.operands[i++].present = 1;
/* Case 5 transfers to a D register only; a Q register is invalid.  */
4194 if (rtype == REG_TYPE_NQ)
4196 inst.error = _("can't use Neon quad register here");
4199 if (skip_past_comma (&ptr) == FAIL)
4201 if ((val = arm_reg_parse (&ptr, REG_TYPE_RN, NULL)) == FAIL)
4203 inst.operands[i].reg = val;
4204 inst.operands[i].isreg = 1;
4205 inst.operands[i].present = 1;
4207 else if (parse_big_immediate (&ptr, i) == SUCCESS)
4209 /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
4210 Case 3: VMOV<c><q>.<dt> <Dd>, #<imm> */
/* Neon forms must be unconditional in ARM state (cond == AL).  */
4211 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4214 else if ((val = arm_reg_parse (&ptr, REG_TYPE_NDQ, &rtype)) != FAIL)
4216 /* Case 0: VMOV<c><q> <Qd>, <Qm>
4217 Case 1: VMOV<c><q> <Dd>, <Dm> */
4218 if (!thumb_mode && (inst.instruction & 0xf0000000) != 0xe0000000)
4221 inst.operands[i].reg = val;
4222 inst.operands[i].isreg = 1;
4223 inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
4224 inst.operands[i].present = 1;
4228 inst.error = _("expected <Rm> or <Dm> or <Qm> operand");
4232 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN, NULL)) != FAIL)
4235 inst.operands[i].reg = val;
4236 inst.operands[i].isreg = 1;
4237 inst.operands[i++].present = 1;
4239 if (skip_past_comma (&ptr) == FAIL)
4242 if ((val = parse_scalar (&ptr, 8)) != FAIL)
4244 /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]> */
4245 inst.operands[i].reg = val;
4246 inst.operands[i].isscalar = 1;
4247 inst.operands[i].present = 1;
4249 else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN, NULL)) != FAIL)
4251 /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm> */
4252 inst.operands[i].reg = val;
4253 inst.operands[i].isreg = 1;
4254 inst.operands[i++].present = 1;
4256 if (skip_past_comma (&ptr) == FAIL)
4259 if ((val = arm_reg_parse (&ptr, REG_TYPE_VFD, NULL)) == FAIL)
4261 inst.error = _(reg_expected_msgs[REG_TYPE_VFD]);
4265 inst.operands[i].reg = val;
4266 inst.operands[i].isreg = 1;
4267 inst.operands[i].regisimm = 1;
4268 inst.operands[i].present = 1;
4273 inst.error = _("parse error");
4277 /* Successfully parsed the operands. Update args. */
/* Shared error exits (reached via goto from the cases above).  */
4283 inst.error = _("expected comma");
4287 inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
4291 inst.error = _("instruction cannot be conditionalized");
4295 /* Matcher codes for parse_operands. */
/* Matcher codes for parse_operands.  Each opcode-table entry lists a
   sequence of these; OP_o* codes mark optional operands (everything
   from OP_FIRST_OPTIONAL on supports backtracking).  */
4296 enum operand_parse_code
4298 OP_stop, /* end of line */
4300 OP_RR, /* ARM register */
4301 OP_RRnpc, /* ARM register, not r15 */
4302 OP_RRnpcb, /* ARM register, not r15, in square brackets */
4303 OP_RRw, /* ARM register, not r15, optional trailing ! */
4304 OP_RCP, /* Coprocessor number */
4305 OP_RCN, /* Coprocessor register */
4306 OP_RF, /* FPA register */
4307 OP_RVS, /* VFP single precision register */
4308 OP_RVD, /* VFP double precision register (0..15) */
4309 OP_RND, /* Neon double precision register (0..31) */
4310 OP_RNQ, /* Neon quad precision register */
4311 OP_RNDQ, /* Neon double or quad precision register */
4312 OP_RNSC, /* Neon scalar D[X] */
4313 OP_RVC, /* VFP control register */
4314 OP_RMF, /* Maverick F register */
4315 OP_RMD, /* Maverick D register */
4316 OP_RMFX, /* Maverick FX register */
4317 OP_RMDX, /* Maverick DX register */
4318 OP_RMAX, /* Maverick AX register */
4319 OP_RMDS, /* Maverick DSPSC register */
4320 OP_RIWR, /* iWMMXt wR register */
4321 OP_RIWC, /* iWMMXt wC register */
4322 OP_RIWG, /* iWMMXt wCG register */
4323 OP_RXA, /* XScale accumulator register */
4325 OP_REGLST, /* ARM register list */
4326 OP_VRSLST, /* VFP single-precision register list */
4327 OP_VRDLST, /* VFP double-precision register list */
4328 OP_NRDLST, /* Neon double-precision register list (d0-d31, qN aliases) */
4329 OP_NSTRLST, /* Neon element/structure list */
4331 OP_NILO, /* Neon immediate/logic operands 2 or 2+3. (VBIC, VORR...) */
4332 OP_RNDQ_I0, /* Neon D or Q reg, or immediate zero. */
4333 OP_RR_RNSC, /* ARM reg or Neon scalar. */
4334 OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar. */
4335 OP_RND_RNSC, /* Neon D reg, or Neon scalar. */
4336 OP_VMOV, /* Neon VMOV operands. */
4337 OP_RNDQ_IMVNb,/* Neon D or Q reg, or immediate good for VMVN. */
4338 OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift. */
4340 OP_I0, /* immediate zero */
4341 OP_I7, /* immediate value 0 .. 7 */
4342 OP_I15, /* 0 .. 15 */
4343 OP_I16, /* 1 .. 16 */
4344 OP_I16z, /* 0 .. 16 */
4345 OP_I31, /* 0 .. 31 */
4346 OP_I31w, /* 0 .. 31, optional trailing ! */
4347 OP_I32, /* 1 .. 32 */
4348 OP_I32z, /* 0 .. 32 */
4349 OP_I63, /* 0 .. 63 */
4350 OP_I63s, /* -64 .. 63 */
4351 OP_I64, /* 1 .. 64 */
4352 OP_I64z, /* 0 .. 64 */
4353 OP_I255, /* 0 .. 255 */
4354 OP_Iffff, /* 0 .. 65535 */
4356 OP_I4b, /* immediate, prefix optional, 1 .. 4 */
4357 OP_I7b, /* 0 .. 7 */
4358 OP_I15b, /* 0 .. 15 */
4359 OP_I31b, /* 0 .. 31 */
4361 OP_SH, /* shifter operand */
4362 OP_ADDR, /* Memory address expression (any mode) */
4363 OP_EXP, /* arbitrary expression */
4364 OP_EXPi, /* same, with optional immediate prefix */
4365 OP_EXPr, /* same, with optional relocation suffix */
4367 OP_CPSF, /* CPS flags */
4368 OP_ENDI, /* Endianness specifier */
4369 OP_PSR, /* CPSR/SPSR mask for msr */
4370 OP_COND, /* conditional code */
4371 OP_TB, /* Table branch. */
4373 OP_RRnpc_I0, /* ARM register or literal 0 */
4374 OP_RR_EXr, /* ARM register or expression with opt. reloc suff. */
4375 OP_RR_EXi, /* ARM register or expression with imm prefix */
4376 OP_RF_IF, /* FPA register or immediate */
4377 OP_RIWR_RIWC, /* iWMMXt R or C reg */
4379 /* Optional operands. */
4380 OP_oI7b, /* immediate, prefix optional, 0 .. 7 */
4381 OP_oI31b, /* 0 .. 31 */
4382 OP_oI32b, /* 1 .. 32 */
4383 OP_oIffffb, /* 0 .. 65535 */
4384 OP_oI255c, /* curly-brace enclosed, 0 .. 255 */
4386 OP_oRR, /* ARM register */
4387 OP_oRRnpc, /* ARM register, not the PC */
4388 OP_oRND, /* Optional Neon double precision register */
4389 OP_oRNQ, /* Optional Neon quad precision register */
4390 OP_oRNDQ, /* Optional Neon double or quad precision register */
4391 OP_oSHll, /* LSL immediate */
4392 OP_oSHar, /* ASR immediate */
4393 OP_oSHllar, /* LSL or ASR immediate */
4394 OP_oROR, /* ROR 0/8/16/24 */
4395 OP_oBARRIER, /* Option argument for a barrier instruction. */
4397 OP_FIRST_OPTIONAL = OP_oI7b
4400 /* Generic instruction operand parser. This does no encoding and no
4401 semantic validation; it merely squirrels values away in the inst
4402 structure. Returns SUCCESS or FAIL depending on whether the
4403 specified grammar matched. */
4405 parse_operands (char *str, const unsigned char *pattern)
4407 unsigned const char *upat = pattern;
4408 char *backtrack_pos = 0;
4409 const char *backtrack_error = 0;
4410 int i, val, backtrack_index = 0;
4411 enum arm_reg_type rtype;
4413 #define po_char_or_fail(chr) do { \
4414 if (skip_past_char (&str, chr) == FAIL) \
4418 #define po_reg_or_fail(regtype) do { \
4419 val = arm_reg_parse (&str, regtype, &rtype); \
4422 inst.error = _(reg_expected_msgs[regtype]); \
4425 inst.operands[i].reg = val; \
4426 inst.operands[i].isreg = 1; \
4427 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
4430 #define po_reg_or_goto(regtype, label) do { \
4431 val = arm_reg_parse (&str, regtype, &rtype); \
4435 inst.operands[i].reg = val; \
4436 inst.operands[i].isreg = 1; \
4437 inst.operands[i].isquad = (rtype == REG_TYPE_NQ); \
4440 #define po_imm_or_fail(min, max, popt) do { \
4441 if (parse_immediate (&str, &val, min, max, popt) == FAIL) \
4443 inst.operands[i].imm = val; \
4446 #define po_scalar_or_goto(elsz, label) do { \
4447 val = parse_scalar (&str, elsz); \
4450 inst.operands[i].reg = val; \
4451 inst.operands[i].isscalar = 1; \
4454 #define po_misc_or_fail(expr) do { \
4459 skip_whitespace (str);
4461 for (i = 0; upat[i] != OP_stop; i++)
4463 if (upat[i] >= OP_FIRST_OPTIONAL)
4465 /* Remember where we are in case we need to backtrack. */
4466 assert (!backtrack_pos);
4467 backtrack_pos = str;
4468 backtrack_error = inst.error;
4469 backtrack_index = i;
4473 po_char_or_fail (',');
4481 case OP_RR: po_reg_or_fail (REG_TYPE_RN); break;
4482 case OP_RCP: po_reg_or_fail (REG_TYPE_CP); break;
4483 case OP_RCN: po_reg_or_fail (REG_TYPE_CN); break;
4484 case OP_RF: po_reg_or_fail (REG_TYPE_FN); break;
4485 case OP_RVS: po_reg_or_fail (REG_TYPE_VFS); break;
4486 case OP_RVD: po_reg_or_fail (REG_TYPE_VFD); break;
4488 case OP_RND: po_reg_or_fail (REG_TYPE_VFD); break;
4489 case OP_RVC: po_reg_or_fail (REG_TYPE_VFC); break;
4490 case OP_RMF: po_reg_or_fail (REG_TYPE_MVF); break;
4491 case OP_RMD: po_reg_or_fail (REG_TYPE_MVD); break;
4492 case OP_RMFX: po_reg_or_fail (REG_TYPE_MVFX); break;
4493 case OP_RMDX: po_reg_or_fail (REG_TYPE_MVDX); break;
4494 case OP_RMAX: po_reg_or_fail (REG_TYPE_MVAX); break;
4495 case OP_RMDS: po_reg_or_fail (REG_TYPE_DSPSC); break;
4496 case OP_RIWR: po_reg_or_fail (REG_TYPE_MMXWR); break;
4497 case OP_RIWC: po_reg_or_fail (REG_TYPE_MMXWC); break;
4498 case OP_RIWG: po_reg_or_fail (REG_TYPE_MMXWCG); break;
4499 case OP_RXA: po_reg_or_fail (REG_TYPE_XSCALE); break;
4501 case OP_RNQ: po_reg_or_fail (REG_TYPE_NQ); break;
4503 case OP_RNDQ: po_reg_or_fail (REG_TYPE_NDQ); break;
4505 /* Neon scalar. Using an element size of 8 means that some invalid
4506 scalars are accepted here, so deal with those in later code. */
4507 case OP_RNSC: po_scalar_or_goto (8, failure); break;
4509 /* WARNING: We can expand to two operands here. This has the potential
4510 to totally confuse the backtracking mechanism! It will be OK at
4511 least as long as we don't try to use optional args as well,
4515 po_reg_or_goto (REG_TYPE_NDQ, try_imm);
4517 skip_past_comma (&str);
4518 po_reg_or_goto (REG_TYPE_NDQ, one_reg_only);
4521 /* Optional register operand was omitted. Unfortunately, it's in
4522 operands[i-1] and we need it to be in inst.operands[i]. Fix that
4523 here (this is a bit grotty). */
4524 inst.operands[i] = inst.operands[i-1];
4525 inst.operands[i-1].present = 0;
4528 /* Immediate gets verified properly later, so accept any now. */
4529 po_imm_or_fail (INT_MIN, INT_MAX, TRUE);
4535 po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
4538 po_imm_or_fail (0, 0, TRUE);
4544 po_scalar_or_goto (8, try_rr);
4547 po_reg_or_fail (REG_TYPE_RN);
4553 po_scalar_or_goto (8, try_ndq);
4556 po_reg_or_fail (REG_TYPE_NDQ);
4562 po_scalar_or_goto (8, try_vfd);
4565 po_reg_or_fail (REG_TYPE_VFD);
4570 /* WARNING: parse_neon_mov can move the operand counter, i. If we're
4571 not careful then bad things might happen. */
4572 po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
4577 po_reg_or_goto (REG_TYPE_NDQ, try_mvnimm);
4580 /* There's a possibility of getting a 64-bit immediate here, so
4581 we need special handling. */
4582 if (parse_big_immediate (&str, i) == FAIL)
4584 inst.error = _("immediate value is out of range");
4592 po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
4595 po_imm_or_fail (0, 63, TRUE);
4600 po_char_or_fail ('[');
4601 po_reg_or_fail (REG_TYPE_RN);
4602 po_char_or_fail (']');
4606 po_reg_or_fail (REG_TYPE_RN);
4607 if (skip_past_char (&str, '!') == SUCCESS)
4608 inst.operands[i].writeback = 1;
4612 case OP_I7: po_imm_or_fail ( 0, 7, FALSE); break;
4613 case OP_I15: po_imm_or_fail ( 0, 15, FALSE); break;
4614 case OP_I16: po_imm_or_fail ( 1, 16, FALSE); break;
4615 case OP_I16z: po_imm_or_fail ( 0, 16, FALSE); break;
4616 case OP_I31: po_imm_or_fail ( 0, 31, FALSE); break;
4617 case OP_I32: po_imm_or_fail ( 1, 32, FALSE); break;
4618 case OP_I32z: po_imm_or_fail ( 0, 32, FALSE); break;
4619 case OP_I63s: po_imm_or_fail (-64, 63, FALSE); break;
4620 case OP_I63: po_imm_or_fail ( 0, 63, FALSE); break;
4621 case OP_I64: po_imm_or_fail ( 1, 64, FALSE); break;
4622 case OP_I64z: po_imm_or_fail ( 0, 64, FALSE); break;
4623 case OP_I255: po_imm_or_fail ( 0, 255, FALSE); break;
4624 case OP_Iffff: po_imm_or_fail ( 0, 0xffff, FALSE); break;
4626 case OP_I4b: po_imm_or_fail ( 1, 4, TRUE); break;
4628 case OP_I7b: po_imm_or_fail ( 0, 7, TRUE); break;
4629 case OP_I15b: po_imm_or_fail ( 0, 15, TRUE); break;
4631 case OP_I31b: po_imm_or_fail ( 0, 31, TRUE); break;
4632 case OP_oI32b: po_imm_or_fail ( 1, 32, TRUE); break;
4633 case OP_oIffffb: po_imm_or_fail ( 0, 0xffff, TRUE); break;
4635 /* Immediate variants */
4637 po_char_or_fail ('{');
4638 po_imm_or_fail (0, 255, TRUE);
4639 po_char_or_fail ('}');
4643 /* The expression parser chokes on a trailing !, so we have
4644 to find it first and zap it. */
4647 while (*s && *s != ',')
4652 inst.operands[i].writeback = 1;
4654 po_imm_or_fail (0, 31, TRUE);
4662 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4667 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4672 po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
4674 if (inst.reloc.exp.X_op == O_symbol)
4676 val = parse_reloc (&str);
4679 inst.error = _("unrecognized relocation suffix");
4682 else if (val != BFD_RELOC_UNUSED)
4684 inst.operands[i].imm = val;
4685 inst.operands[i].hasreloc = 1;
4690 /* Register or expression */
4691 case OP_RR_EXr: po_reg_or_goto (REG_TYPE_RN, EXPr); break;
4692 case OP_RR_EXi: po_reg_or_goto (REG_TYPE_RN, EXPi); break;
4694 /* Register or immediate */
4695 case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0); break;
4696 I0: po_imm_or_fail (0, 0, FALSE); break;
4698 case OP_RF_IF: po_reg_or_goto (REG_TYPE_FN, IF); break;
4700 if (!is_immediate_prefix (*str))
4703 val = parse_fpa_immediate (&str);
4706 /* FPA immediates are encoded as registers 8-15.
4707 parse_fpa_immediate has already applied the offset. */
4708 inst.operands[i].reg = val;
4709 inst.operands[i].isreg = 1;
4712 /* Two kinds of register */
4715 struct reg_entry *rege = arm_reg_parse_multi (&str);
4716 if (rege->type != REG_TYPE_MMXWR
4717 && rege->type != REG_TYPE_MMXWC
4718 && rege->type != REG_TYPE_MMXWCG)
4720 inst.error = _("iWMMXt data or control register expected");
4723 inst.operands[i].reg = rege->number;
4724 inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
4729 case OP_CPSF: val = parse_cps_flags (&str); break;
4730 case OP_ENDI: val = parse_endian_specifier (&str); break;
4731 case OP_oROR: val = parse_ror (&str); break;
4732 case OP_PSR: val = parse_psr (&str); break;
4733 case OP_COND: val = parse_cond (&str); break;
4734 case OP_oBARRIER:val = parse_barrier (&str); break;
4737 po_misc_or_fail (parse_tb (&str));
4740 /* Register lists */
4742 val = parse_reg_list (&str);
4745 inst.operands[1].writeback = 1;
4751 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S);
4755 val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D);
4759 val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
4764 val = parse_neon_el_struct_list (&str, &inst.operands[i].reg);
4767 /* Addressing modes */
4769 po_misc_or_fail (parse_address (&str, i));
4773 po_misc_or_fail (parse_shifter_operand (&str, i));
4777 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
4781 po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
4785 po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
4789 as_fatal ("unhandled operand code %d", upat[i]);
4792 /* Various value-based sanity checks and shared operations. We
4793 do not signal immediate failures for the register constraints;
4794 this allows a syntax error to take precedence. */
4802 if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
4803 inst.error = BAD_PC;
4819 inst.operands[i].imm = val;
4826 /* If we get here, this operand was successfully parsed. */
4827 inst.operands[i].present = 1;
4831 inst.error = BAD_ARGS;
4836 /* The parse routine should already have set inst.error, but set a
4837 defaut here just in case. */
4839 inst.error = _("syntax error");
4843 /* Do not backtrack over a trailing optional argument that
4844 absorbed some text. We will only fail again, with the
4845 'garbage following instruction' error message, which is
4846 probably less helpful than the current one. */
4847 if (backtrack_index == i && backtrack_pos != str
4848 && upat[i+1] == OP_stop)
4851 inst.error = _("syntax error");
4855 /* Try again, skipping the optional argument at backtrack_pos. */
4856 str = backtrack_pos;
4857 inst.error = backtrack_error;
4858 inst.operands[backtrack_index].present = 0;
4859 i = backtrack_index;
4863 /* Check that we have parsed all the arguments. */
4864 if (*str != '\0' && !inst.error)
4865 inst.error = _("garbage following instruction");
4867 return inst.error ? FAIL : SUCCESS;
4870 #undef po_char_or_fail
4871 #undef po_reg_or_fail
4872 #undef po_reg_or_goto
4873 #undef po_imm_or_fail
4874 #undef po_scalar_or_fail
4876 /* Shorthand macro for instruction encoding functions issuing errors. */
4877 #define constraint(expr, err) do { \
4885 /* Functions for operand encoding. ARM, then Thumb. */
4887 #define rotate_left(v, n) (v << n | v >> (32 - n))
4889 /* If VAL can be encoded in the immediate field of an ARM instruction,
4890 return the encoded form. Otherwise, return FAIL. */
4893 encode_arm_immediate (unsigned int val)
/* An A32 data-processing immediate is an 8-bit constant rotated right by an
   even amount (0..30).  Rotating VAL *left* by i and checking the result fits
   in 8 bits finds that encoding; since i is even, (i << 7) places i/2 into
   the rotate field at bits 8-11.  */
4897 for (i = 0; i < 32; i += 2)
4898 if ((a = rotate_left (val, i)) <= 0xff)
4899 return a | (i << 7); /* 12-bit pack: [shift-cnt,const]. */
4904 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
4905 return the encoded form. Otherwise, return FAIL. */
4907 encode_thumb32_immediate (unsigned int val)
/* T32 "modified immediate" forms: an 8-bit value left-shifted so its top bit
   lands at bit 31-i (first loop), or a replicated-byte pattern
   0x00XY00XY / 0xXYXYXYXY / 0xXY00XY00.  */
4914 for (i = 1; i <= 24; i++)
4917 if ((val & ~(0xff << i)) == 0)
4918 return ((val >> i) & 0x7f) | ((32 - i) << 7);
/* NOTE(review): the assignments narrowing 'a' (presumably a = val & 0xff,
   then a = val & 0xff00) are not visible in this view - confirm against the
   full source before relying on the pattern checks below.  */
4922 if (val == ((a << 16) | a))
4924 if (val == ((a << 24) | (a << 16) | (a << 8) | a))
4928 if (val == ((a << 16) | a))
4929 return 0x200 | (a >> 8);
4933 /* Encode a VFP SP or DP register number into inst.instruction. */
4936 encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
/* D registers 16-31 exist only with VFPv3; record the feature use for the
   current (ARM or Thumb) mode, otherwise report an error.  */
4938 if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
4941 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
4944 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
4947 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
4952 inst.error = _("D register out of range for selected VFP version");
/* Single-precision: bits [reg>>1] in the 4-bit field, low bit in the "extra"
   bit (D/N/M).  Double-precision: low 4 bits in the field, high bit in the
   extra bit.  Field/bit positions differ per operand position (d/n/m).  */
4960 inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
4964 inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
4968 inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
4972 inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
4976 inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
4980 inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
4988 /* Encode a <shift> in an ARM-format instruction. The immediate,
4989 if any, is handled by md_apply_fix. */
4991 encode_arm_shift (int i)
/* RRX is encoded as ROR with a zero immediate; otherwise emit the shift
   kind in bits 5-6 and either the shift register (bits 8-11, with the
   register-shift marker) or leave the immediate to a fixup.  */
4993 if (inst.operands[i].shift_kind == SHIFT_RRX)
4994 inst.instruction |= SHIFT_ROR << 5;
4997 inst.instruction |= inst.operands[i].shift_kind << 5;
4998 if (inst.operands[i].immisreg)
5000 inst.instruction |= SHIFT_BY_REG;
5001 inst.instruction |= inst.operands[i].imm << 8;
5004 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5009 encode_arm_shifter_operand (int i)
/* Operand i is either a (possibly shifted) register, or an immediate which
   becomes an I-bit immediate; the immediate value itself is resolved later
   (by a fixup) and is not encoded here.  */
5011 if (inst.operands[i].isreg)
5013 inst.instruction |= inst.operands[i].reg;
5014 encode_arm_shift (i);
5017 inst.instruction |= INST_IMMEDIATE;
5020 /* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3. */
5022 encode_arm_addr_mode_common (int i, bfd_boolean is_t)
/* Encodes the base register and the pre/post/unindexed addressing bits
   shared by mode-2 and mode-3 loads/stores.  is_t presumably gates the
   "does not accept preindexed" error for T-suffixed forms - the elided
   condition is not visible here; confirm against the full source.  */
5024 assert (inst.operands[i].isreg);
5025 inst.instruction |= inst.operands[i].reg << 16;
5027 if (inst.operands[i].preind)
5031 inst.error = _("instruction does not accept preindexed addressing");
5034 inst.instruction |= PRE_INDEX;
5035 if (inst.operands[i].writeback)
5036 inst.instruction |= WRITE_BACK;
5039 else if (inst.operands[i].postind)
5041 assert (inst.operands[i].writeback);
5043 inst.instruction |= WRITE_BACK;
5045 else /* unindexed - only for coprocessor */
5047 inst.error = _("instruction does not accept unindexed addressing");
/* Warn when the transfer register (bits 12-15) equals a written-back base
   (bits 16-19) - the result is unpredictable on the architecture.  */
5051 if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
5052 && (((inst.instruction & 0x000f0000) >> 16)
5053 == ((inst.instruction & 0x0000f000) >> 12)))
5054 as_warn ((inst.instruction & LOAD_BIT)
5055 ? _("destination register same as write-back base")
5056 : _("source register same as write-back base"));
5059 /* inst.operands[i] was set up by parse_address. Encode it into an
5060 ARM-format mode 2 load or store instruction. If is_t is true,
5061 reject forms that cannot be used with a T instruction (i.e. not
5064 encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
5066 encode_arm_addr_mode_common (i, is_t);
/* Register offset: I-bit set means *register* offset in mode 2 (inverted
   relative to the data-processing meaning), with optional shift.  */
5068 if (inst.operands[i].immisreg)
5070 inst.instruction |= INST_IMMEDIATE; /* yes, this is backwards */
5071 inst.instruction |= inst.operands[i].imm;
5072 if (!inst.operands[i].negative)
5073 inst.instruction |= INDEX_UP;
5074 if (inst.operands[i].shifted)
5076 if (inst.operands[i].shift_kind == SHIFT_RRX)
5077 inst.instruction |= SHIFT_ROR << 5;
5080 inst.instruction |= inst.operands[i].shift_kind << 5;
5081 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
5085 else /* immediate offset in inst.reloc */
5087 if (inst.reloc.type == BFD_RELOC_UNUSED)
5088 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM;
5092 /* inst.operands[i] was set up by parse_address. Encode it into an
5093 ARM-format mode 3 load or store instruction. Reject forms that
5094 cannot be used with such instructions. If is_t is true, reject
5095 forms that cannot be used with a T instruction (i.e. not
5098 encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
/* Mode 3 (halfword / signed byte / doubleword) has no scaled-register
   form, so a shifted index register is an error up front.  */
5100 if (inst.operands[i].immisreg && inst.operands[i].shifted)
5102 inst.error = _("instruction does not accept scaled register index");
5106 encode_arm_addr_mode_common (i, is_t);
5108 if (inst.operands[i].immisreg)
5110 inst.instruction |= inst.operands[i].imm;
5111 if (!inst.operands[i].negative)
5112 inst.instruction |= INDEX_UP;
5114 else /* immediate offset in inst.reloc */
5116 inst.instruction |= HWOFFSET_IMM;
5117 if (inst.reloc.type == BFD_RELOC_UNUSED)
5118 inst.reloc.type = BFD_RELOC_ARM_OFFSET_IMM8;
5122 /* inst.operands[i] was set up by parse_address. Encode it into an
5123 ARM-format instruction. Reject all forms which cannot be encoded
5124 into a coprocessor load/store instruction. If wb_ok is false,
5125 reject use of writeback; if unind_ok is false, reject use of
5126 unindexed addressing. If reloc_override is not 0, use it instead
5127 of BFD_ARM_CP_OFF_IMM. */
5130 encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
5132 inst.instruction |= inst.operands[i].reg << 16;
5134 assert (!(inst.operands[i].preind && inst.operands[i].postind));
/* Neither pre- nor post-indexed: the unindexed coprocessor form, where the
   8-bit field holds an option value rather than an offset.  */
5136 if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
5138 assert (!inst.operands[i].writeback);
5141 inst.error = _("instruction does not support unindexed addressing");
5144 inst.instruction |= inst.operands[i].imm;
5145 inst.instruction |= INDEX_UP;
5149 if (inst.operands[i].preind)
5150 inst.instruction |= PRE_INDEX;
5152 if (inst.operands[i].writeback)
5154 if (inst.operands[i].reg == REG_PC)
5156 inst.error = _("pc may not be used with write-back");
5161 inst.error = _("instruction does not support writeback");
5164 inst.instruction |= WRITE_BACK;
/* The offset is left to a fixup; pick the caller's override or the
   mode-appropriate coprocessor-offset reloc.  */
5168 inst.reloc.type = reloc_override;
5169 else if (thumb_mode)
5170 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
5172 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
5176 /* inst.reloc.exp describes an "=expr" load pseudo-operation.
5177 Determine whether it can be performed with a move instruction; if
5178 it can, convert inst.instruction to that move instruction and
5179 return 1; if it can't, convert inst.instruction to a literal-pool
5180 load and return 0. If this is not a valid thing to do in the
5181 current context, set inst.error and return 1.
5183 inst.operands[i] describes the destination register. */
5186 move_or_literal_pool (int i, bfd_boolean thumb_p, bfd_boolean mode_3)
5191 tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
/* =expr only makes sense on a *load*; stores cannot take a literal.  */
5195 if ((inst.instruction & tbit) == 0)
5197 inst.error = _("invalid pseudo operation");
5200 if (inst.reloc.exp.X_op != O_constant && inst.reloc.exp.X_op != O_symbol)
5202 inst.error = _("constant expression expected");
5205 if (inst.reloc.exp.X_op == O_constant)
/* Try progressively: Thumb mov(1) for 0-255, then ARM mov of an encodable
   immediate, then mvn of the complemented value.  */
5209 if (!unified_syntax && (inst.reloc.exp.X_add_number & ~0xFF) == 0)
5211 /* This can be done with a mov(1) instruction. */
5212 inst.instruction = T_OPCODE_MOV_I8 | (inst.operands[i].reg << 8);
5213 inst.instruction |= inst.reloc.exp.X_add_number;
5219 int value = encode_arm_immediate (inst.reloc.exp.X_add_number);
5222 /* This can be done with a mov instruction. */
5223 inst.instruction &= LITERAL_MASK;
5224 inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
5225 inst.instruction |= value & 0xfff;
5229 value = encode_arm_immediate (~inst.reloc.exp.X_add_number);
5232 /* This can be done with a mvn instruction. */
5233 inst.instruction &= LITERAL_MASK;
5234 inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
5235 inst.instruction |= value & 0xfff;
/* No move form worked: place the value in the literal pool and rewrite the
   operand as a pc-relative load resolved by the appropriate reloc.  */
5241 if (add_to_lit_pool () == FAIL)
5243 inst.error = _("literal pool insertion failed");
5246 inst.operands[1].reg = REG_PC;
5247 inst.operands[1].isreg = 1;
5248 inst.operands[1].preind = 1;
5249 inst.reloc.pc_rel = 1;
5250 inst.reloc.type = (thumb_p
5251 ? BFD_RELOC_ARM_THUMB_OFFSET
5253 ? BFD_RELOC_ARM_HWLITERAL
5254 : BFD_RELOC_ARM_LITERAL));
5258 /* Functions for instruction encoding, sorted by subarchitecture.
5259 First some generics; their names are taken from the conventional
5260 bit positions for register arguments in ARM format instructions. */
/* NOTE(review): the static function headers (do_rd, do_rd_rm, do_rd_rn,
   do_rn_rd, do_rm_rd_rn, do_rd_rn_rm, ...) are elided from this view;
   each statement group below is one encoder body.  Confirm the exact
   names against the full source.  */
5270 inst.instruction |= inst.operands[0].reg << 12;
5276 inst.instruction |= inst.operands[0].reg << 12;
5277 inst.instruction |= inst.operands[1].reg;
5283 inst.instruction |= inst.operands[0].reg << 12;
5284 inst.instruction |= inst.operands[1].reg << 16;
5290 inst.instruction |= inst.operands[0].reg << 16;
5291 inst.instruction |= inst.operands[1].reg << 12;
/* Rd, Rm, Rn form; also used for SWP, which forbids Rn overlapping the
   other operands (unpredictable on the architecture).  */
5297 unsigned Rn = inst.operands[2].reg;
5298 /* Enforce restrictions on SWP instruction. */
5299 if ((inst.instruction & 0x0fbfffff) == 0x01000090)
5300 constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
5301 _("Rn must not overlap other operands"));
5302 inst.instruction |= inst.operands[0].reg << 12;
5303 inst.instruction |= inst.operands[1].reg;
5304 inst.instruction |= Rn << 16;
5310 inst.instruction |= inst.operands[0].reg << 12;
5311 inst.instruction |= inst.operands[1].reg << 16;
5312 inst.instruction |= inst.operands[2].reg;
5318 inst.instruction |= inst.operands[0].reg;
5319 inst.instruction |= inst.operands[1].reg << 12;
5320 inst.instruction |= inst.operands[2].reg << 16;
5326 inst.instruction |= inst.operands[0].imm;
/* Rd plus a coprocessor address (operand 1).  */
5332 inst.instruction |= inst.operands[0].reg << 12;
5333 encode_arm_cp_address (1, TRUE, TRUE, 0);
5336 /* ARM instructions, in alphabetical order by function name (except
5337 that wrapper functions appear immediately after the function they
5340 /* This is a pseudo-op of the form "adr rd, label" to be converted
5341 into a relative address of the form "add rd, pc, #label-.-8". */
5346 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
5348 /* Frag hacking will turn this into a sub instruction if the offset turns
5349 out to be negative. */
5350 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5351 inst.reloc.pc_rel = 1;
/* -8 compensates for the ARM pipeline: pc reads as the instruction
   address plus 8 in ARM state.  */
5352 inst.reloc.exp.X_add_number -= 8;
5355 /* This is a pseudo-op of the form "adrl rd, label" to be converted
5356 into a relative address of the form:
5357 add rd, pc, #low(label-.-8)"
5358 add rd, rd, #high(label-.-8)" */
5363 inst.instruction |= (inst.operands[0].reg << 12); /* Rd */
5365 /* Frag hacking will turn this into a sub instruction if the offset turns
5366 out to be negative. */
5367 inst.reloc.type = BFD_RELOC_ARM_ADRL_IMMEDIATE;
5368 inst.reloc.pc_rel = 1;
/* adrl expands to a two-instruction sequence, hence twice the size.  */
5369 inst.size = INSN_SIZE * 2;
5370 inst.reloc.exp.X_add_number -= 8;
/* Arithmetic (Rd, Rn, shifter-operand); Rn defaults to Rd when omitted.  */
5376 if (!inst.operands[1].present)
5377 inst.operands[1].reg = inst.operands[0].reg;
5378 inst.instruction |= inst.operands[0].reg << 12;
5379 inst.instruction |= inst.operands[1].reg << 16;
5380 encode_arm_shifter_operand (2);
/* Barrier (dmb/dsb/isb family): option operand defaults to 0xf (SY).  */
5386 if (inst.operands[0].present)
5388 constraint ((inst.instruction & 0xf0) != 0x40
5389 && inst.operands[0].imm != 0xf,
5390 "bad barrier type");
5391 inst.instruction |= inst.operands[0].imm;
5394 inst.instruction |= 0xf;
/* bfc: operands are Rd, lsb, width; hardware wants lsb and msb.  */
5400 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
5401 constraint (msb > 32, _("bit-field extends past end of register"));
5402 /* The instruction encoding stores the LSB and MSB,
5403 not the LSB and width. */
5404 inst.instruction |= inst.operands[0].reg << 12;
5405 inst.instruction |= inst.operands[1].imm << 7;
5406 inst.instruction |= (msb - 1) << 16;
/* bfi: like bfc but with a source register; #0 source degenerates to bfc.  */
5414 /* #0 in second position is alternative syntax for bfc, which is
5415 the same instruction but with REG_PC in the Rm field. */
5416 if (!inst.operands[1].isreg)
5417 inst.operands[1].reg = REG_PC;
5419 msb = inst.operands[2].imm + inst.operands[3].imm;
5420 constraint (msb > 32, _("bit-field extends past end of register"));
5421 /* The instruction encoding stores the LSB and MSB,
5422 not the LSB and width. */
5423 inst.instruction |= inst.operands[0].reg << 12;
5424 inst.instruction |= inst.operands[1].reg;
5425 inst.instruction |= inst.operands[2].imm << 7;
5426 inst.instruction |= (msb - 1) << 16;
/* sbfx/ubfx: Rd, Rn, lsb, width; width is stored as width-1.  */
5432 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
5433 _("bit-field extends past end of register"));
5434 inst.instruction |= inst.operands[0].reg << 12;
5435 inst.instruction |= inst.operands[1].reg;
5436 inst.instruction |= inst.operands[2].imm << 7;
5437 inst.instruction |= (inst.operands[3].imm - 1) << 16;
5440 /* ARM V5 breakpoint instruction (argument parse)
5441 BKPT <16 bit unsigned immediate>
5442 Instruction is not conditional.
5443 The bit pattern given in insns[] has the COND_ALWAYS condition,
5444 and it is an error if the caller tried to override that. */
5449 /* Top 12 of 16 bits to bits 19:8. */
5450 inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
5452 /* Bottom 4 of 16 bits to bits 3:0. */
5453 inst.instruction |= inst.operands[0].imm & 0xf;
5457 encode_branch (int default_reloc)
/* Common tail for branch encoders: honour a "(plt)" suffix if the operand
   carried one, otherwise use the caller's default pc-relative reloc.  */
5459 if (inst.operands[0].hasreloc)
5461 constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32,
5462 _("the only suffix valid here is '(plt)'"));
5463 inst.reloc.type = BFD_RELOC_ARM_PLT32;
5467 inst.reloc.type = default_reloc;
5469 inst.reloc.pc_rel = 1;
/* Plain branch: EABI v4+ objects use the _JUMP reloc so the linker can
   veneer interworking branches; older ones use the classic reloc.  */
5476 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
5477 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
5480 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
/* bl: under EABI v4+, conditional bl cannot become blx, so it gets the
   _JUMP reloc instead of _CALL.  */
5487 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
5489 if (inst.cond == COND_ALWAYS)
5490 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
5492 encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
5496 encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
5499 /* ARM V5 branch-link-exchange instruction (argument parse)
5500 BLX <target_addr> ie BLX(1)
5501 BLX{<condition>} <Rm> ie BLX(2)
5502 Unfortunately, there are two different opcodes for this mnemonic.
5503 So, the insns[].value is not used, and the code here zaps values
5504 into inst.instruction.
5505 Also, the <target_addr> can be 25 bits, hence has its own reloc. */
5510 if (inst.operands[0].isreg)
5512 /* Arg is a register; the opcode provided by insns[] is correct.
5513 It is not illegal to do "blx pc", just useless. */
5514 if (inst.operands[0].reg == REG_PC)
5515 as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));
5517 inst.instruction |= inst.operands[0].reg;
5521 /* Arg is an address; this instruction cannot be executed
5522 conditionally, and the opcode must be adjusted. */
5523 constraint (inst.cond != COND_ALWAYS, BAD_COND);
5524 inst.instruction = 0xfa000000;
5526 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
5527 encode_branch (BFD_RELOC_ARM_PCREL_CALL);
5530 encode_branch (BFD_RELOC_ARM_PCREL_BLX);
/* bx Rm.  */
5537 if (inst.operands[0].reg == REG_PC)
5538 as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));
5540 inst.instruction |= inst.operands[0].reg;
5544 /* ARM v5TEJ. Jump to Jazelle code. */
5549 if (inst.operands[0].reg == REG_PC)
5550 as_tsktsk (_("use of r15 in bxj is not really useful"));
5552 inst.instruction |= inst.operands[0].reg;
5555 /* Co-processor data operation:
5556 CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
5557 CDP2 <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>} */
5561 inst.instruction |= inst.operands[0].reg << 8;
5562 inst.instruction |= inst.operands[1].imm << 20;
5563 inst.instruction |= inst.operands[2].reg << 12;
5564 inst.instruction |= inst.operands[3].reg << 16;
5565 inst.instruction |= inst.operands[4].reg;
5566 inst.instruction |= inst.operands[5].imm << 5;
/* Compare-class: Rn plus a shifter operand (Rd field is unused).  */
5572 inst.instruction |= inst.operands[0].reg << 16;
5573 encode_arm_shifter_operand (1);
5576 /* Transfer between coprocessor and ARM registers.
5577 MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
5582 No special properties. */
5587 inst.instruction |= inst.operands[0].reg << 8;
5588 inst.instruction |= inst.operands[1].imm << 21;
5589 inst.instruction |= inst.operands[2].reg << 12;
5590 inst.instruction |= inst.operands[3].reg << 16;
5591 inst.instruction |= inst.operands[4].reg;
5592 inst.instruction |= inst.operands[5].imm << 5;
5595 /* Transfer between coprocessor register and pair of ARM registers.
5596 MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
5601 Two XScale instructions are special cases of these:
5603 MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
5604 MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
5606 Result unpredicatable if Rd or Rn is R15. */
5611 inst.instruction |= inst.operands[0].reg << 8;
5612 inst.instruction |= inst.operands[1].imm << 4;
5613 inst.instruction |= inst.operands[2].reg << 12;
5614 inst.instruction |= inst.operands[3].reg << 16;
5615 inst.instruction |= inst.operands[4].reg;
/* NOTE(review): the next two fragments appear to be separate small encoders
   (immediate at bit 6 + immediate, and a single immediate) whose names are
   elided - likely the cps/dbg family; confirm in the full source.  */
5621 inst.instruction |= inst.operands[0].imm << 6;
5622 inst.instruction |= inst.operands[1].imm;
5628 inst.instruction |= inst.operands[0].imm;
5634 /* There is no IT instruction in ARM mode. We
5635 process it but do not generate code for it. */
/* ldm/stm: base register, register-list bitmask, '^' (type 2/3) flag, and
   writeback, with the architecture's UNPREDICTABLE cases warned about.  */
5642 int base_reg = inst.operands[0].reg;
5643 int range = inst.operands[1].imm;
5645 inst.instruction |= base_reg << 16;
5646 inst.instruction |= range;
5648 if (inst.operands[1].writeback)
5649 inst.instruction |= LDM_TYPE_2_OR_3;
5651 if (inst.operands[0].writeback)
5653 inst.instruction |= WRITE_BACK;
5654 /* Check for unpredictable uses of writeback. */
5655 if (inst.instruction & LOAD_BIT)
5657 /* Not allowed in LDM type 2. */
5658 if ((inst.instruction & LDM_TYPE_2_OR_3)
5659 && ((range & (1 << REG_PC)) == 0))
5660 as_warn (_("writeback of base register is UNPREDICTABLE"));
5661 /* Only allowed if base reg not in list for other types. */
5662 else if (range & (1 << base_reg))
5663 as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
5667 /* Not allowed for type 2. */
5668 if (inst.instruction & LDM_TYPE_2_OR_3)
5669 as_warn (_("writeback of base register is UNPREDICTABLE"));
5670 /* Only allowed if base reg not in list, or first in list. */
5671 else if ((range & (1 << base_reg))
5672 && (range & ((1 << base_reg) - 1)))
5673 as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
5678 /* ARMv5TE load-consecutive (argument parse)
/* ldrd/strd: first register must be even and not r14; the second, if given,
   must be the next consecutive register (and defaults to it).  */
5687 constraint (inst.operands[0].reg % 2 != 0,
5688 _("first destination register must be even"));
5689 constraint (inst.operands[1].present
5690 && inst.operands[1].reg != inst.operands[0].reg + 1,
5691 _("can only load two consecutive registers"));
5692 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
5693 constraint (!inst.operands[2].isreg, _("'[' expected"));
5695 if (!inst.operands[1].present)
5696 inst.operands[1].reg = inst.operands[0].reg + 1;
5698 if (inst.instruction & LOAD_BIT)
5700 /* encode_arm_addr_mode_3 will diagnose overlap between the base
5701 register and the first register written; we have to diagnose
5702 overlap between the base and the second register written here. */
5704 if (inst.operands[2].reg == inst.operands[1].reg
5705 && (inst.operands[2].writeback || inst.operands[2].postind))
5706 as_warn (_("base register written back, and overlaps "
5707 "second destination register"));
5709 /* For an index-register load, the index register must not overlap the
5710 destination (even if not write-back). */
5711 else if (inst.operands[2].immisreg
5712 && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
5713 || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
5714 as_warn (_("index register overlaps destination register"));
5717 inst.instruction |= inst.operands[0].reg << 12;
5718 encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
/* ldrex: only the plain "[Rn]" addressing form is legal - no offset,
   index, writeback or shift.  */
5724 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
5725 || inst.operands[1].postind || inst.operands[1].writeback
5726 || inst.operands[1].immisreg || inst.operands[1].shifted
5727 || inst.operands[1].negative
5728 /* This can arise if the programmer has written
5730 or if they have mistakenly used a register name as the last
5733 It is very difficult to distinguish between these two cases
5734 because "rX" might actually be a label. ie the register
5735 name has been occluded by a symbol of the same name. So we
5736 just generate a general 'bad addressing mode' type error
5737 message and leave it up to the programmer to discover the
5738 true cause and fix their mistake. */
5739 || (inst.operands[1].reg == REG_PC),
5742 constraint (inst.reloc.exp.X_op != O_constant
5743 || inst.reloc.exp.X_add_number != 0,
5744 _("offset must be zero in ARM encoding"));
5746 inst.instruction |= inst.operands[0].reg << 12;
5747 inst.instruction |= inst.operands[1].reg << 16;
5748 inst.reloc.type = BFD_RELOC_UNUSED;
/* ldrexd: even destination register pair, consecutive, not r14.  */
5754 constraint (inst.operands[0].reg % 2 != 0,
5755 _("even register required"));
5756 constraint (inst.operands[1].present
5757 && inst.operands[1].reg != inst.operands[0].reg + 1,
5758 _("can only load two consecutive registers"));
5759 /* If op 1 were present and equal to PC, this function wouldn't
5760 have been called in the first place. */
5761 constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
5763 inst.instruction |= inst.operands[0].reg << 12;
5764 inst.instruction |= inst.operands[2].reg << 16;
/* ldr/str (word/byte): an "=expr" pseudo operand may be turned into a move
   or a literal-pool load by move_or_literal_pool.  */
5770 inst.instruction |= inst.operands[0].reg << 12;
5771 if (!inst.operands[1].isreg)
5772 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/FALSE))
5774 encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
5780 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
5782 if (inst.operands[1].preind)
5784 constraint (inst.reloc.exp.X_op != O_constant ||
5785 inst.reloc.exp.X_add_number != 0,
5786 _("this instruction requires a post-indexed address"));
5788 inst.operands[1].preind = 0;
5789 inst.operands[1].postind = 1;
5790 inst.operands[1].writeback = 1;
5792 inst.instruction |= inst.operands[0].reg << 12;
5793 encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
5796 /* Halfword and signed-byte load/store operations. */
5801 inst.instruction |= inst.operands[0].reg << 12;
5802 if (!inst.operands[1].isreg)
5803 if (move_or_literal_pool (0, /*thumb_p=*/FALSE, /*mode_3=*/TRUE))
5805 encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
/* T-suffixed halfword/signed-byte variant: same post-index rewrite.  */
5811 /* ldrt/strt always use post-indexed addressing. Turn [Rn] into [Rn]! and
5813 if (inst.operands[1].preind)
5815 constraint (inst.reloc.exp.X_op != O_constant ||
5816 inst.reloc.exp.X_add_number != 0,
5817 _("this instruction requires a post-indexed address"));
5819 inst.operands[1].preind = 0;
5820 inst.operands[1].postind = 1;
5821 inst.operands[1].writeback = 1;
5823 inst.instruction |= inst.operands[0].reg << 12;
5824 encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
5827 /* Co-processor register load/store.
5828 Format: <LDC|STC>{cond}[L] CP#,CRd,<address> */
5832 inst.instruction |= inst.operands[0].reg << 8;
5833 inst.instruction |= inst.operands[1].reg << 12;
5834 encode_arm_cp_address (2, TRUE, TRUE, 0);
/* mla/mls: Rd, Rm, Rs, Rn with the classic rd!=rm advisory.  */
5840 /* This restriction does not apply to mls (nor to mla in v6, but
5841 that's hard to detect at present). */
5842 if (inst.operands[0].reg == inst.operands[1].reg
5843 && !(inst.instruction & 0x00400000))
5844 as_tsktsk (_("rd and rm should be different in mla"));
5846 inst.instruction |= inst.operands[0].reg << 16;
5847 inst.instruction |= inst.operands[1].reg;
5848 inst.instruction |= inst.operands[2].reg << 8;
5849 inst.instruction |= inst.operands[3].reg << 12;
/* mov/mvn: Rd plus a shifter operand.  */
5856 inst.instruction |= inst.operands[0].reg << 12;
5857 encode_arm_shifter_operand (1);
5860 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>. */
5864 inst.instruction |= inst.operands[0].reg << 12;
5865 /* The value is in two pieces: 0:11, 16:19. */
5866 inst.instruction |= (inst.operands[1].imm & 0x00000fff);
5867 inst.instruction |= (inst.operands[1].imm & 0x0000f000) << 4;
5873 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
5874 constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
5876 _("'CPSR' or 'SPSR' expected"));
5877 inst.instruction |= inst.operands[0].reg << 12;
5878 inst.instruction |= (inst.operands[1].imm & SPSR_BIT);
5881 /* Two possible forms:
5882 "{C|S}PSR_<field>, Rm",
5883 "{C|S}PSR_f, #expression". */
5888 inst.instruction |= inst.operands[0].imm;
5889 if (inst.operands[1].isreg)
5890 inst.instruction |= inst.operands[1].reg;
5893 inst.instruction |= INST_IMMEDIATE;
5894 inst.reloc.type = BFD_RELOC_ARM_IMMEDIATE;
5895 inst.reloc.pc_rel = 0;
/* mul: Rd, Rm, Rs; Rs defaults to Rd when omitted, rd!=rm advised.  */
5902 if (!inst.operands[2].present)
5903 inst.operands[2].reg = inst.operands[0].reg;
5904 inst.instruction |= inst.operands[0].reg << 16;
5905 inst.instruction |= inst.operands[1].reg;
5906 inst.instruction |= inst.operands[2].reg << 8;
5908 if (inst.operands[0].reg == inst.operands[1].reg)
5909 as_tsktsk (_("rd and rm should be different in mul"));
5912 /* Long Multiply Parser
5913 UMULL RdLo, RdHi, Rm, Rs
5914 SMULL RdLo, RdHi, Rm, Rs
5915 UMLAL RdLo, RdHi, Rm, Rs
5916 SMLAL RdLo, RdHi, Rm, Rs. */
5921 inst.instruction |= inst.operands[0].reg << 12;
5922 inst.instruction |= inst.operands[1].reg << 16;
5923 inst.instruction |= inst.operands[2].reg;
5924 inst.instruction |= inst.operands[3].reg << 8;
5926 /* rdhi, rdlo and rm must all be different. */
5927 if (inst.operands[0].reg == inst.operands[1].reg
5928 || inst.operands[0].reg == inst.operands[2].reg
5929 || inst.operands[1].reg == inst.operands[2].reg)
5930 as_tsktsk (_("rdhi, rdlo and rm must all be different"));
5936 if (inst.operands[0].present)
5938 /* Architectural NOP hints are CPSR sets with no bits selected. */
5939 inst.instruction &= 0xf0000000;
5940 inst.instruction |= 0x0320f000 + inst.operands[0].imm;
5944 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
5945 PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
5946 Condition defaults to COND_ALWAYS.
5947 Error if Rd, Rn or Rm are R15. */
5952 inst.instruction |= inst.operands[0].reg << 12;
5953 inst.instruction |= inst.operands[1].reg << 16;
5954 inst.instruction |= inst.operands[2].reg;
5955 if (inst.operands[3].present)
5956 encode_arm_shift (3);
5959 /* ARM V6 PKHTB (Argument Parse). */
5964 if (!inst.operands[3].present)
5966 /* If the shift specifier is omitted, turn the instruction
5967 into pkhbt rd, rm, rn. */
5968 inst.instruction &= 0xfff00010;
5969 inst.instruction |= inst.operands[0].reg << 12;
5970 inst.instruction |= inst.operands[1].reg;
5971 inst.instruction |= inst.operands[2].reg << 16;
5975 inst.instruction |= inst.operands[0].reg << 12;
5976 inst.instruction |= inst.operands[1].reg << 16;
5977 inst.instruction |= inst.operands[2].reg;
5978 encode_arm_shift (3);
5982 /* ARMv5TE: Preload-Cache
5986 Syntactically, like LDR with B=1, W=0, L=1. */
5991 constraint (!inst.operands[0].isreg,
5992 _("'[' expected after PLD mnemonic"));
5993 constraint (inst.operands[0].postind,
5994 _("post-indexed expression used in preload instruction"));
5995 constraint (inst.operands[0].writeback,
5996 _("writeback used in preload instruction"));
5997 constraint (!inst.operands[0].preind,
5998 _("unindexed addressing used in preload instruction"));
5999 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6002 /* ARMv7: PLI <addr_mode> */
6006 constraint (!inst.operands[0].isreg,
6007 _("'[' expected after PLI mnemonic"));
6008 constraint (inst.operands[0].postind,
6009 _("post-indexed expression used in preload instruction"));
6010 constraint (inst.operands[0].writeback,
6011 _("writeback used in preload instruction"));
6012 constraint (!inst.operands[0].preind,
6013 _("unindexed addressing used in preload instruction"));
6014 encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
6015 inst.instruction &= ~PRE_INDEX;
6021 inst.operands[1] = inst.operands[0];
6022 memset (&inst.operands[0], 0, sizeof inst.operands[0]);
6023 inst.operands[0].isreg = 1;
6024 inst.operands[0].writeback = 1;
6025 inst.operands[0].reg = REG_SP;
6029 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
6030 word at the specified address and the following word
6032 Unconditionally executed.
6033 Error if Rn is R15. */
6038 inst.instruction |= inst.operands[0].reg << 16;
6039 if (inst.operands[0].writeback)
6040 inst.instruction |= WRITE_BACK;
6043 /* ARM V6 ssat (argument parse). */
6048 inst.instruction |= inst.operands[0].reg << 12;
6049 inst.instruction |= (inst.operands[1].imm - 1) << 16;
6050 inst.instruction |= inst.operands[2].reg;
6052 if (inst.operands[3].present)
6053 encode_arm_shift (3);
6056 /* ARM V6 usat (argument parse). */
6061 inst.instruction |= inst.operands[0].reg << 12;
6062 inst.instruction |= inst.operands[1].imm << 16;
6063 inst.instruction |= inst.operands[2].reg;
6065 if (inst.operands[3].present)
6066 encode_arm_shift (3);
6069 /* ARM V6 ssat16 (argument parse). */
6074 inst.instruction |= inst.operands[0].reg << 12;
6075 inst.instruction |= ((inst.operands[1].imm - 1) << 16);
6076 inst.instruction |= inst.operands[2].reg;
6082 inst.instruction |= inst.operands[0].reg << 12;
6083 inst.instruction |= inst.operands[1].imm << 16;
6084 inst.instruction |= inst.operands[2].reg;
6087 /* ARM V6 SETEND (argument parse). Sets the E bit in the CPSR while
6088 preserving the other bits.
6090 setend <endian_specifier>, where <endian_specifier> is either
6096 if (inst.operands[0].imm)
6097 inst.instruction |= 0x200;
6103 unsigned int Rm = (inst.operands[1].present
6104 ? inst.operands[1].reg
6105 : inst.operands[0].reg);
6107 inst.instruction |= inst.operands[0].reg << 12;
6108 inst.instruction |= Rm;
6109 if (inst.operands[2].isreg) /* Rd, {Rm,} Rs */
6111 inst.instruction |= inst.operands[2].reg << 8;
6112 inst.instruction |= SHIFT_BY_REG;
6115 inst.reloc.type = BFD_RELOC_ARM_SHIFT_IMM;
6121 inst.reloc.type = BFD_RELOC_ARM_SMC;
6122 inst.reloc.pc_rel = 0;
6128 inst.reloc.type = BFD_RELOC_ARM_SWI;
6129 inst.reloc.pc_rel = 0;
6132 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
6133 SMLAxy{cond} Rd,Rm,Rs,Rn
6134 SMLAWy{cond} Rd,Rm,Rs,Rn
6135 Error if any register is R15. */
6140 inst.instruction |= inst.operands[0].reg << 16;
6141 inst.instruction |= inst.operands[1].reg;
6142 inst.instruction |= inst.operands[2].reg << 8;
6143 inst.instruction |= inst.operands[3].reg << 12;
6146 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
6147 SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
6148 Error if any register is R15.
6149 Warning if Rdlo == Rdhi. */
6154 inst.instruction |= inst.operands[0].reg << 12;
6155 inst.instruction |= inst.operands[1].reg << 16;
6156 inst.instruction |= inst.operands[2].reg;
6157 inst.instruction |= inst.operands[3].reg << 8;
6159 if (inst.operands[0].reg == inst.operands[1].reg)
6160 as_tsktsk (_("rdhi and rdlo must be different"));
6163 /* ARM V5E (El Segundo) signed-multiply (argument parse)
6164 SMULxy{cond} Rd,Rm,Rs
6165 Error if any register is R15. */
6170 inst.instruction |= inst.operands[0].reg << 16;
6171 inst.instruction |= inst.operands[1].reg;
6172 inst.instruction |= inst.operands[2].reg << 8;
6175 /* ARM V6 srs (argument parse). */
6180 inst.instruction |= inst.operands[0].imm;
6181 if (inst.operands[0].writeback)
6182 inst.instruction |= WRITE_BACK;
6185 /* ARM V6 strex (argument parse). */
6190 constraint (!inst.operands[2].isreg || !inst.operands[2].preind
6191 || inst.operands[2].postind || inst.operands[2].writeback
6192 || inst.operands[2].immisreg || inst.operands[2].shifted
6193 || inst.operands[2].negative
6194 /* See comment in do_ldrex(). */
6195 || (inst.operands[2].reg == REG_PC),
6198 constraint (inst.operands[0].reg == inst.operands[1].reg
6199 || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);
6201 constraint (inst.reloc.exp.X_op != O_constant
6202 || inst.reloc.exp.X_add_number != 0,
6203 _("offset must be zero in ARM encoding"));
6205 inst.instruction |= inst.operands[0].reg << 12;
6206 inst.instruction |= inst.operands[1].reg;
6207 inst.instruction |= inst.operands[2].reg << 16;
6208 inst.reloc.type = BFD_RELOC_UNUSED;
6214 constraint (inst.operands[1].reg % 2 != 0,
6215 _("even register required"));
6216 constraint (inst.operands[2].present
6217 && inst.operands[2].reg != inst.operands[1].reg + 1,
6218 _("can only store two consecutive registers"));
6219 /* If op 2 were present and equal to PC, this function wouldn't
6220 have been called in the first place. */
6221 constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));
6223 constraint (inst.operands[0].reg == inst.operands[1].reg
6224 || inst.operands[0].reg == inst.operands[1].reg + 1
6225 || inst.operands[0].reg == inst.operands[3].reg,
6228 inst.instruction |= inst.operands[0].reg << 12;
6229 inst.instruction |= inst.operands[1].reg;
6230 inst.instruction |= inst.operands[3].reg << 16;
6233 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
6234 extends it to 32-bits, and adds the result to a value in another
6235 register. You can specify a rotation by 0, 8, 16, or 24 bits
6236 before extracting the 16-bit value.
6237 SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
6238 Condition defaults to COND_ALWAYS.
6239 Error if any register uses R15. */
6244 inst.instruction |= inst.operands[0].reg << 12;
6245 inst.instruction |= inst.operands[1].reg << 16;
6246 inst.instruction |= inst.operands[2].reg;
6247 inst.instruction |= inst.operands[3].imm << 10;
6252 SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
6253 Condition defaults to COND_ALWAYS.
6254 Error if any register uses R15. */
6259 inst.instruction |= inst.operands[0].reg << 12;
6260 inst.instruction |= inst.operands[1].reg;
6261 inst.instruction |= inst.operands[2].imm << 10;
6264 /* VFP instructions. In a logical order: SP variant first, monad
6265 before dyad, arithmetic then move then load/store. */
6268 do_vfp_sp_monadic (void)
6270 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6271 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
6275 do_vfp_sp_dyadic (void)
6277 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6278 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
6279 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
6283 do_vfp_sp_compare_z (void)
6285 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6289 do_vfp_dp_sp_cvt (void)
6291 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6292 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
6296 do_vfp_sp_dp_cvt (void)
6298 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6299 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
6303 do_vfp_reg_from_sp (void)
6305 inst.instruction |= inst.operands[0].reg << 12;
6306 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
6310 do_vfp_reg2_from_sp2 (void)
6312 constraint (inst.operands[2].imm != 2,
6313 _("only two consecutive VFP SP registers allowed here"));
6314 inst.instruction |= inst.operands[0].reg << 12;
6315 inst.instruction |= inst.operands[1].reg << 16;
6316 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
6320 do_vfp_sp_from_reg (void)
6322 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
6323 inst.instruction |= inst.operands[1].reg << 12;
6327 do_vfp_sp2_from_reg2 (void)
6329 constraint (inst.operands[0].imm != 2,
6330 _("only two consecutive VFP SP registers allowed here"));
6331 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
6332 inst.instruction |= inst.operands[1].reg << 12;
6333 inst.instruction |= inst.operands[2].reg << 16;
6337 do_vfp_sp_ldst (void)
6339 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6340 encode_arm_cp_address (1, FALSE, TRUE, 0);
6344 do_vfp_dp_ldst (void)
6346 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6347 encode_arm_cp_address (1, FALSE, TRUE, 0);
6352 vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
6354 if (inst.operands[0].writeback)
6355 inst.instruction |= WRITE_BACK;
6357 constraint (ldstm_type != VFP_LDSTMIA,
6358 _("this addressing mode requires base-register writeback"));
6359 inst.instruction |= inst.operands[0].reg << 16;
6360 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
6361 inst.instruction |= inst.operands[1].imm;
6365 vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
6369 if (inst.operands[0].writeback)
6370 inst.instruction |= WRITE_BACK;
6372 constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
6373 _("this addressing mode requires base-register writeback"));
6375 inst.instruction |= inst.operands[0].reg << 16;
6376 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
6378 count = inst.operands[1].imm << 1;
6379 if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
6382 inst.instruction |= count;
6386 do_vfp_sp_ldstmia (void)
6388 vfp_sp_ldstm (VFP_LDSTMIA);
6392 do_vfp_sp_ldstmdb (void)
6394 vfp_sp_ldstm (VFP_LDSTMDB);
6398 do_vfp_dp_ldstmia (void)
6400 vfp_dp_ldstm (VFP_LDSTMIA);
6404 do_vfp_dp_ldstmdb (void)
6406 vfp_dp_ldstm (VFP_LDSTMDB);
6410 do_vfp_xp_ldstmia (void)
6412 vfp_dp_ldstm (VFP_LDSTMIAX);
6416 do_vfp_xp_ldstmdb (void)
6418 vfp_dp_ldstm (VFP_LDSTMDBX);
6422 do_vfp_dp_rd_rm (void)
6424 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6425 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
6429 do_vfp_dp_rn_rd (void)
6431 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
6432 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
6436 do_vfp_dp_rd_rn (void)
6438 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6439 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
6443 do_vfp_dp_rd_rn_rm (void)
6445 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6446 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
6447 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
6453 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6457 do_vfp_dp_rm_rd_rn (void)
6459 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
6460 encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
6461 encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
6464 /* VFPv3 instructions. */
6466 do_vfp_sp_const (void)
6468 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6469 inst.instruction |= (inst.operands[1].imm & 15) << 16;
6470 inst.instruction |= (inst.operands[1].imm >> 4);
6474 do_vfp_dp_const (void)
6476 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6477 inst.instruction |= (inst.operands[1].imm & 15) << 16;
6478 inst.instruction |= (inst.operands[1].imm >> 4);
6482 vfp_conv (int srcsize)
6484 unsigned immbits = srcsize - inst.operands[1].imm;
6485 inst.instruction |= (immbits & 1) << 5;
6486 inst.instruction |= (immbits >> 1);
6490 do_vfp_sp_conv_16 (void)
6492 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6497 do_vfp_dp_conv_16 (void)
6499 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6504 do_vfp_sp_conv_32 (void)
6506 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
6511 do_vfp_dp_conv_32 (void)
6513 encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
6518 /* FPA instructions. Also in a logical order. */
6523 inst.instruction |= inst.operands[0].reg << 16;
6524 inst.instruction |= inst.operands[1].reg;
6528 do_fpa_ldmstm (void)
6530 inst.instruction |= inst.operands[0].reg << 12;
6531 switch (inst.operands[1].imm)
6533 case 1: inst.instruction |= CP_T_X; break;
6534 case 2: inst.instruction |= CP_T_Y; break;
6535 case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
6540 if (inst.instruction & (PRE_INDEX | INDEX_UP))
6542 /* The instruction specified "ea" or "fd", so we can only accept
6543 [Rn]{!}. The instruction does not really support stacking or
6544 unstacking, so we have to emulate these by setting appropriate
6545 bits and offsets. */
6546 constraint (inst.reloc.exp.X_op != O_constant
6547 || inst.reloc.exp.X_add_number != 0,
6548 _("this instruction does not support indexing"));
6550 if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
6551 inst.reloc.exp.X_add_number = 12 * inst.operands[1].imm;
6553 if (!(inst.instruction & INDEX_UP))
6554 inst.reloc.exp.X_add_number = -inst.reloc.exp.X_add_number;
6556 if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
6558 inst.operands[2].preind = 0;
6559 inst.operands[2].postind = 1;
6563 encode_arm_cp_address (2, TRUE, TRUE, 0);
6566 /* iWMMXt instructions: strictly in alphabetical order. */
6569 do_iwmmxt_tandorc (void)
6571 constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
6575 do_iwmmxt_textrc (void)
6577 inst.instruction |= inst.operands[0].reg << 12;
6578 inst.instruction |= inst.operands[1].imm;
6582 do_iwmmxt_textrm (void)
6584 inst.instruction |= inst.operands[0].reg << 12;
6585 inst.instruction |= inst.operands[1].reg << 16;
6586 inst.instruction |= inst.operands[2].imm;
6590 do_iwmmxt_tinsr (void)
6592 inst.instruction |= inst.operands[0].reg << 16;
6593 inst.instruction |= inst.operands[1].reg << 12;
6594 inst.instruction |= inst.operands[2].imm;
6598 do_iwmmxt_tmia (void)
6600 inst.instruction |= inst.operands[0].reg << 5;
6601 inst.instruction |= inst.operands[1].reg;
6602 inst.instruction |= inst.operands[2].reg << 12;
6606 do_iwmmxt_waligni (void)
6608 inst.instruction |= inst.operands[0].reg << 12;
6609 inst.instruction |= inst.operands[1].reg << 16;
6610 inst.instruction |= inst.operands[2].reg;
6611 inst.instruction |= inst.operands[3].imm << 20;
6615 do_iwmmxt_wmov (void)
6617 /* WMOV rD, rN is an alias for WOR rD, rN, rN. */
6618 inst.instruction |= inst.operands[0].reg << 12;
6619 inst.instruction |= inst.operands[1].reg << 16;
6620 inst.instruction |= inst.operands[1].reg;
6624 do_iwmmxt_wldstbh (void)
6627 inst.instruction |= inst.operands[0].reg << 12;
6628 inst.reloc.exp.X_add_number *= 4;
6630 reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
6632 reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
6633 encode_arm_cp_address (1, TRUE, FALSE, reloc);
6637 do_iwmmxt_wldstw (void)
6639 /* RIWR_RIWC clears .isreg for a control register. */
6640 if (!inst.operands[0].isreg)
6642 constraint (inst.cond != COND_ALWAYS, BAD_COND);
6643 inst.instruction |= 0xf0000000;
6646 inst.instruction |= inst.operands[0].reg << 12;
6647 encode_arm_cp_address (1, TRUE, TRUE, 0);
6651 do_iwmmxt_wldstd (void)
6653 inst.instruction |= inst.operands[0].reg << 12;
6654 encode_arm_cp_address (1, TRUE, FALSE, 0);
6658 do_iwmmxt_wshufh (void)
6660 inst.instruction |= inst.operands[0].reg << 12;
6661 inst.instruction |= inst.operands[1].reg << 16;
6662 inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
6663 inst.instruction |= (inst.operands[2].imm & 0x0f);
6667 do_iwmmxt_wzero (void)
6669 /* WZERO reg is an alias for WANDN reg, reg, reg. */
6670 inst.instruction |= inst.operands[0].reg;
6671 inst.instruction |= inst.operands[0].reg << 12;
6672 inst.instruction |= inst.operands[0].reg << 16;
6675 /* Cirrus Maverick instructions. Simple 2-, 3-, and 4-register
6676 operations first, then control, shift, and load/store. */
6678 /* Insns like "foo X,Y,Z". */
6681 do_mav_triple (void)
6683 inst.instruction |= inst.operands[0].reg << 16;
6684 inst.instruction |= inst.operands[1].reg;
6685 inst.instruction |= inst.operands[2].reg << 12;
6688 /* Insns like "foo W,X,Y,Z".
6689 where W=MVAX[0:3] and X,Y,Z=MVFX[0:15]. */
6694 inst.instruction |= inst.operands[0].reg << 5;
6695 inst.instruction |= inst.operands[1].reg << 12;
6696 inst.instruction |= inst.operands[2].reg << 16;
6697 inst.instruction |= inst.operands[3].reg;
6700 /* cfmvsc32<cond> DSPSC,MVDX[15:0]. */
6704 inst.instruction |= inst.operands[1].reg << 12;
6707 /* Maverick shift immediate instructions.
6708 cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
6709 cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0]. */
6714 int imm = inst.operands[2].imm;
6716 inst.instruction |= inst.operands[0].reg << 12;
6717 inst.instruction |= inst.operands[1].reg << 16;
6719 /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
6720 Bits 5-7 of the insn should have bits 4-6 of the immediate.
6721 Bit 4 should be 0. */
6722 imm = (imm & 0xf) | ((imm & 0x70) << 1);
6724 inst.instruction |= imm;
6727 /* XScale instructions. Also sorted arithmetic before move. */
6729 /* Xscale multiply-accumulate (argument parse)
6732 MIAxycc acc0,Rm,Rs. */
6737 inst.instruction |= inst.operands[1].reg;
6738 inst.instruction |= inst.operands[2].reg << 12;
6741 /* Xscale move-accumulator-register (argument parse)
6743 MARcc acc0,RdLo,RdHi. */
6748 inst.instruction |= inst.operands[1].reg << 12;
6749 inst.instruction |= inst.operands[2].reg << 16;
6752 /* Xscale move-register-accumulator (argument parse)
6754 MRAcc RdLo,RdHi,acc0. */
6759 constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
6760 inst.instruction |= inst.operands[0].reg << 12;
6761 inst.instruction |= inst.operands[1].reg << 16;
6764 /* Encoding functions relevant only to Thumb. */
6766 /* inst.operands[i] is a shifted-register operand; encode
6767 it into inst.instruction in the format used by Thumb32. */
6770 encode_thumb32_shifted_operand (int i)
6772 unsigned int value = inst.reloc.exp.X_add_number;
6773 unsigned int shift = inst.operands[i].shift_kind;
6775 constraint (inst.operands[i].immisreg,
6776 _("shift by register not allowed in thumb mode"));
6777 inst.instruction |= inst.operands[i].reg;
6778 if (shift == SHIFT_RRX)
6779 inst.instruction |= SHIFT_ROR << 4;
6782 constraint (inst.reloc.exp.X_op != O_constant,
6783 _("expression too complex"));
6785 constraint (value > 32
6786 || (value == 32 && (shift == SHIFT_LSL
6787 || shift == SHIFT_ROR)),
6788 _("shift expression is too large"));
6792 else if (value == 32)
6795 inst.instruction |= shift << 4;
6796 inst.instruction |= (value & 0x1c) << 10;
6797 inst.instruction |= (value & 0x03) << 6;
6802 /* inst.operands[i] was set up by parse_address. Encode it into a
6803 Thumb32 format load or store instruction. Reject forms that cannot
6804 be used with such instructions. If is_t is true, reject forms that
6805 cannot be used with a T instruction; if is_d is true, reject forms
6806 that cannot be used with a D instruction. */
6809 encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
6811 bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);
6813 constraint (!inst.operands[i].isreg,
6814 _("Instruction does not support =N addresses"));
6816 inst.instruction |= inst.operands[i].reg << 16;
6817 if (inst.operands[i].immisreg)
6819 constraint (is_pc, _("cannot use register index with PC-relative addressing"));
6820 constraint (is_t || is_d, _("cannot use register index with this instruction"));
6821 constraint (inst.operands[i].negative,
6822 _("Thumb does not support negative register indexing"));
6823 constraint (inst.operands[i].postind,
6824 _("Thumb does not support register post-indexing"));
6825 constraint (inst.operands[i].writeback,
6826 _("Thumb does not support register indexing with writeback"));
6827 constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
6828 _("Thumb supports only LSL in shifted register indexing"));
6830 inst.instruction |= inst.operands[i].imm;
6831 if (inst.operands[i].shifted)
6833 constraint (inst.reloc.exp.X_op != O_constant,
6834 _("expression too complex"));
6835 constraint (inst.reloc.exp.X_add_number < 0
6836 || inst.reloc.exp.X_add_number > 3,
6837 _("shift out of range"));
6838 inst.instruction |= inst.reloc.exp.X_add_number << 4;
6840 inst.reloc.type = BFD_RELOC_UNUSED;
6842 else if (inst.operands[i].preind)
6844 constraint (is_pc && inst.operands[i].writeback,
6845 _("cannot use writeback with PC-relative addressing"));
6846 constraint (is_t && inst.operands[i].writeback,
6847 _("cannot use writeback with this instruction"));
6851 inst.instruction |= 0x01000000;
6852 if (inst.operands[i].writeback)
6853 inst.instruction |= 0x00200000;
6857 inst.instruction |= 0x00000c00;
6858 if (inst.operands[i].writeback)
6859 inst.instruction |= 0x00000100;
6861 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
6863 else if (inst.operands[i].postind)
6865 assert (inst.operands[i].writeback);
6866 constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
6867 constraint (is_t, _("cannot use post-indexing with this instruction"));
6870 inst.instruction |= 0x00200000;
6872 inst.instruction |= 0x00000900;
6873 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_IMM;
6875 else /* unindexed - only for coprocessor */
6876 inst.error = _("instruction does not accept unindexed addressing");
6879 /* Table of Thumb instructions which exist in both 16- and 32-bit
6880 encodings (the latter only in post-V6T2 cores). The index is the
6881 value used in the insns table below. When there is more than one
6882 possible 16-bit encoding for the instruction, this table always
6884 Also contains several pseudo-instructions used during relaxation. */
/* X(mnemonic, 16-bit opcode, 32-bit opcode).  The second column feeds
   thumb_op16[] and the third thumb_op32[] via redefinitions of X.  */
6885 #define T16_32_TAB \
6886 X(adc, 4140, eb400000), \
6887 X(adcs, 4140, eb500000), \
6888 X(add, 1c00, eb000000), \
6889 X(adds, 1c00, eb100000), \
6890 X(addi, 0000, f1000000), \
6891 X(addis, 0000, f1100000), \
6892 X(add_pc,000f, f20f0000), \
6893 X(add_sp,000d, f10d0000), \
6894 X(adr, 000f, f20f0000), \
6895 X(and, 4000, ea000000), \
6896 X(ands, 4000, ea100000), \
6897 X(asr, 1000, fa40f000), \
6898 X(asrs, 1000, fa50f000), \
6899 X(b, e000, f000b000), \
6900 X(bcond, d000, f0008000), \
6901 X(bic, 4380, ea200000), \
6902 X(bics, 4380, ea300000), \
6903 X(cmn, 42c0, eb100f00), \
6904 X(cmp, 2800, ebb00f00), \
6905 X(cpsie, b660, f3af8400), \
6906 X(cpsid, b670, f3af8600), \
6907 X(cpy, 4600, ea4f0000), \
6908 X(dec_sp,80dd, f1bd0d00), \
6909 X(eor, 4040, ea800000), \
6910 X(eors, 4040, ea900000), \
6911 X(inc_sp,00dd, f10d0d00), \
6912 X(ldmia, c800, e8900000), \
6913 X(ldr, 6800, f8500000), \
6914 X(ldrb, 7800, f8100000), \
6915 X(ldrh, 8800, f8300000), \
6916 X(ldrsb, 5600, f9100000), \
6917 X(ldrsh, 5e00, f9300000), \
6918 X(ldr_pc,4800, f85f0000), \
6919 X(ldr_pc2,4800, f85f0000), \
6920 X(ldr_sp,9800, f85d0000), \
6921 X(lsl, 0000, fa00f000), \
6922 X(lsls, 0000, fa10f000), \
6923 X(lsr, 0800, fa20f000), \
6924 X(lsrs, 0800, fa30f000), \
6925 X(mov, 2000, ea4f0000), \
6926 X(movs, 2000, ea5f0000), \
6927 X(mul, 4340, fb00f000), \
6928 X(muls, 4340, ffffffff), /* no 32b muls */ \
6929 X(mvn, 43c0, ea6f0000), \
6930 X(mvns, 43c0, ea7f0000), \
6931 X(neg, 4240, f1c00000), /* rsb #0 */ \
6932 X(negs, 4240, f1d00000), /* rsbs #0 */ \
6933 X(orr, 4300, ea400000), \
6934 X(orrs, 4300, ea500000), \
6935 X(pop, bc00, e8bd0000), /* ldmia sp!,... */ \
6936 X(push, b400, e92d0000), /* stmdb sp!,... */ \
6937 X(rev, ba00, fa90f080), \
6938 X(rev16, ba40, fa90f090), \
6939 X(revsh, bac0, fa90f0b0), \
6940 X(ror, 41c0, fa60f000), \
6941 X(rors, 41c0, fa70f000), \
6942 X(sbc, 4180, eb600000), \
6943 X(sbcs, 4180, eb700000), \
6944 X(stmia, c000, e8800000), \
6945 X(str, 6000, f8400000), \
6946 X(strb, 7000, f8000000), \
6947 X(strh, 8000, f8200000), \
6948 X(str_sp,9000, f84d0000), \
6949 X(sub, 1e00, eba00000), \
6950 X(subs, 1e00, ebb00000), \
6951 X(subi, 8000, f1a00000), \
6952 X(subis, 8000, f1b00000), \
6953 X(sxtb, b240, fa4ff080), \
6954 X(sxth, b200, fa0ff080), \
6955 X(tst, 4200, ea100f00), \
6956 X(uxtb, b2c0, fa5ff080), \
6957 X(uxth, b280, fa1ff080), \
6958 X(nop, bf00, f3af8000), \
6959 X(yield, bf10, f3af8001), \
6960 X(wfe, bf20, f3af8002), \
6961 X(wfi, bf30, f3af8003), \
6962 X(sev, bf40, f3af8004), /* SEV.W is F3AF 8004; was mistyped f3af9004 */
6964 /* To catch errors in encoding functions, the codes are all offset by
6965 0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
6966 as 16-bit instructions. */
6967 #define X(a,b,c) T_MNEM_##a
6968 enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
6971 #define X(a,b,c) 0x##b
6972 static const unsigned short thumb_op16[] = { T16_32_TAB };
6973 #define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
6976 #define X(a,b,c) 0x##c
6977 static const unsigned int thumb_op32[] = { T16_32_TAB };
6978 #define THUMB_OP32(n) (thumb_op32[(n) - (T16_32_OFFSET + 1)])
6979 #define THUMB_SETS_FLAGS(n) (THUMB_OP32 (n) & 0x00100000)
6983 /* Thumb instruction encoders, in alphabetical order. */
6987 do_t_add_sub_w (void)
6991 Rd = inst.operands[0].reg;
6992 Rn = inst.operands[1].reg;
6994 constraint (Rd == 15, _("PC not allowed as destination"));
6995 inst.instruction |= (Rn << 16) | (Rd << 8);
6996 inst.reloc.type = BFD_RELOC_ARM_T32_IMM12;
6999 /* Parse an add or subtract instruction. We get here with inst.instruction
7000 equalling any of THUMB_OPCODE_add, adds, sub, or subs. */
7007 Rd = inst.operands[0].reg;
7008 Rs = (inst.operands[1].present
7009 ? inst.operands[1].reg /* Rd, Rs, foo */
7010 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7018 flags = (inst.instruction == T_MNEM_adds
7019 || inst.instruction == T_MNEM_subs);
7021 narrow = (current_it_mask == 0);
7023 narrow = (current_it_mask != 0);
7024 if (!inst.operands[2].isreg)
7027 if (inst.size_req != 4)
7031 add = (inst.instruction == T_MNEM_add
7032 || inst.instruction == T_MNEM_adds);
7033 /* Attempt to use a narrow opcode, with relaxation if
7035 if (Rd == REG_SP && Rs == REG_SP && !flags)
7036 opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
7037 else if (Rd <= 7 && Rs == REG_SP && add && !flags)
7038 opcode = T_MNEM_add_sp;
7039 else if (Rd <= 7 && Rs == REG_PC && add && !flags)
7040 opcode = T_MNEM_add_pc;
7041 else if (Rd <= 7 && Rs <= 7 && narrow)
7044 opcode = add ? T_MNEM_addis : T_MNEM_subis;
7046 opcode = add ? T_MNEM_addi : T_MNEM_subi;
7050 inst.instruction = THUMB_OP16(opcode);
7051 inst.instruction |= (Rd << 4) | Rs;
7052 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7053 if (inst.size_req != 2)
7054 inst.relax = opcode;
7057 constraint (inst.size_req == 2, BAD_HIREG);
7059 if (inst.size_req == 4
7060 || (inst.size_req != 2 && !opcode))
7062 /* ??? Convert large immediates to addw/subw. */
7063 inst.instruction = THUMB_OP32 (inst.instruction);
7064 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7065 inst.instruction |= inst.operands[0].reg << 8;
7066 inst.instruction |= inst.operands[1].reg << 16;
7067 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7072 Rn = inst.operands[2].reg;
7073 /* See if we can do this with a 16-bit instruction. */
7074 if (!inst.operands[2].shifted && inst.size_req != 4)
7076 if (Rd > 7 || Rs > 7 || Rn > 7)
7081 inst.instruction = ((inst.instruction == T_MNEM_adds
7082 || inst.instruction == T_MNEM_add)
7085 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7089 if (inst.instruction == T_MNEM_add)
7093 inst.instruction = T_OPCODE_ADD_HI;
7094 inst.instruction |= (Rd & 8) << 4;
7095 inst.instruction |= (Rd & 7);
7096 inst.instruction |= Rn << 3;
7099 /* ... because addition is commutative! */
7102 inst.instruction = T_OPCODE_ADD_HI;
7103 inst.instruction |= (Rd & 8) << 4;
7104 inst.instruction |= (Rd & 7);
7105 inst.instruction |= Rs << 3;
7110 /* If we get here, it can't be done in 16 bits. */
7111 constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
7112 _("shift must be constant"));
7113 inst.instruction = THUMB_OP32 (inst.instruction);
7114 inst.instruction |= Rd << 8;
7115 inst.instruction |= Rs << 16;
7116 encode_thumb32_shifted_operand (2);
7121 constraint (inst.instruction == T_MNEM_adds
7122 || inst.instruction == T_MNEM_subs,
7125 if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
7127 constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
7128 || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
7131 inst.instruction = (inst.instruction == T_MNEM_add
7133 inst.instruction |= (Rd << 4) | Rs;
7134 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7138 Rn = inst.operands[2].reg;
7139 constraint (inst.operands[2].shifted, _("unshifted register required"));
7141 /* We now have Rd, Rs, and Rn set to registers. */
7142 if (Rd > 7 || Rs > 7 || Rn > 7)
7144 /* Can't do this for SUB. */
7145 constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
7146 inst.instruction = T_OPCODE_ADD_HI;
7147 inst.instruction |= (Rd & 8) << 4;
7148 inst.instruction |= (Rd & 7);
7150 inst.instruction |= Rn << 3;
7152 inst.instruction |= Rs << 3;
7154 constraint (1, _("dest must overlap one source register"));
7158 inst.instruction = (inst.instruction == T_MNEM_add
7159 ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
7160 inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
7168 if (unified_syntax && inst.size_req == 0 && inst.operands[0].reg <= 7)
7170 /* Defer to section relaxation. */
7171 inst.relax = inst.instruction;
7172 inst.instruction = THUMB_OP16 (inst.instruction);
7173 inst.instruction |= inst.operands[0].reg << 4;
7175 else if (unified_syntax && inst.size_req != 2)
7177 /* Generate a 32-bit opcode. */
7178 inst.instruction = THUMB_OP32 (inst.instruction);
7179 inst.instruction |= inst.operands[0].reg << 8;
7180 inst.reloc.type = BFD_RELOC_ARM_T32_ADD_PC12;
7181 inst.reloc.pc_rel = 1;
7185 /* Generate a 16-bit opcode. */
7186 inst.instruction = THUMB_OP16 (inst.instruction);
7187 inst.reloc.type = BFD_RELOC_ARM_THUMB_ADD;
7188 inst.reloc.exp.X_add_number -= 4; /* PC relative adjust. */
7189 inst.reloc.pc_rel = 1;
7191 inst.instruction |= inst.operands[0].reg << 4;
/* NOTE(review): elided listing fragment of do_t_arit3 (non-commutative
   3-operand Thumb arithmetic: sbc, bic, etc.).  Listing numbers retained;
   interior lines are missing -- do not treat as compilable.  */
7195 /* Arithmetic instructions for which there is just one 16-bit
7196 instruction encoding, and it allows only two low registers.
7197 For maximal compatibility with ARM syntax, we allow three register
7198 operands even when Thumb-32 instructions are not available, as long
7199 as the first two are identical. For instance, both "sbc r0,r1" and
7200 "sbc r0,r0,r1" are allowed. */
/* Rs defaults to Rd when operand 1 is omitted (two-operand form).  */
7206 Rd = inst.operands[0].reg;
7207 Rs = (inst.operands[1].present
7208 ? inst.operands[1].reg /* Rd, Rs, foo */
7209 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7210 Rn = inst.operands[2].reg;
7214 if (!inst.operands[2].isreg)
7216 /* For an immediate, we always generate a 32-bit opcode;
7217 section relaxation will shrink it later if possible. */
7218 inst.instruction = THUMB_OP32 (inst.instruction);
7219 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7220 inst.instruction |= Rd << 8;
7221 inst.instruction |= Rs << 16;
7222 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7228 /* See if we can do this with a 16-bit instruction. */
/* Inside an IT block only the non-flag-setting form can be narrow;
   outside, only the flag-setting form can (16-bit encodings set flags).  */
7229 if (THUMB_SETS_FLAGS (inst.instruction))
7230 narrow = current_it_mask == 0;
7232 narrow = current_it_mask != 0;
7234 if (Rd > 7 || Rn > 7 || Rs > 7)
7236 if (inst.operands[2].shifted)
7238 if (inst.size_req == 4)
7244 inst.instruction = THUMB_OP16 (inst.instruction);
7245 inst.instruction |= Rd;
7246 inst.instruction |= Rn << 3;
7250 /* If we get here, it can't be done in 16 bits. */
7251 constraint (inst.operands[2].shifted
7252 && inst.operands[2].immisreg,
7253 _("shift must be constant"));
7254 inst.instruction = THUMB_OP32 (inst.instruction);
7255 inst.instruction |= Rd << 8;
7256 inst.instruction |= Rs << 16;
7257 encode_thumb32_shifted_operand (2);
/* Non-unified (pre-UAL) path: 16-bit only, low regs, Rd must equal Rs.  */
7262 /* On its face this is a lie - the instruction does set the
7263 flags. However, the only supported mnemonic in this mode
7265 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
7267 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
7268 _("unshifted register required"));
7269 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
7270 constraint (Rd != Rs,
7271 _("dest and source1 must be the same register"));
7273 inst.instruction = THUMB_OP16 (inst.instruction);
7274 inst.instruction |= Rd;
7275 inst.instruction |= Rn << 3;
/* NOTE(review): elided listing fragment of do_t_arit3c -- same as do_t_arit3
   but for commutative operations (adc, and, eor, orr, ...), so either source
   may coincide with the destination in the 16-bit form.  Listing numbers
   retained; interior lines missing.  */
7279 /* Similarly, but for instructions where the arithmetic operation is
7280 commutative, so we can allow either of them to be different from
7281 the destination operand in a 16-bit instruction. For instance, all
7282 three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
7289 Rd = inst.operands[0].reg;
7290 Rs = (inst.operands[1].present
7291 ? inst.operands[1].reg /* Rd, Rs, foo */
7292 : inst.operands[0].reg); /* Rd, foo -> Rd, Rd, foo */
7293 Rn = inst.operands[2].reg;
7297 if (!inst.operands[2].isreg)
7299 /* For an immediate, we always generate a 32-bit opcode;
7300 section relaxation will shrink it later if possible. */
7301 inst.instruction = THUMB_OP32 (inst.instruction);
7302 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
7303 inst.instruction |= Rd << 8;
7304 inst.instruction |= Rs << 16;
7305 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
7311 /* See if we can do this with a 16-bit instruction. */
7312 if (THUMB_SETS_FLAGS (inst.instruction))
7313 narrow = current_it_mask == 0;
7315 narrow = current_it_mask != 0;
7317 if (Rd > 7 || Rn > 7 || Rs > 7)
7319 if (inst.operands[2].shifted)
7321 if (inst.size_req == 4)
/* Commutativity: narrow encoding works with Rd==Rs (use Rn as the
   other source) or, below, Rd==Rn (use Rs).  */
7328 inst.instruction = THUMB_OP16 (inst.instruction);
7329 inst.instruction |= Rd;
7330 inst.instruction |= Rn << 3;
7335 inst.instruction = THUMB_OP16 (inst.instruction);
7336 inst.instruction |= Rd;
7337 inst.instruction |= Rs << 3;
7342 /* If we get here, it can't be done in 16 bits. */
7343 constraint (inst.operands[2].shifted
7344 && inst.operands[2].immisreg,
7345 _("shift must be constant"));
7346 inst.instruction = THUMB_OP32 (inst.instruction);
7347 inst.instruction |= Rd << 8;
7348 inst.instruction |= Rs << 16;
7349 encode_thumb32_shifted_operand (2);
/* Non-unified path: 16-bit only, low registers, Rd must overlap a source.  */
7354 /* On its face this is a lie - the instruction does set the
7355 flags. However, the only supported mnemonic in this mode
7357 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
7359 constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
7360 _("unshifted register required"));
7361 constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
7363 inst.instruction = THUMB_OP16 (inst.instruction);
7364 inst.instruction |= Rd;
7367 inst.instruction |= Rn << 3;
7369 inst.instruction |= Rs << 3;
7371 constraint (1, _("dest must overlap one source register"));
/* NOTE(review): elided fragment of a Thumb barrier encoder (dmb/dsb/isb).
   An omitted operand defaults to option 0xf (SY).  */
7378 if (inst.operands[0].present)
7380 constraint ((inst.instruction & 0xf0) != 0x40
7381 && inst.operands[0].imm != 0xf,
/* NOTE(review): message below is not wrapped in _() unlike every sibling
   constraint -- looks like missing i18n markup; confirm upstream.  */
7382 "bad barrier type");
7383 inst.instruction |= inst.operands[0].imm;
7386 inst.instruction |= 0xf;
/* NOTE(review): elided fragment of the Thumb BFC encoder (do_t_bfc?).
   Operands are (Rd, lsb, width); the encoding stores lsb and msb.  */
7392 unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
7393 constraint (msb > 32, _("bit-field extends past end of register"));
7394 /* The instruction encoding stores the LSB and MSB,
7395 not the LSB and width. */
7396 inst.instruction |= inst.operands[0].reg << 8;
/* lsb is split across imm3 (bits 14:12) and imm2 (bits 7:6).  */
7397 inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
7398 inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
7399 inst.instruction |= msb - 1;
/* NOTE(review): elided fragments of the Thumb BFI and (below) BFX/SBFX/UBFX
   encoders.  Listing numbers retained; interior lines missing.  */
7407 /* #0 in second position is alternative syntax for bfc, which is
7408 the same instruction but with REG_PC in the Rm field. */
7409 if (!inst.operands[1].isreg)
7410 inst.operands[1].reg = REG_PC;
7412 msb = inst.operands[2].imm + inst.operands[3].imm;
7413 constraint (msb > 32, _("bit-field extends past end of register"));
7414 /* The instruction encoding stores the LSB and MSB,
7415 not the LSB and width. */
7416 inst.instruction |= inst.operands[0].reg << 8;
7417 inst.instruction |= inst.operands[1].reg << 16;
7418 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
7419 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
7420 inst.instruction |= msb - 1;
/* BFX-style: encoding stores lsb and (width - 1).  */
7426 constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
7427 _("bit-field extends past end of register"));
7428 inst.instruction |= inst.operands[0].reg << 8;
7429 inst.instruction |= inst.operands[1].reg << 16;
7430 inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
7431 inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
7432 inst.instruction |= inst.operands[3].imm - 1;
/* NOTE(review): elided fragment of do_t_blx.  Two distinct encodings share
   the mnemonic, so the opcode is written directly into inst.instruction.  */
7435 /* ARM V5 Thumb BLX (argument parse)
7436 BLX <target_addr> which is BLX(1)
7437 BLX <Rm> which is BLX(2)
7438 Unfortunately, there are two different opcodes for this mnemonic.
7439 So, the insns[].value is not used, and the code here zaps values
7440 into inst.instruction.
7442 ??? How to take advantage of the additional two bits of displacement
7443 available in Thumb32 mode? Need new relocation? */
/* BLX must be the last instruction of an IT block, if in one at all.  */
7448 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
7449 if (inst.operands[0].isreg)
7450 /* We have a register, so this is BLX(2). */
7451 inst.instruction |= inst.operands[0].reg << 3;
7454 /* No register. This must be BLX(1). */
7455 inst.instruction = 0xf000e800;
/* EABI v4+ lets the linker fix interworking, so a plain BRANCH23 reloc
   suffices; older ABIs need the explicit BLX reloc.  */
7457 if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
7458 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
7461 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BLX;
7462 inst.reloc.pc_rel = 1;
/* NOTE(review): elided fragment of do_t_branch (conditional/unconditional
   Thumb branches).  Reloc type selects among the 25/20/12/9-bit
   displacement encodings; interior lines are missing.  */
7472 if (current_it_mask)
7474 /* Conditional branches inside IT blocks are encoded as unconditional
7477 /* A branch must be the last instruction in an IT block. */
7478 constraint (current_it_mask != 0x10, BAD_BRANCH);
7483 if (cond != COND_ALWAYS)
7484 opcode = T_MNEM_bcond;
7486 opcode = inst.instruction;
7488 if (unified_syntax && inst.size_req == 4)
7490 inst.instruction = THUMB_OP32(opcode);
7491 if (cond == COND_ALWAYS)
7492 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH25;
7495 assert (cond != 0xF);
7496 inst.instruction |= cond << 22;
7497 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH20;
7502 inst.instruction = THUMB_OP16(opcode);
7503 if (cond == COND_ALWAYS)
7504 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH12;
7507 inst.instruction |= cond << 8;
7508 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH9;
7510 /* Allow section relaxation. */
7511 if (unified_syntax && inst.size_req != 2)
7512 inst.relax = opcode;
7515 inst.reloc.pc_rel = 1;
/* NOTE(review): elided fragments of do_t_bkpt (above) and do_t_branch23
   (BL/long-branch, below).  Listing numbers retained.  */
7521 constraint (inst.cond != COND_ALWAYS,
7522 _("instruction is always unconditional"));
7523 if (inst.operands[0].present)
7525 constraint (inst.operands[0].imm > 255,
7526 _("immediate value out of range"));
7527 inst.instruction |= inst.operands[0].imm;
7532 do_t_branch23 (void)
7534 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
7535 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH23;
7536 inst.reloc.pc_rel = 1;
7538 /* If the destination of the branch is a defined symbol which does not have
7539 the THUMB_FUNC attribute, then we must be calling a function which has
7540 the (interfacearm) attribute. We look for the Thumb entry point to that
7541 function and change the branch to refer to that function instead. */
7542 if ( inst.reloc.exp.X_op == O_symbol
7543 && inst.reloc.exp.X_add_symbol != NULL
7544 && S_IS_DEFINED (inst.reloc.exp.X_add_symbol)
7545 && ! THUMB_IS_FUNC (inst.reloc.exp.X_add_symbol))
7546 inst.reloc.exp.X_add_symbol =
7547 find_real_start (inst.reloc.exp.X_add_symbol);
/* NOTE(review): elided fragments of do_t_bx, do_t_bxj and a CLZ-style
   encoder (Rd<<8, Rm duplicated in bits 16 and 0).  */
7553 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
7554 inst.instruction |= inst.operands[0].reg << 3;
7555 /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC. The reloc
7556 should cause the alignment to be checked once it is known. This is
7557 because BX PC only works if the instruction is word aligned. */
7563 constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
7564 if (inst.operands[0].reg == REG_PC)
7565 as_tsktsk (_("use of r15 in bxj is not really useful"));
7567 inst.instruction |= inst.operands[0].reg << 16;
7573 inst.instruction |= inst.operands[0].reg << 8;
7574 inst.instruction |= inst.operands[1].reg << 16;
7575 inst.instruction |= inst.operands[1].reg;
/* NOTE(review): elided fragments of do_t_cps (above) and do_t_cpsi (below).
   The 32-bit CPS form re-encodes imod/mode fields into a T32 opcode;
   neither form is permitted inside an IT block.  */
7581 constraint (current_it_mask, BAD_NOT_IT);
7582 inst.instruction |= inst.operands[0].imm;
7588 constraint (current_it_mask, BAD_NOT_IT);
7590 && (inst.operands[1].present || inst.size_req == 4)
7591 && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
7593 unsigned int imod = (inst.instruction & 0x0030) >> 4;
7594 inst.instruction = 0xf3af8000;
7595 inst.instruction |= imod << 9;
7596 inst.instruction |= inst.operands[0].imm << 5;
7597 if (inst.operands[1].present)
7598 inst.instruction |= 0x100 | inst.operands[1].imm;
7602 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
7603 && (inst.operands[0].imm & 4),
7604 _("selected processor does not support 'A' form "
7605 "of this instruction"));
7606 constraint (inst.operands[1].present || inst.size_req == 4,
7607 _("Thumb does not support the 2-argument "
7608 "form of this instruction"));
7609 inst.instruction |= inst.operands[0].imm;
/* NOTE(review): elided fragments of do_t_cpy, a CBZ/CBNZ-style encoder,
   a DBG-style encoder, a 3-register encoder (div-like), and a 16/32-bit
   hint selector.  Interior listing lines are missing.  */
7613 /* THUMB CPY instruction (argument parse). */
7618 if (inst.size_req == 4)
7620 inst.instruction = THUMB_OP32 (T_MNEM_mov);
7621 inst.instruction |= inst.operands[0].reg << 8;
7622 inst.instruction |= inst.operands[1].reg;
/* 16-bit CPY: Rd split into H bit (bit 7) and low 3 bits.  */
7626 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
7627 inst.instruction |= (inst.operands[0].reg & 0x7);
7628 inst.instruction |= inst.operands[1].reg << 3;
/* CBZ/CBNZ-like: low register + 7-bit pc-relative branch reloc.  */
7635 constraint (current_it_mask, BAD_NOT_IT);
7636 constraint (inst.operands[0].reg > 7, BAD_HIREG);
7637 inst.instruction |= inst.operands[0].reg;
7638 inst.reloc.pc_rel = 1;
7639 inst.reloc.type = BFD_RELOC_THUMB_PCREL_BRANCH7;
7645 inst.instruction |= inst.operands[0].imm;
7651 if (!inst.operands[1].present)
7652 inst.operands[1].reg = inst.operands[0].reg;
7653 inst.instruction |= inst.operands[0].reg << 8;
7654 inst.instruction |= inst.operands[1].reg << 16;
7655 inst.instruction |= inst.operands[2].reg;
7661 if (unified_syntax && inst.size_req == 4)
7662 inst.instruction = THUMB_OP32 (inst.instruction);
7664 inst.instruction = THUMB_OP16 (inst.instruction);
/* NOTE(review): elided fragment of do_t_it.  Records the IT state for the
   following instructions and, for negated conditions, inverts the stored
   then/else mask bits.  Interior lines missing.  */
7670 unsigned int cond = inst.operands[0].imm;
7672 constraint (current_it_mask, BAD_NOT_IT);
7673 current_it_mask = (inst.instruction & 0xf) | 0x10;
7676 /* If the condition is a negative condition, invert the mask. */
7677 if ((cond & 0x1) == 0x0)
7679 unsigned int mask = inst.instruction & 0x000f;
7681 if ((mask & 0x7) == 0)
7682 /* no conversion needed */;
7683 else if ((mask & 0x3) == 0)
7685 else if ((mask & 0x1) == 0)
7690 inst.instruction &= 0xfff0;
7691 inst.instruction |= mask;
7694 inst.instruction |= cond << 4;
/* NOTE(review): elided fragment of do_t_ldmstm (Thumb LDM/STM).  Three
   paths: narrow 16-bit when base/list permit, wide 32-bit otherwise
   (unified syntax), and the legacy non-unified 16-bit path with its own
   diagnostics.  Interior listing lines are missing.  */
7700 /* This really doesn't seem worth it. */
7701 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
7702 _("expression too complex"));
7703 constraint (inst.operands[1].writeback,
7704 _("Thumb load/store multiple does not support {reglist}^"));
7708 /* See if we can use a 16-bit instruction. */
7709 if (inst.instruction < 0xffff /* not ldmdb/stmdb */
7710 && inst.size_req != 4
7711 && inst.operands[0].reg <= 7
7712 && !(inst.operands[1].imm & ~0xff)
/* STM always writes back; LDM writes back iff the base is absent from
   the register list -- the 16-bit form hard-codes that behaviour.  */
7713 && (inst.instruction == T_MNEM_stmia
7714 ? inst.operands[0].writeback
7715 : (inst.operands[0].writeback
7716 == !(inst.operands[1].imm & (1 << inst.operands[0].reg)))))
7718 if (inst.instruction == T_MNEM_stmia
7719 && (inst.operands[1].imm & (1 << inst.operands[0].reg))
7720 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
7721 as_warn (_("value stored for r%d is UNPREDICTABLE"),
7722 inst.operands[0].reg);
7724 inst.instruction = THUMB_OP16 (inst.instruction);
7725 inst.instruction |= inst.operands[0].reg << 8;
7726 inst.instruction |= inst.operands[1].imm;
/* Wide (32-bit) path: diagnose SP/PC/LR misuse, then encode.  */
7730 if (inst.operands[1].imm & (1 << 13))
7731 as_warn (_("SP should not be in register list"));
7732 if (inst.instruction == T_MNEM_stmia)
7734 if (inst.operands[1].imm & (1 << 15))
7735 as_warn (_("PC should not be in register list"));
7736 if (inst.operands[1].imm & (1 << inst.operands[0].reg))
7737 as_warn (_("value stored for r%d is UNPREDICTABLE"),
7738 inst.operands[0].reg);
7742 if (inst.operands[1].imm & (1 << 14)
7743 && inst.operands[1].imm & (1 << 15))
7744 as_warn (_("LR and PC should not both be in register list"));
7745 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
7746 && inst.operands[0].writeback)
7747 as_warn (_("base register should not be in register list "
7748 "when written back"));
7750 if (inst.instruction < 0xffff)
7751 inst.instruction = THUMB_OP32 (inst.instruction);
7752 inst.instruction |= inst.operands[0].reg << 16;
7753 inst.instruction |= inst.operands[1].imm;
7754 if (inst.operands[0].writeback)
7755 inst.instruction |= WRITE_BACK;
/* Non-unified path: 16-bit only, with writeback-mismatch warnings.  */
7760 constraint (inst.operands[0].reg > 7
7761 || (inst.operands[1].imm & ~0xff), BAD_HIREG);
7762 if (inst.instruction == T_MNEM_stmia)
7764 if (!inst.operands[0].writeback)
7765 as_warn (_("this instruction will write back the base register"));
7766 if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
7767 && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
7768 as_warn (_("value stored for r%d is UNPREDICTABLE"),
7769 inst.operands[0].reg);
7773 if (!inst.operands[0].writeback
7774 && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
7775 as_warn (_("this instruction will write back the base register"));
7776 else if (inst.operands[0].writeback
7777 && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
7778 as_warn (_("this instruction will not write back the base register"));
7781 inst.instruction = THUMB_OP16 (inst.instruction);
7782 inst.instruction |= inst.operands[0].reg << 8;
7783 inst.instruction |= inst.operands[1].imm;
/* NOTE(review): elided fragments of do_t_ldrex (simple [Rn, #imm]
   addressing only) and do_t_ldrexd (second register defaults to Rt+1).  */
7790 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
7791 || inst.operands[1].postind || inst.operands[1].writeback
7792 || inst.operands[1].immisreg || inst.operands[1].shifted
7793 || inst.operands[1].negative,
7796 inst.instruction |= inst.operands[0].reg << 12;
7797 inst.instruction |= inst.operands[1].reg << 16;
7798 inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
7804 if (!inst.operands[1].present)
7806 constraint (inst.operands[0].reg == REG_LR,
7807 _("r14 not allowed as first register "
7808 "when second register is omitted"));
7809 inst.operands[1].reg = inst.operands[0].reg + 1;
7811 constraint (inst.operands[0].reg == inst.operands[1].reg,
7814 inst.instruction |= inst.operands[0].reg << 12;
7815 inst.instruction |= inst.operands[1].reg << 8;
7816 inst.instruction |= inst.operands[2].reg << 16;
/* NOTE(review): elided fragment of do_t_ldst, unified-syntax half.  Picks
   the best of several 16-bit LDR/STR encodings (register offset, immediate,
   PC-relative, SP-relative) before falling back to 32-bit.  Interior
   listing lines are missing.  */
7822 unsigned long opcode;
7825 opcode = inst.instruction;
7828 if (!inst.operands[1].isreg)
7830 if (opcode <= 0xffff)
7831 inst.instruction = THUMB_OP32 (opcode);
7832 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
7835 if (inst.operands[1].isreg
7836 && !inst.operands[1].writeback
7837 && !inst.operands[1].shifted && !inst.operands[1].postind
7838 && !inst.operands[1].negative && inst.operands[0].reg <= 7
7840 && inst.size_req != 4)
7842 /* Insn may have a 16-bit form. */
7843 Rn = inst.operands[1].reg;
7844 if (inst.operands[1].immisreg)
7846 inst.instruction = THUMB_OP16 (opcode);
7848 if (Rn <= 7 && inst.operands[1].imm <= 7)
7851 else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
7852 && opcode != T_MNEM_ldrsb)
7853 || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
7854 || (Rn == REG_SP && opcode == T_MNEM_str))
/* PC-relative loads use distinct pseudo-mnemonics so relaxation can
   tell them apart from the plain immediate form.  */
7861 if (inst.reloc.pc_rel)
7862 opcode = T_MNEM_ldr_pc2;
7864 opcode = T_MNEM_ldr_pc;
7868 if (opcode == T_MNEM_ldr)
7869 opcode = T_MNEM_ldr_sp;
7871 opcode = T_MNEM_str_sp;
7873 inst.instruction = inst.operands[0].reg << 8;
7877 inst.instruction = inst.operands[0].reg;
7878 inst.instruction |= inst.operands[1].reg << 3;
7880 inst.instruction |= THUMB_OP16 (opcode);
7881 if (inst.size_req == 2)
7882 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
7884 inst.relax = opcode;
7888 /* Definitely a 32-bit variant. */
7889 inst.instruction = THUMB_OP32 (opcode);
7890 inst.instruction |= inst.operands[0].reg << 12;
7891 encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
/* NOTE(review): elided fragment of do_t_ldst, non-unified (pre-UAL) half.
   Low registers only; ldrsb/ldrsh accept only [Rn, Rm]; PC/SP-based forms
   get their dedicated opcodes; register-offset forms are re-opcoded via
   the switch below.  Interior listing lines are missing.  */
7895 constraint (inst.operands[0].reg > 7, BAD_HIREG);
7897 if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
7899 /* Only [Rn,Rm] is acceptable. */
7900 constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
7901 constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
7902 || inst.operands[1].postind || inst.operands[1].shifted
7903 || inst.operands[1].negative,
7904 _("Thumb does not support this addressing mode"));
7905 inst.instruction = THUMB_OP16 (inst.instruction);
7909 inst.instruction = THUMB_OP16 (inst.instruction);
7910 if (!inst.operands[1].isreg)
7911 if (move_or_literal_pool (0, /*thumb_p=*/TRUE, /*mode_3=*/FALSE))
7914 constraint (!inst.operands[1].preind
7915 || inst.operands[1].shifted
7916 || inst.operands[1].writeback,
7917 _("Thumb does not support this addressing mode"));
7918 if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
7920 constraint (inst.instruction & 0x0600,
7921 _("byte or halfword not valid for base register"));
7922 constraint (inst.operands[1].reg == REG_PC
7923 && !(inst.instruction & THUMB_LOAD_BIT),
7924 _("r15 based store not allowed"));
7925 constraint (inst.operands[1].immisreg,
7926 _("invalid base register for register offset"));
7928 if (inst.operands[1].reg == REG_PC)
7929 inst.instruction = T_OPCODE_LDR_PC;
7930 else if (inst.instruction & THUMB_LOAD_BIT)
7931 inst.instruction = T_OPCODE_LDR_SP;
7933 inst.instruction = T_OPCODE_STR_SP;
7935 inst.instruction |= inst.operands[0].reg << 8;
7936 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
7940 constraint (inst.operands[1].reg > 7, BAD_HIREG);
7941 if (!inst.operands[1].immisreg)
7943 /* Immediate offset. */
7944 inst.instruction |= inst.operands[0].reg;
7945 inst.instruction |= inst.operands[1].reg << 3;
7946 inst.reloc.type = BFD_RELOC_ARM_THUMB_OFFSET;
7950 /* Register offset. */
7951 constraint (inst.operands[1].imm > 7, BAD_HIREG);
7952 constraint (inst.operands[1].negative,
7953 _("Thumb does not support this addressing mode"));
7956 switch (inst.instruction)
7958 case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
7959 case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
7960 case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
7961 case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
7962 case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
7963 case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
7964 case 0x5600 /* ldrsb */:
7965 case 0x5e00 /* ldrsh */: break;
7969 inst.instruction |= inst.operands[0].reg;
7970 inst.instruction |= inst.operands[1].reg << 3;
7971 inst.instruction |= inst.operands[1].imm << 6;
/* NOTE(review): elided fragments of do_t_ldstd (LDRD/STRD, Rt2 defaults to
   Rt+1), do_t_ldstt (unprivileged load/store), and two multiply-accumulate
   style 4-register encoders (mla/mlal-like).  */
7977 if (!inst.operands[1].present)
7979 inst.operands[1].reg = inst.operands[0].reg + 1;
7980 constraint (inst.operands[0].reg == REG_LR,
7981 _("r14 not allowed here"));
7983 inst.instruction |= inst.operands[0].reg << 12;
7984 inst.instruction |= inst.operands[1].reg << 8;
7985 encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
7992 inst.instruction |= inst.operands[0].reg << 12;
7993 encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
7999 inst.instruction |= inst.operands[0].reg << 8;
8000 inst.instruction |= inst.operands[1].reg << 16;
8001 inst.instruction |= inst.operands[2].reg;
8002 inst.instruction |= inst.operands[3].reg << 12;
8008 inst.instruction |= inst.operands[0].reg << 12;
8009 inst.instruction |= inst.operands[1].reg << 8;
8010 inst.instruction |= inst.operands[2].reg << 16;
8011 inst.instruction |= inst.operands[3].reg;
/* NOTE(review): elided fragments of do_t_mov_cmp and (at the end) a
   MOVW/MOVT-style immediate encoder (do_t_mov16?).  Interior listing
   lines (else arms, case labels, braces) are missing.  */
/* mov/movs put Rd at bit 8; other mnemonics sharing this path at bit 16.  */
8019 int r0off = (inst.instruction == T_MNEM_mov
8020 || inst.instruction == T_MNEM_movs) ? 8 : 16;
8021 unsigned long opcode;
8023 bfd_boolean low_regs;
8025 low_regs = (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7);
8026 opcode = inst.instruction;
8027 if (current_it_mask)
8028 narrow = opcode != T_MNEM_movs;
8030 narrow = opcode != T_MNEM_movs || low_regs;
8031 if (inst.size_req == 4
8032 || inst.operands[1].shifted)
8035 if (!inst.operands[1].isreg)
8037 /* Immediate operand. */
8038 if (current_it_mask == 0 && opcode == T_MNEM_mov)
8040 if (low_regs && narrow)
8042 inst.instruction = THUMB_OP16 (opcode);
8043 inst.instruction |= inst.operands[0].reg << 8;
8044 if (inst.size_req == 2)
8045 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
8047 inst.relax = opcode;
8051 inst.instruction = THUMB_OP32 (inst.instruction);
8052 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8053 inst.instruction |= inst.operands[0].reg << r0off;
8054 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8059 inst.instruction = THUMB_OP32 (inst.instruction);
8060 inst.instruction |= inst.operands[0].reg << r0off;
8061 encode_thumb32_shifted_operand (1);
/* Narrow register-register forms, dispatched on the mnemonic.  */
8064 switch (inst.instruction)
8067 inst.instruction = T_OPCODE_MOV_HR;
8068 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8069 inst.instruction |= (inst.operands[0].reg & 0x7);
8070 inst.instruction |= inst.operands[1].reg << 3;
8074 /* We know we have low registers at this point.
8075 Generate ADD Rd, Rs, #0. */
8076 inst.instruction = T_OPCODE_ADD_I3;
8077 inst.instruction |= inst.operands[0].reg;
8078 inst.instruction |= inst.operands[1].reg << 3;
8084 inst.instruction = T_OPCODE_CMP_LR;
8085 inst.instruction |= inst.operands[0].reg;
8086 inst.instruction |= inst.operands[1].reg << 3;
8090 inst.instruction = T_OPCODE_CMP_HR;
8091 inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
8092 inst.instruction |= (inst.operands[0].reg & 0x7);
8093 inst.instruction |= inst.operands[1].reg << 3;
8100 inst.instruction = THUMB_OP16 (inst.instruction);
8101 if (inst.operands[1].isreg)
8103 if (inst.operands[0].reg < 8 && inst.operands[1].reg < 8)
8105 /* A move of two lowregs is encoded as ADD Rd, Rs, #0
8106 since a MOV instruction produces unpredictable results. */
8107 if (inst.instruction == T_OPCODE_MOV_I8)
8108 inst.instruction = T_OPCODE_ADD_I3;
8110 inst.instruction = T_OPCODE_CMP_LR;
8112 inst.instruction |= inst.operands[0].reg;
8113 inst.instruction |= inst.operands[1].reg << 3;
8117 if (inst.instruction == T_OPCODE_MOV_I8)
8118 inst.instruction = T_OPCODE_MOV_HR;
8120 inst.instruction = T_OPCODE_CMP_HR;
8126 constraint (inst.operands[0].reg > 7,
8127 _("only lo regs allowed with immediate"));
8128 inst.instruction |= inst.operands[0].reg << 8;
8129 inst.reloc.type = BFD_RELOC_ARM_THUMB_IMM;
/* MOVW/MOVT-style: 16-bit immediate split into imm4:i:imm3:imm8 fields.  */
8136 inst.instruction |= inst.operands[0].reg << 8;
8137 inst.instruction |= (inst.operands[1].imm & 0xf000) << 4;
8138 inst.instruction |= (inst.operands[1].imm & 0x0800) << 15;
8139 inst.instruction |= (inst.operands[1].imm & 0x0700) << 4;
8140 inst.instruction |= (inst.operands[1].imm & 0x00ff);
/* NOTE(review): elided fragment of do_t_mvn_tst (mvn/mvns/tst/cmn/...).
   Same narrow/wide selection pattern as the arithmetic encoders; interior
   listing lines are missing.  */
8148 int r0off = (inst.instruction == T_MNEM_mvn
8149 || inst.instruction == T_MNEM_mvns) ? 8 : 16;
8152 if (inst.size_req == 4
8153 || inst.instruction > 0xffff
8154 || inst.operands[1].shifted
8155 || inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8157 else if (inst.instruction == T_MNEM_cmn)
8159 else if (THUMB_SETS_FLAGS (inst.instruction))
8160 narrow = (current_it_mask == 0);
8162 narrow = (current_it_mask != 0);
8164 if (!inst.operands[1].isreg)
8166 /* For an immediate, we always generate a 32-bit opcode;
8167 section relaxation will shrink it later if possible. */
8168 if (inst.instruction < 0xffff)
8169 inst.instruction = THUMB_OP32 (inst.instruction);
8170 inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8171 inst.instruction |= inst.operands[0].reg << r0off;
8172 inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8176 /* See if we can do this with a 16-bit instruction. */
8179 inst.instruction = THUMB_OP16 (inst.instruction);
8180 inst.instruction |= inst.operands[0].reg;
8181 inst.instruction |= inst.operands[1].reg << 3;
8185 constraint (inst.operands[1].shifted
8186 && inst.operands[1].immisreg,
8187 _("shift must be constant"));
8188 if (inst.instruction < 0xffff)
8189 inst.instruction = THUMB_OP32 (inst.instruction);
8190 inst.instruction |= inst.operands[0].reg << r0off;
8191 encode_thumb32_shifted_operand (1);
/* Non-unified path: 16-bit only, low regs, unshifted register operand.  */
8197 constraint (inst.instruction > 0xffff
8198 || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
8199 constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
8200 _("unshifted register required"));
8201 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8204 inst.instruction = THUMB_OP16 (inst.instruction);
8205 inst.instruction |= inst.operands[0].reg;
8206 inst.instruction |= inst.operands[1].reg << 3;
/* NOTE(review): elided fragments of do_t_mrs (above) and do_t_msr (below).
   PSR flag bits are validated against the CPU feature set (v7-M special
   registers vs. classic CPSR/SPSR).  Interior listing lines missing.  */
8214 flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);
8217 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8218 _("selected processor does not support "
8219 "requested special purpose register"));
8223 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
/* NOTE(review): this message contains %x but no argument is visible in
   the constraint call -- looks suspicious; confirm against upstream.  */
8224 _("selected processor does not support "
8225 "requested special purpose register %x"));
8226 /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all. */
8227 constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
8228 _("'CPSR' or 'SPSR' expected"));
8231 inst.instruction |= inst.operands[0].reg << 8;
8232 inst.instruction |= (flags & SPSR_BIT) >> 2;
8233 inst.instruction |= inst.operands[1].imm & 0xff;
8241 constraint (!inst.operands[1].isreg,
8242 _("Thumb encoding does not support an immediate here"));
8243 flags = inst.operands[0].imm;
8246 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1),
8247 _("selected processor does not support "
8248 "requested special purpose register"));
8252 constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7m),
8253 _("selected processor does not support "
8254 "requested special purpose register"));
8257 inst.instruction |= (flags & SPSR_BIT) >> 2;
8258 inst.instruction |= (flags & ~SPSR_BIT) >> 8;
8259 inst.instruction |= (flags & 0xff);
8260 inst.instruction |= inst.operands[1].reg << 16;
/* NOTE(review): elided fragments of do_t_mul (above) and a long-multiply
   encoder (do_t_mull?, below).  The 16-bit MUL requires Rd to overlap one
   source; 32-bit MUL exists only without the S suffix.  */
8266 if (!inst.operands[2].present)
8267 inst.operands[2].reg = inst.operands[0].reg;
8269 /* There is no 32-bit MULS and no 16-bit MUL. */
8270 if (unified_syntax && inst.instruction == T_MNEM_mul)
8272 inst.instruction = THUMB_OP32 (inst.instruction);
8273 inst.instruction |= inst.operands[0].reg << 8;
8274 inst.instruction |= inst.operands[1].reg << 16;
8275 inst.instruction |= inst.operands[2].reg << 0;
8279 constraint (!unified_syntax
8280 && inst.instruction == T_MNEM_muls, BAD_THUMB32);
8281 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8284 inst.instruction = THUMB_OP16 (inst.instruction);
8285 inst.instruction |= inst.operands[0].reg;
8287 if (inst.operands[0].reg == inst.operands[1].reg)
8288 inst.instruction |= inst.operands[2].reg << 3;
8289 else if (inst.operands[0].reg == inst.operands[2].reg)
8290 inst.instruction |= inst.operands[1].reg << 3;
8292 constraint (1, _("dest must overlap one source register"));
/* Long multiply: RdLo, RdHi, Rn, Rm; RdHi==RdLo is unpredictable.  */
8299 inst.instruction |= inst.operands[0].reg << 12;
8300 inst.instruction |= inst.operands[1].reg << 8;
8301 inst.instruction |= inst.operands[2].reg << 16;
8302 inst.instruction |= inst.operands[3].reg;
8304 if (inst.operands[0].reg == inst.operands[1].reg)
8305 as_tsktsk (_("rdhi and rdlo must be different"));
/* NOTE(review): elided fragment of do_t_nop.  Wide hint encoding when
   forced or when the hint number exceeds the 16-bit range; non-unified
   syntax only accepts a plain NOP (0x46c0 == mov r8, r8).  */
8313 if (inst.size_req == 4 || inst.operands[0].imm > 15)
8315 inst.instruction = THUMB_OP32 (inst.instruction);
8316 inst.instruction |= inst.operands[0].imm;
8320 inst.instruction = THUMB_OP16 (inst.instruction);
8321 inst.instruction |= inst.operands[0].imm << 4;
8326 constraint (inst.operands[0].present,
8327 _("Thumb does not support NOP with hints"));
8328 inst.instruction = 0x46c0;
/* NOTE(review): elided fragment of a two-register narrow/wide encoder
   (possibly do_t_neg) -- same IT-block narrowing rule as the arithmetic
   encoders, with a low-register-only non-unified path below.  */
8339 if (THUMB_SETS_FLAGS (inst.instruction))
8340 narrow = (current_it_mask == 0);
8342 narrow = (current_it_mask != 0);
8343 if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8345 if (inst.size_req == 4)
8350 inst.instruction = THUMB_OP32 (inst.instruction);
8351 inst.instruction |= inst.operands[0].reg << 8;
8352 inst.instruction |= inst.operands[1].reg << 16;
8356 inst.instruction = THUMB_OP16 (inst.instruction);
8357 inst.instruction |= inst.operands[0].reg;
8358 inst.instruction |= inst.operands[1].reg << 3;
8363 constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
8365 constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8367 inst.instruction = THUMB_OP16 (inst.instruction);
8368 inst.instruction |= inst.operands[0].reg;
8369 inst.instruction |= inst.operands[1].reg << 3;
/* NOTE(review): elided fragments of a PKHBT-style encoder (optional shift
   amount in operand 3), its PKHTB companion, and a one-line addressing-mode
   encoder (PLD-like).  Interior listing lines missing.  */
8376 inst.instruction |= inst.operands[0].reg << 8;
8377 inst.instruction |= inst.operands[1].reg << 16;
8378 inst.instruction |= inst.operands[2].reg;
8379 if (inst.operands[3].present)
8381 unsigned int val = inst.reloc.exp.X_add_number;
8382 constraint (inst.reloc.exp.X_op != O_constant,
8383 _("expression too complex"));
8384 inst.instruction |= (val & 0x1c) << 10;
8385 inst.instruction |= (val & 0x03) << 6;
8392 if (!inst.operands[3].present)
8393 inst.instruction &= ~0x00000020;
8400 encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
/* NOTE(review): elided fragment of do_t_push_pop.  Chooses among the
   16-bit encoding (low regs, plus LR for push / PC for pop), a single
   str/ldr for one-register lists, and the 32-bit LDM/STM form.  Interior
   listing lines (braces, else arms) are missing.  */
8404 do_t_push_pop (void)
8408 constraint (inst.operands[0].writeback,
8409 _("push/pop do not support {reglist}^"));
8410 constraint (inst.reloc.type != BFD_RELOC_UNUSED,
8411 _("expression too complex"));
8413 mask = inst.operands[0].imm;
8414 if ((mask & ~0xff) == 0)
8415 inst.instruction = THUMB_OP16 (inst.instruction);
8416 else if ((inst.instruction == T_MNEM_push
8417 && (mask & ~0xff) == 1 << REG_LR)
8418 || (inst.instruction == T_MNEM_pop
8419 && (mask & ~0xff) == 1 << REG_PC))
8421 inst.instruction = THUMB_OP16 (inst.instruction);
8422 inst.instruction |= THUMB_PP_PC_LR;
8425 else if (unified_syntax)
8427 if (mask & (1 << 13))
8428 inst.error = _("SP not allowed in register list");
8429 if (inst.instruction == T_MNEM_push)
8431 if (mask & (1 << 15))
8432 inst.error = _("PC not allowed in register list");
8436 if (mask & (1 << 14)
8437 && mask & (1 << 15))
8438 inst.error = _("LR and PC should not both be in register list");
8440 if ((mask & (mask - 1)) == 0)
8442 /* Single register push/pop implemented as str/ldr. */
8443 if (inst.instruction == T_MNEM_push)
8444 inst.instruction = 0xf84d0d04; /* str reg, [sp, #-4]! */
8446 inst.instruction = 0xf85d0b04; /* ldr reg, [sp], #4 */
8447 mask = ffs(mask) - 1;
8451 inst.instruction = THUMB_OP32 (inst.instruction);
8455 inst.error = _("invalid register list to push/pop instruction");
8459 inst.instruction |= mask;
/* NOTE(review): fragments of two Thumb two-register encoders (the
   surrounding function headers are elided from this excerpt).  The
   first two lines appear to be the tail of a 32-bit encoder; the rest
   looks like a rev/rbit-style encoder choosing a 16-bit narrow form
   for low registers -- confirm against the full source.  */
8465   inst.instruction |= inst.operands[0].reg << 8;
8466   inst.instruction |= inst.operands[1].reg << 16;
     /* Both registers low and no explicit 32-bit size request: narrow
	16-bit encoding.  */
8472   if (inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
8473       && inst.size_req != 4)
8475       inst.instruction = THUMB_OP16 (inst.instruction);
8476       inst.instruction |= inst.operands[0].reg;
8477       inst.instruction |= inst.operands[1].reg << 3;
8479   else if (unified_syntax)
8481       inst.instruction = THUMB_OP32 (inst.instruction);
8482       inst.instruction |= inst.operands[0].reg << 8;
8483       inst.instruction |= inst.operands[1].reg << 16;
     /* T2 encoding repeats Rm in both the Rm and Rn-like fields.  */
8484       inst.instruction |= inst.operands[1].reg;
8487     inst.error = BAD_HIREG;
/* Fragment of a three-operand Thumb-2 arithmetic encoder (rsb-like).
   An omitted middle operand means "Rd, foo" is treated as
   "Rd, Rd, foo".  NOTE(review): function header and trailing lines are
   elided from this excerpt.  */
8495   Rd = inst.operands[0].reg;
8496   Rs = (inst.operands[1].present
8497 	? inst.operands[1].reg    /* Rd, Rs, foo */
8498 	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
8500   inst.instruction |= Rd << 8;
8501   inst.instruction |= Rs << 16;
8502   if (!inst.operands[2].isreg)
     /* Immediate form: flip the encoding to the immediate variant and
	let a fixup resolve the T32 immediate.  */
8504       inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
8505       inst.reloc.type = BFD_RELOC_ARM_T32_IMMEDIATE;
8508     encode_thumb32_shifted_operand (2);
/* Fragment of a SETEND-style encoder: not permitted inside an IT block;
   bit 3 selects the endianness operand.  NOTE(review): surrounding
   lines elided -- confirm against the full source.  */
8514   constraint (current_it_mask, BAD_NOT_IT);
8515   if (inst.operands[0].imm)
8516     inst.instruction |= 0x8;
/* Fragment of the Thumb shift-instruction encoder (asr/lsl/lsr/ror and
   flag-setting variants).  Chooses between 16-bit narrow encodings and
   32-bit T2 encodings (the latter via MOV/MOVS with a shifted operand
   for the immediate-shift case).  NOTE(review): this excerpt is elided;
   switch headers, braces and else-arms are missing, so comments cover
   only the visible lines.  */
8522   if (!inst.operands[1].present)
8523     inst.operands[1].reg = inst.operands[0].reg;
     /* Map mnemonic to the shift kind for later encoding.  */
8530       switch (inst.instruction)
8533 	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
8535 	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
8537 	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
8539 	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
     /* Narrow (16-bit) encodings set flags outside IT blocks; the
	non-flag-setting forms are only narrow inside an IT block.  */
8543       if (THUMB_SETS_FLAGS (inst.instruction))
8544 	narrow = (current_it_mask == 0);
8546 	narrow = (current_it_mask != 0);
8547       if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
8549       if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
8551       if (inst.operands[2].isreg
8552 	  && (inst.operands[1].reg != inst.operands[0].reg
8553 	      || inst.operands[2].reg > 7))
8555       if (inst.size_req == 4)
8560 	  if (inst.operands[2].isreg)
8562 	      inst.instruction = THUMB_OP32 (inst.instruction);
8563 	      inst.instruction |= inst.operands[0].reg << 8;
8564 	      inst.instruction |= inst.operands[1].reg << 16;
8565 	      inst.instruction |= inst.operands[2].reg;
     /* Immediate shift in T2: synthesized as MOV/MOVS Rd, Rm, shift #n.  */
8569 	      inst.operands[1].shifted = 1;
8570 	      inst.operands[1].shift_kind = shift_kind;
8571 	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
8572 					     ? T_MNEM_movs : T_MNEM_mov);
8573 	      inst.instruction |= inst.operands[0].reg << 8;
8574 	      encode_thumb32_shifted_operand (1);
8575 	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
8576 	      inst.reloc.type = BFD_RELOC_UNUSED;
8581 	  if (inst.operands[2].isreg)
     /* 16-bit register-shift opcodes.  */
8585 		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
8586 		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
8587 		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
8588 		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
8592 	      inst.instruction |= inst.operands[0].reg;
8593 	      inst.instruction |= inst.operands[2].reg << 3;
     /* 16-bit immediate-shift opcodes (no ROR-immediate narrow form).  */
8599 		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
8600 		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
8601 		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
8604 	      inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
8605 	      inst.instruction |= inst.operands[0].reg;
8606 	      inst.instruction |= inst.operands[1].reg << 3;
     /* Non-unified (classic Thumb) syntax: low registers only, no
	flag-setting suffix, and destination must equal source1.  */
8612       constraint (inst.operands[0].reg > 7
8613 		  || inst.operands[1].reg > 7, BAD_HIREG);
8614       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
8616       if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
8618 	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
8619 	  constraint (inst.operands[0].reg != inst.operands[1].reg,
8620 		      _("source1 and dest must be same register"));
8622 	  switch (inst.instruction)
8624 	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
8625 	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
8626 	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
8627 	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
8631 	  inst.instruction |= inst.operands[0].reg;
8632 	  inst.instruction |= inst.operands[2].reg << 3;
8636 	  switch (inst.instruction)
8638 	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
8639 	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
8640 	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
8641 	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
8644 	  inst.reloc.type = BFD_RELOC_ARM_THUMB_SHIFT;
8645 	  inst.instruction |= inst.operands[0].reg;
8646 	  inst.instruction |= inst.operands[1].reg << 3;
/* Fragment of a generic three-register T2 encoder: Rd in bits 11:8,
   Rn in 19:16, Rm in 3:0.  NOTE(review): function header elided.  */
8654   inst.instruction |= inst.operands[0].reg << 8;
8655   inst.instruction |= inst.operands[1].reg << 16;
8656   inst.instruction |= inst.operands[2].reg;
/* Fragment of an encoder that splits a 16-bit constant across the
   instruction word (bits 15:12 -> 3:0, 11:4 kept in place,
   3:0 -> 19:16) -- consistent with an smc/hvc-style immediate; confirm
   against the full source.  The constant must be a resolved O_constant
   and no fixup is emitted.  */
8662   unsigned int value = inst.reloc.exp.X_add_number;
8663   constraint (inst.reloc.exp.X_op != O_constant,
8664 	      _("expression too complex"));
8665   inst.reloc.type = BFD_RELOC_UNUSED;
8666   inst.instruction |= (value & 0xf000) >> 12;
8667   inst.instruction |= (value & 0x0ff0);
8668   inst.instruction |= (value & 0x000f) << 16;
/* Fragments of the T2 SSAT and SSAT16 encoders.  The saturate bound is
   encoded as (imm - 1).  For SSAT an optional shift is folded into the
   instruction: the 'sh' bit selects ASR, and the shift amount is split
   across bits 14:12 and 7:6.  NOTE(review): headers and braces elided
   from this excerpt.  */
8674   inst.instruction |= inst.operands[0].reg << 8;
8675   inst.instruction |= inst.operands[1].imm - 1;
8676   inst.instruction |= inst.operands[2].reg << 16;
8678   if (inst.operands[3].present)
8680       constraint (inst.reloc.exp.X_op != O_constant,
8681 		  _("expression too complex"));
8683       if (inst.reloc.exp.X_add_number != 0)
8685 	  if (inst.operands[3].shift_kind == SHIFT_ASR)
8686 	    inst.instruction |= 0x00200000;  /* sh bit */
8687 	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
8688 	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
8690       inst.reloc.type = BFD_RELOC_UNUSED;
     /* SSAT16 variant: no shift operand.  */
8697   inst.instruction |= inst.operands[0].reg << 8;
8698   inst.instruction |= inst.operands[1].imm - 1;
8699   inst.instruction |= inst.operands[2].reg << 16;
/* Fragment of the T2 STREX encoder.  The address operand must be a
   plain pre-indexed register with an optional immediate: no
   post-index, writeback, register offset, shift or negative offset.
   The offset is resolved through an unsigned 8-bit fixup.  */
8705   constraint (!inst.operands[2].isreg || !inst.operands[2].preind
8706 	      || inst.operands[2].postind || inst.operands[2].writeback
8707 	      || inst.operands[2].immisreg || inst.operands[2].shifted
8708 	      || inst.operands[2].negative,
8711   inst.instruction |= inst.operands[0].reg << 8;
8712   inst.instruction |= inst.operands[1].reg << 12;
8713   inst.instruction |= inst.operands[2].reg << 16;
8714   inst.reloc.type = BFD_RELOC_ARM_T32_OFFSET_U8;
/* Fragment of the T2 STREXD encoder.  A missing second source register
   defaults to Rt+1.  The status register must not overlap any of the
   other operands, and the two data registers must differ.  */
8720   if (!inst.operands[2].present)
8721     inst.operands[2].reg = inst.operands[1].reg + 1;
8723   constraint (inst.operands[0].reg == inst.operands[1].reg
8724 	      || inst.operands[0].reg == inst.operands[2].reg
8725 	      || inst.operands[0].reg == inst.operands[3].reg
8726 	      || inst.operands[1].reg == inst.operands[2].reg,
8729   inst.instruction |= inst.operands[0].reg;
8730   inst.instruction |= inst.operands[1].reg << 12;
8731   inst.instruction |= inst.operands[2].reg << 8;
8732   inst.instruction |= inst.operands[3].reg << 16;
/* Fragment of a T2 extend-and-add style encoder (sxtah-like): Rd, Rn,
   Rm plus a rotation encoded in bits 5:4.  NOTE(review): header elided
   -- confirm against the full source.  */
8738   inst.instruction |= inst.operands[0].reg << 8;
8739   inst.instruction |= inst.operands[1].reg << 16;
8740   inst.instruction |= inst.operands[2].reg;
8741   inst.instruction |= inst.operands[3].imm << 4;
/* Fragment of a Thumb sign/zero-extend encoder (sxth-like).  The
   narrow 16-bit form is used for low registers with no rotation;
   unified syntax falls back to the 32-bit form with the rotation in
   bits 5:4.  Classic syntax rejects rotation and high registers.  The
   final line is a separate fragment of the SWI/SVC encoder.
   NOTE(review): headers and braces elided from this excerpt.  */
8747   if (inst.instruction <= 0xffff && inst.size_req != 4
8748       && inst.operands[0].reg <= 7 && inst.operands[1].reg <= 7
8749       && (!inst.operands[2].present || inst.operands[2].imm == 0))
8751       inst.instruction = THUMB_OP16 (inst.instruction);
8752       inst.instruction |= inst.operands[0].reg;
8753       inst.instruction |= inst.operands[1].reg << 3;
8755   else if (unified_syntax)
8757       if (inst.instruction <= 0xffff)
8758 	inst.instruction = THUMB_OP32 (inst.instruction);
8759       inst.instruction |= inst.operands[0].reg << 8;
8760       inst.instruction |= inst.operands[1].reg;
8761       inst.instruction |= inst.operands[2].imm << 4;
8765       constraint (inst.operands[2].present && inst.operands[2].imm != 0,
8766 		  _("Thumb encoding does not support rotation"));
8767       constraint (1, BAD_HIREG);
     /* do_t_swi fragment: SWI immediate resolved via fixup.  */
8774   inst.reloc.type = BFD_RELOC_ARM_SWI;
/* Fragment of the TBB/TBH (table branch) encoder.  Bit 4 of the opcode
   distinguishes the halfword form.  Must be the last instruction of an
   IT block (or outside one); the index must be a register other than
   PC, and only TBH permits a (LSL #1) shifted index.  */
8782   half = (inst.instruction & 0x10) != 0;
8783   constraint (current_it_mask && current_it_mask != 0x10, BAD_BRANCH);
8784   constraint (inst.operands[0].immisreg,
8785 	      _("instruction requires register index"));
8786   constraint (inst.operands[0].imm == 15,
8787 	      _("PC is not a valid index register"));
8788   constraint (!half && inst.operands[0].shifted,
8789 	      _("instruction does not allow shifted index"));
8790   inst.instruction |= (inst.operands[0].reg << 16) | inst.operands[0].imm;
/* Fragments of the T2 USAT and USAT16 encoders.  Unlike SSAT, the
   unsigned bound is encoded directly (no -1).  An optional shift is
   folded in exactly as for SSAT: 'sh' bit for ASR, amount split across
   bits 14:12 and 7:6.  NOTE(review): headers and braces elided.  */
8796   inst.instruction |= inst.operands[0].reg << 8;
8797   inst.instruction |= inst.operands[1].imm;
8798   inst.instruction |= inst.operands[2].reg << 16;
8800   if (inst.operands[3].present)
8802       constraint (inst.reloc.exp.X_op != O_constant,
8803 		  _("expression too complex"));
8804       if (inst.reloc.exp.X_add_number != 0)
8806 	  if (inst.operands[3].shift_kind == SHIFT_ASR)
8807 	    inst.instruction |= 0x00200000;  /* sh bit */
8809 	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x1c) << 10;
8810 	  inst.instruction |= (inst.reloc.exp.X_add_number & 0x03) << 6;
8812       inst.reloc.type = BFD_RELOC_UNUSED;
     /* USAT16 variant: no shift operand.  */
8819   inst.instruction |= inst.operands[0].reg << 8;
8820   inst.instruction |= inst.operands[1].imm;
8821   inst.instruction |= inst.operands[2].reg << 16;
8824 /* Neon instruction encoder helpers.  */
8826 /* Encodings for the different types for various Neon opcodes.  */
8828 /* An "invalid" code for the following tables.  */
8831 struct neon_tab_entry
8834   unsigned float_or_poly;	/* Float or polynomial-variant encoding.  */
8835   unsigned scalar_or_imm;	/* Scalar or immediate-variant encoding.  */
8838 /* Map overloaded Neon opcodes to their respective encodings.  Each X()
     row supplies the integer, float/poly and scalar/immediate encodings
     (N_INV where no such variant exists).  The table is expanded twice
     below: once as enum mnemonic values, once as the encoding table.  */
8839 #define NEON_ENC_TAB					\
8840   X(vabd,	0x0000700, 0x1200d00, N_INV),		\
8841   X(vmax,	0x0000600, 0x0000f00, N_INV),		\
8842   X(vmin,	0x0000610, 0x0200f00, N_INV),		\
8843   X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
8844   X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
8845   X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
8846   X(vadd,	0x0000800, 0x0000d00, N_INV),		\
8847   X(vsub,	0x1000800, 0x0200d00, N_INV),		\
8848   X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
8849   X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
8850   X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
8851   /* Register variants of the following two instructions are encoded as
8852      vcge / vcgt with the operands reversed.  */  	\
8853   X(vclt,	0x0000310, 0x1000e00, 0x1b10200),	\
8854   X(vcle,	0x0000300, 0x1200e00, 0x1b10180),	\
8855   X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
8856   X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
8857   X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
8858   X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
8859   X(vmlal,	0x0800800, N_INV,     0x0800240),	\
8860   X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
8861   X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
8862   X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
8863   X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
8864   X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
8865   X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
8866   X(vshl,	0x0000400, N_INV,     0x0800510),	\
8867   X(vqshl,	0x0000410, N_INV,     0x0800710),	\
8868   X(vand,	0x0000110, N_INV,     0x0800030),	\
8869   X(vbic,	0x0100110, N_INV,     0x0800030),	\
8870   X(veor,	0x1000110, N_INV,     N_INV),		\
8871   X(vorn,	0x0300110, N_INV,     0x0800010),	\
8872   X(vorr,	0x0200110, N_INV,     0x0800010),	\
8873   X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
8874   X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
8875   X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
8876   X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
8877   X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
8878   X(vst1,	0x0000000, 0x0800000, N_INV),		\
8879   X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
8880   X(vst2,	0x0000100, 0x0800100, N_INV),		\
8881   X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
8882   X(vst3,	0x0000200, 0x0800200, N_INV),		\
8883   X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
8884   X(vst4,	0x0000300, 0x0800300, N_INV),		\
8885   X(vmovn,	0x1b20200, N_INV,     N_INV),		\
8886   X(vtrn,	0x1b20080, N_INV,     N_INV),		\
8887   X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
8888   X(vqmovun,	0x1b20240, N_INV,     N_INV)
     /* First expansion: mnemonic enum values.  */
8892 #define X(OPC,I,F,S) N_MNEM_##OPC
     /* Second expansion: the encoding table itself.  */
8897 static const struct neon_tab_entry neon_enc_tab[] =
8899 #define X(OPC,I,F,S) { (I), (F), (S) }
     /* Accessors: the low 28 bits of inst.instruction index the table
	(the N_MNEM_* value); the variant selects which field.  Several
	names alias the same field for readability at call sites.  */
8904 #define NEON_ENC_INTEGER(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
8905 #define NEON_ENC_ARMREG(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
8906 #define NEON_ENC_POLY(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
8907 #define NEON_ENC_FLOAT(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
8908 #define NEON_ENC_SCALAR(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
8909 #define NEON_ENC_IMMED(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
8910 #define NEON_ENC_INTERLV(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
8911 #define NEON_ENC_LANE(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
8912 #define NEON_ENC_DUP(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
8914 /* Shapes for instruction operands.  Some (e.g. NS_DDD_QQQ) represent multiple
8915    shapes which an instruction can accept.  The following mnemonic characters
8916    are used in the tag names for this enumeration:
8918      D - Neon D<n> register
8919      Q - Neon Q<n> register
8923      L - D<n> register list
8964 /* Bit masks used in type checking given instructions.
8965   'N_EQK' means the type must be the same as (or based on in some way) the key
8966    type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
8967    set, various other bits can be set as well in order to modify the meaning of
8968    the type constraint.  */
     /* NOTE(review): the enum body is partially elided here; only the
	modifier bits and terminator are visible.  */
8991   N_KEY = 0x080000, /* key element (main type specifier).  */
8992   N_EQK = 0x100000, /* given operand has the same type & size as the key.  */
8993   N_DBL = 0x000001, /* if N_EQK, this operand is twice the size.  */
8994   N_HLF = 0x000002, /* if N_EQK, this operand is half the size.  */
8995   N_SGN = 0x000004, /* if N_EQK, this operand is forced to be signed.  */
8996   N_UNS = 0x000008, /* if N_EQK, this operand is forced to be unsigned.  */
8997   N_INT = 0x000010, /* if N_EQK, this operand is forced to be integer.  */
8998   N_FLT = 0x000020, /* if N_EQK, this operand is forced to be float.  */
9000   N_MAX_NONSPECIAL = N_F32
     /* Convenience unions of the per-size type masks.  */
9003 #define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
9004 #define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
9005 #define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
9006 #define N_SUF_32   (N_SU_32 | N_F32)
9007 #define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
9008 #define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F32)
9010 /* Pass this as the first type argument to neon_check_type to ignore types
9012 #define N_IGNORE_TYPE (N_KEY | N_EQK)
9014 /* Check the shape of a Neon instruction (sizes of registers). Returns the more
9015    specific shape when there are two alternatives. For non-polymorphic shapes,
9016    checking is done during operand parsing, so is not implemented here.
     Operand-class predicates: RR = any register, RD = D register,
     RQ = Q register, IM = immediate, SC = scalar.  On mismatch,
     inst.error is set to a message listing the accepted forms.  */
9018 static enum neon_shape
9019 neon_check_shape (enum neon_shape req)
9021 #define RR(X) (inst.operands[(X)].isreg)
9022 #define RD(X) (inst.operands[(X)].isreg && !inst.operands[(X)].isquad)
9023 #define RQ(X) (inst.operands[(X)].isreg && inst.operands[(X)].isquad)
9024 #define IM(X) (!inst.operands[(X)].isreg && !inst.operands[(X)].isscalar)
9025 #define SC(X) (!inst.operands[(X)].isreg && inst.operands[(X)].isscalar)
9027   /* Fix missing optional operands. FIXME: we don't know at this point how
9028      many arguments we should have, so this makes the assumption that we have
9029      > 1. This is true of all current Neon opcodes, I think, but may not be
9030      true in the future.  */
9031   if (!inst.operands[1].present)
9032     inst.operands[1] = inst.operands[0];
9038       if (RD(0) && RD(1) && RD(2))
     /* BUG FIX: was "RQ(0) && RQ(1) && RQ(1)", which tested operand 1
	twice and never examined operand 2, so a mixed Q,Q,D operand set
	was wrongly accepted as the all-Q shape.  Mirror the D-register
	arm above and test all three operands.  */
9040       else if (RQ(0) && RQ(1) && RQ(2))
9043 	  inst.error = _("expected <Qd>, <Qn>, <Qm> or <Dd>, <Dn>, <Dm> "
9050       if (RD(0) && RD(1) && IM(2))
9052       else if (RQ(0) && RQ(1) && IM(2))
9055 	  inst.error = _("expected <Qd>, <Qn>, #<imm> or <Dd>, <Dn>, #<imm> "
9062       if (RD(0) && RD(1) && RD(2) && IM(3))
9064       if (RQ(0) && RQ(1) && RQ(2) && IM(3))
9067 	  inst.error = _("expected <Qd>, <Qn>, <Qm>, #<imm> or "
9068 			 "<Dd>, <Dn>, <Dm>, #<imm> operands");
9074       if (RD(0) && RD(1) && SC(2))
9076       else if (RQ(0) && RQ(1) && SC(2))
9079 	  inst.error = _("expected <Qd>, <Qn>, <Dm[x]> or <Dd>, <Dn>, <Dm[x]> "
9088       else if (RQ(0) && RQ(1))
9091 	inst.error = _("expected <Qd>, <Qm> or <Dd>, <Dm> operands");
9099       else if (RQ(0) && SC(1))
9102 	inst.error = _("expected <Qd>, <Dm[x]> or <Dd>, <Dm[x]> operands");
9110       else if (RQ(0) && RR(1))
9113 	inst.error = _("expected <Qd>, <Rm> or <Dd>, <Rm> operands");
9121       else if (RQ(0) && IM(1))
9124 	inst.error = _("expected <Qd>, #<imm> or <Dd>, #<imm> operands");
/* Apply N_EQK modifier bits to a derived type: size halved/doubled via
   N_HLF/N_DBL, class forced via N_SGN/N_UNS/N_INT/N_FLT.  NOTE(review):
   some lines (size assignments, NT_float arm) are elided from this
   excerpt.  */
9141 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
9144   /* Allow modification to be made to types which are constrained to be
9145      based on the key element, based on bits set alongside N_EQK.  */
9146   if ((typebits & N_EQK) != 0)
9148       if ((typebits & N_HLF) != 0)
9150       else if ((typebits & N_DBL) != 0)
9152       if ((typebits & N_SGN) != 0)
9153 	*g_type = NT_signed;
9154       else if ((typebits & N_UNS) != 0)
9155 	*g_type = NT_unsigned;
9156       else if ((typebits & N_INT) != 0)
9157 	*g_type = NT_integer;
9158       else if ((typebits & N_FLT) != 0)
9163 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
9164    operand type, i.e. the single type specified in a Neon instruction when it
9165    is the only one given.  */
9167 static struct neon_type_el
9168 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
9170   struct neon_type_el dest = *key;
     /* Only N_EQK-constrained operands may be promoted.  */
9172   assert ((thisarg & N_EQK) != 0);
9174   neon_modify_type_size (thisarg, &dest.type, &dest.size);
9179 /* Convert Neon type and size into compact bitmask representation.
     Dispatches on element class, then size; NOTE(review): the switch
     headers and some class cases are elided from this excerpt.  */
9181 static enum neon_type_mask
9182 type_chk_of_el_type (enum neon_el_type type, unsigned size)
9190 	case 16: return N_16;
9191 	case 32: return N_32;
9192 	case 64: return N_64;
9200 	case 8:  return N_I8;
9201 	case 16: return N_I16;
9202 	case 32: return N_I32;
9203 	case 64: return N_I64;
9216 	case 8:  return N_P8;
9217 	case 16: return N_P16;
9225 	case 8:  return N_S8;
9226 	case 16: return N_S16;
9227 	case 32: return N_S32;
9228 	case 64: return N_S64;
9236 	case 8:  return N_U8;
9237 	case 16: return N_U16;
9238 	case 32: return N_U32;
9239 	case 64: return N_U64;
9250 /* Convert compact Neon bitmask type representation to a type and size. Only
9251    handles the case where a single bit is set in the mask.  */
9254 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
9255 		     enum neon_type_mask mask)
     /* Recover the element size from whichever size-group bit is set...  */
9257   if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
9259   if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_P16)) != 0)
9261   if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
9263   if ((mask & (N_S64 | N_U64 | N_I64 | N_64)) != 0)
     /* ...then the element class from the type-group bit.  */
9265   if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
9267   if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
9268     *type = NT_unsigned;
9269   if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
9271   if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
9273   if ((mask & (N_P8 | N_P16)) != 0)
9275   if ((mask & N_F32) != 0)
9279 /* Modify a bitmask of allowed types.  This is only needed for type
     relationships: each single type bit in ALLOWED is decoded to a
     (class, size) pair, run through neon_modify_type_size with MODS,
     and re-encoded into the result mask.  */
9283 modify_types_allowed (unsigned allowed, unsigned mods)
9286   enum neon_el_type type;
9292   for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
9294       el_type_of_type_chk (&type, &size, allowed & i);
9295       neon_modify_type_size (mods, &type, &size);
9296       destmask |= type_chk_of_el_type (type, size);
9302 /* Check type and return type classification.
9303    The manual states (paraphrase): If one datatype is given, it indicates the
9305      - the second operand, if there is one
9306      - the operand, if there is no second operand
9307      - the result, if there are no operands.
9308    This isn't quite good enough though, so we use a concept of a "key" datatype
9309    which is set on a per-instruction basis, which is the one which matters when
9310    only one data type is written.
9311    Note: this function has side-effects (e.g. filling in missing operands). All
9312    Neon instructions should call it before performing bit encoding.
     NOTE(review): this excerpt is elided; some lines of the varargs loop
     and the pass-0/pass-1 logic are missing.  */
9315 static struct neon_type_el
9316 neon_check_type (unsigned els, enum neon_shape ns, ...)
9319   unsigned i, pass, key_el = 0;
9320   unsigned types[NEON_MAX_TYPE_ELS];
9321   enum neon_el_type k_type = NT_invtype;
9322   unsigned k_size = -1u;
9323   struct neon_type_el badtype = {NT_invtype, -1};
9324   unsigned key_allowed = 0;
9326   /* Optional registers in Neon instructions are always (not) in operand 1.
9327      Fill in the missing operand here, if it was omitted.  */
9328   if (els > 1 && !inst.operands[1].present)
9329     inst.operands[1] = inst.operands[0];
9331   /* Suck up all the varargs.  */
9333   for (i = 0; i < els; i++)
9335       unsigned thisarg = va_arg (ap, unsigned);
9336       if (thisarg == N_IGNORE_TYPE)
9342       if ((thisarg & N_KEY) != 0)
9347   /* Duplicate inst.vectype elements here as necessary.
9348      FIXME: No idea if this is exactly the same as the ARM assembler,
9349      particularly when an insn takes one register and one non-register
9351   if (inst.vectype.elems == 1 && els > 1)
9354       inst.vectype.elems = els;
9355       inst.vectype.el[key_el] = inst.vectype.el[0];
9356       for (j = 0; j < els; j++)
9359 	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
9363   else if (inst.vectype.elems != els)
9365       inst.error = _("type specifier has the wrong number of parts");
     /* Pass 0 establishes the key type; pass 1 validates the rest
	against it.  */
9369   for (pass = 0; pass < 2; pass++)
9371       for (i = 0; i < els; i++)
9373 	  unsigned thisarg = types[i];
9374 	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
9375 	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
9376 	  enum neon_el_type g_type = inst.vectype.el[i].type;
9377 	  unsigned g_size = inst.vectype.el[i].size;
9379 	  /* Decay more-specific signed & unsigned types to sign-insensitive
9380 	     integer types if sign-specific variants are unavailable.  */
9381 	  if ((g_type == NT_signed || g_type == NT_unsigned)
9382 	      && (types_allowed & N_SU_ALL) == 0)
9383 	    g_type = NT_integer;
9385 	  /* If only untyped args are allowed, decay any more specific types to
9386 	     them. Some instructions only care about signs for some element
9387 	     sizes, so handle that properly.  */
9388 	  if ((g_size == 8 && (types_allowed & N_8) != 0)
9389 	      || (g_size == 16 && (types_allowed & N_16) != 0)
9390 	      || (g_size == 32 && (types_allowed & N_32) != 0)
9391 	      || (g_size == 64 && (types_allowed & N_64) != 0))
9392 	    g_type = NT_untyped;
9396 	      if ((thisarg & N_KEY) != 0)
9400 		  key_allowed = thisarg & ~N_KEY;
9405 	      if ((thisarg & N_EQK) == 0)
9407 		  unsigned given_type = type_chk_of_el_type (g_type, g_size);
9409 		  if ((given_type & types_allowed) == 0)
9411 		      inst.error = _("bad type in Neon instruction");
     /* N_EQK operand: must match the (possibly modified) key type.  */
9417 		  enum neon_el_type mod_k_type = k_type;
9418 		  unsigned mod_k_size = k_size;
9419 		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
9420 		  if (g_type != mod_k_type || g_size != mod_k_size)
9422 		      inst.error = _("inconsistent types in Neon instruction");
9430   return inst.vectype.el[key_el];
9433 /* Fix up Neon data-processing instructions, ORing in the correct bits for
9434    ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */
9437 neon_dp_fixup (unsigned i)
9441       /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
9455 /* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
     (i.e. 0, 1, 2, 3) -- used to fill size fields.  NOTE(review): the
     body of this helper is elided from this excerpt.  */
9459 neon_logbits (unsigned x)
     /* Split a 5-bit Neon register number into its 4-bit field and the
	single high "D"/"N"/"M" bit.  */
9464 #define LOW4(R) ((R) & 0xf)
9465 #define HI1(R) (((R) >> 4) & 1)
9467 /* Encode insns with bit pattern:
9469   |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
9470   |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |
9472   SIZE is passed in bits. -1 means size field isn't changed, in case it has a
9473   different meaning for some instruction.
    FIRST_OPTIONAL duplicates operand 1 into a missing operand 0.  */
9476 neon_three_same (int first_optional, int isquad, int ubit, int size)
9478   /* FIXME optional argument handling.  */
9479   if (first_optional && !inst.operands[0].present)
9480     inst.operands[0].reg = inst.operands[1].reg;
9482   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
9483   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
9484   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
9485   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
9486   inst.instruction |= LOW4 (inst.operands[2].reg);
9487   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
9488   inst.instruction |= (isquad != 0) << 6;
9489   inst.instruction |= (ubit != 0) << 24;
9491     inst.instruction |= neon_logbits (size) << 20;
9493   inst.instruction = neon_dp_fixup (inst.instruction);
9496 /* Encode instructions of the form:
9498   |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
9499   |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |
9501   Don't write size if SIZE == -1.  */
9504 neon_two_same (int qbit, int ubit, int size)
9506   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
9507   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
9508   inst.instruction |= LOW4 (inst.operands[1].reg);
9509   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
9510   inst.instruction |= (qbit != 0) << 6;
9511   inst.instruction |= (ubit != 0) << 24;
9514     inst.instruction |= neon_logbits (size) << 18;
9516   inst.instruction = neon_dp_fixup (inst.instruction);
9519 /* Neon instruction encoders, in approximate order of appearance.  */
     /* Three-same-shape dyadic, 8/16/32-bit signed or unsigned integer
	element types; U bit distinguishes unsigned.  */
9522 do_neon_dyadic_i_su (void)
9524   enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
9525   struct neon_type_el et = neon_check_type (3, rs,
9526     N_EQK, N_EQK, N_SU_32 | N_KEY);
9527   neon_three_same (TRUE, rs == NS_QQQ, et.type == NT_unsigned, et.size);
     /* As above but also permitting 64-bit element types.  */
9531 do_neon_dyadic_i64_su (void)
9533   enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
9534   struct neon_type_el et = neon_check_type (3, rs,
9535     N_EQK, N_EQK, N_SU_ALL | N_KEY);
9536   neon_three_same (TRUE, rs == NS_QQQ, et.type == NT_unsigned, et.size);
/* Encode a Neon immediate-shift instruction.  IMMBITS goes into bits
   21:16; the element-size marker (size in bytes) is split across bit 7
   and bits 21:19.  The U bit is written only when WRITE_UBIT.
   NOTE(review): function return type and closing lines elided.  */
9540 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
9543   unsigned size = et.size >> 3;	/* element size in bytes */
9544   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
9545   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
9546   inst.instruction |= LOW4 (inst.operands[1].reg);
9547   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
9548   inst.instruction |= (isquad != 0) << 6;
9549   inst.instruction |= immbits << 16;
9550   inst.instruction |= (size >> 3) << 7;
9551   inst.instruction |= (size & 0x7) << 19;
9553     inst.instruction |= (uval != 0) << 24;
9555   inst.instruction = neon_dp_fixup (inst.instruction);
/* VSHL: immediate form (vshl.iN Dd, Dm, #imm) uses the immediate
   encoding; register form uses the integer encoding with the shift
   count register in the Rn position (note reversed operand typing:
   the shifted operand is the key).  */
9559 do_neon_shl_imm (void)
9561   if (!inst.operands[2].isreg)
9563       enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
9564       struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
9565       inst.instruction = NEON_ENC_IMMED (inst.instruction);
9566       neon_imm_shift (FALSE, 0, rs == NS_QQI, et, inst.operands[2].imm);
9570       enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
9571       struct neon_type_el et = neon_check_type (3, rs,
9572 	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
9573       inst.instruction = NEON_ENC_INTEGER (inst.instruction);
9574       neon_three_same (TRUE, rs == NS_QQQ, et.type == NT_unsigned, et.size);
/* VQSHL: same structure as VSHL above, but the immediate form writes
   the U bit from the (signed/unsigned) element type.  */
9579 do_neon_qshl_imm (void)
9581   if (!inst.operands[2].isreg)
9583       enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
9584       struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
9585       inst.instruction = NEON_ENC_IMMED (inst.instruction);
9586       neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
9587 		      inst.operands[2].imm);
9591       enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
9592       struct neon_type_el et = neon_check_type (3, rs,
9593 	N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
9594       inst.instruction = NEON_ENC_INTEGER (inst.instruction);
9595       neon_three_same (TRUE, rs == NS_QQQ, et.type == NT_unsigned, et.size);
/* Select the cmode field for a logic-immediate (VBIC/VORR-style)
   instruction and write the 8-bit payload to *IMMBITS.  .I8/.I64 are
   handled as pseudo-sizes by replicating/widening the value first.
   Returns FAIL via the elided error path when the immediate cannot be
   encoded.  NOTE(review): several lines (size switch, returns) are
   elided from this excerpt.  */
9600 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
9602   /* Handle .I8 and .I64 as pseudo-instructions.  */
9606       /* Unfortunately, this will make everything apart from zero out-of-range.
9607 	 FIXME is this the intended semantics? There doesn't seem much point in
9608 	 accepting .I8 if so.  */
9609       immediate |= immediate << 8;
9613       /* Similarly, anything other than zero will be replicated in bits [63:32],
9614 	 which probably isn't want we want if we specified .I64.  */
     /* Value fits entirely within one byte lane: pick the cmode that
	places it there (cmodes differ between 16- and 32-bit sizes).  */
9622   if (immediate == (immediate & 0x000000ff))
9624       *immbits = immediate;
9625       return (size == 16) ? 0x9 : 0x1;
9627   else if (immediate == (immediate & 0x0000ff00))
9629       *immbits = immediate >> 8;
9630       return (size == 16) ? 0xb : 0x3;
9632   else if (immediate == (immediate & 0x00ff0000))
9634       *immbits = immediate >> 16;
9637   else if (immediate == (immediate & 0xff000000))
9639       *immbits = immediate >> 24;
9644     inst.error = _("immediate value out of range");
9648 /* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
     A,B,C,D each all-zero or all-one (the VMOV.I64 byte-mask form).  */
9652 neon_bits_same_in_bytes (unsigned imm)
9654   return ((imm & 0x000000ff) == 0 || (imm & 0x000000ff) == 0x000000ff)
9655 	 && ((imm & 0x0000ff00) == 0 || (imm & 0x0000ff00) == 0x0000ff00)
9656 	 && ((imm & 0x00ff0000) == 0 || (imm & 0x00ff0000) == 0x00ff0000)
9657 	 && ((imm & 0xff000000) == 0 || (imm & 0xff000000) == 0xff000000);
9660 /* For immediate of above form, return 0bABCD.  */
9663 neon_squash_bits (unsigned imm)
9665   return (imm & 0x01) | ((imm & 0x0100) >> 7) | ((imm & 0x010000) >> 14)
9666 	 | ((imm & 0x01000000) >> 21);
9669 /* Returns 1 if a number has "quarter-precision" float format
9670    0baBbbbbbc defgh000 00000000 00000000.  */
9673 neon_is_quarter_float (unsigned imm)
9675   int b = (imm & 0x20000000) != 0;
     /* bs = the exponent pattern implied by bit b (bits 30:25 all ~b).  */
9676   int bs = (b << 25) | (b << 26) | (b << 27) | (b << 28) | (b << 29)
9678   return (imm & 0x81ffffff) == (imm & 0x81f80000)
9679 	 && ((imm & 0x7e000000) ^ bs) == 0;
9682 /* Compress above representation to 0b...000 abcdefgh.  */
9685 neon_qfloat_bits (unsigned imm)
9687   return ((imm >> 19) & 0x7f) | (imm >> 24);
9690 /* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
9691    the instruction. *OP is passed as the initial value of the op field, and
9692    may be set to a different value depending on the constant (i.e.
9693    "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
     MVN).  NOTE(review): this excerpt is elided; FAIL returns and some
     size checks are missing.  */
9697 neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, unsigned *immbits,
9700   if (size == 64 && neon_bits_same_in_bytes (immhi)
9701       && neon_bits_same_in_bytes (immlo))
9703       /* Check this one first so we don't have to bother with immhi in later
9707       *immbits = (neon_squash_bits (immhi) << 4) | neon_squash_bits (immlo);
     /* Beyond the 64-bit byte-mask form, a non-zero high word cannot be
	encoded.  */
9711   else if (immhi != 0)
9713   else if (immlo == (immlo & 0x000000ff))
9715       /* 64-bit case was already handled. Don't allow MVN with 8-bit
9717       if ((size != 8 && size != 16 && size != 32)
9718 	  || (size == 8 && *op == 1))
9721       return (size == 8) ? 0xe : (size == 16) ? 0x8 : 0x0;
9723   else if (immlo == (immlo & 0x0000ff00))
9725       if (size != 16 && size != 32)
9727       *immbits = immlo >> 8;
9728       return (size == 16) ? 0xa : 0x2;
9730   else if (immlo == (immlo & 0x00ff0000))
9734       *immbits = immlo >> 16;
9737   else if (immlo == (immlo & 0xff000000))
9741       *immbits = immlo >> 24;
     /* "Shifted ones" cmodes: 0x..FF and 0x..FFFF forms.  */
9744   else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
9748       *immbits = (immlo >> 8) & 0xff;
9751   else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
9755       *immbits = (immlo >> 16) & 0xff;
     /* Quarter-precision float form (VMOV.F32 only, not MVN).  */
9758   else if (neon_is_quarter_float (immlo))
9760       if (size != 32 || *op == 1)
9762       *immbits = neon_qfloat_bits (immlo);
9769 /* Write immediate bits [7:0] to the following locations:
9771   |28/24|23         19|18 16|15                    4|3     0|
9772   |  a  |x x x x x    |b c d|x x x x x x x x x x x x|e f g h|
9774   This function is used by VMOV/VMVN/VORR/VBIC.  */
9777 neon_write_immbits (unsigned immbits)
9779   inst.instruction |= immbits & 0xf;
9780   inst.instruction |= ((immbits >> 4) & 0x7) << 16;
9781   inst.instruction |= ((immbits >> 7) & 0x1) << 24;
9784 /* Invert low-order SIZE bits of XHI:XLO.  NULL pointers are treated
     as zero inputs.  NOTE(review): the size switch header/cases are
     partially elided from this excerpt.  */
9787 neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
9789   unsigned immlo = xlo ? *xlo : 0;
9790   unsigned immhi = xhi ? *xhi : 0;
9795       immlo = (~immlo) & 0xff;
9799       immlo = (~immlo) & 0xffff;
9803       immhi = (~immhi) & 0xffffffff;
9807       immlo = (~immlo) & 0xffffffff;
/* Encode Neon bitwise logic instructions (vand/vbic/vorr/vorn/veor,
   and the immediate forms of vbic/vorr, with vand/vorn handled as
   pseudo-instructions of the opposite opcode on the inverted
   immediate).  NOTE(review): excerpt elided -- switch header, FAIL
   checks and some braces are missing.  */
9822 do_neon_logic (void)
9824   if (inst.operands[2].present && inst.operands[2].isreg)
9826       enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
9827       neon_check_type (3, rs, N_IGNORE_TYPE);
9828       /* U bit and size field were set as part of the bitmask.  */
9829       inst.instruction = NEON_ENC_INTEGER (inst.instruction);
9830       neon_three_same (TRUE, rs == NS_QQQ, 0, -1);
9834       enum neon_shape rs = neon_check_shape (NS_DI_QI);
9835       struct neon_type_el et = neon_check_type (1, rs, N_I8 | N_I16 | N_I32
9837       enum neon_opc opcode = inst.instruction & 0x0fffffff;
9841       if (et.type == NT_invtype)
9844       inst.instruction = NEON_ENC_IMMED (inst.instruction);
9849 	  cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
9854 	  cmode = neon_cmode_for_logic_imm (inst.operands[1].imm, &immbits,
9859 	  /* Pseudo-instruction for VBIC.  */
9860 	  immbits = inst.operands[1].imm;
9861 	  neon_invert_size (&immbits, 0, et.size);
9862 	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
9866 	  /* Pseudo-instruction for VORR.  */
9867 	  immbits = inst.operands[1].imm;
9868 	  neon_invert_size (&immbits, 0, et.size);
9869 	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
9879       inst.instruction |= (rs == NS_QI) << 6;
9880       inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
9881       inst.instruction |= HI1 (inst.operands[0].reg) << 22;
9882       inst.instruction |= cmode << 8;
9883       neon_write_immbits (immbits);
9885       inst.instruction = neon_dp_fixup (inst.instruction);
9890 do_neon_bitfield (void)
9892 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
9893 /* FIXME: Check that no type was given. */
9894 neon_three_same (FALSE, rs == NS_QQQ, 0, -1);
9898 neon_dyadic (enum neon_el_type ubit_meaning, unsigned types)
9900 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
9901 struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK, types | N_KEY);
9902 if (et.type == NT_float)
9904 inst.instruction = NEON_ENC_FLOAT (inst.instruction);
9905 neon_three_same (TRUE, rs == NS_QQQ, 0, -1);
9909 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
9910 neon_three_same (TRUE, rs == NS_QQQ, et.type == ubit_meaning, et.size);
9915 do_neon_dyadic_if_su (void)
9917 neon_dyadic (NT_unsigned, N_SUF_32);
9921 do_neon_dyadic_if_su_d (void)
9923 /* This version only allow D registers, but that constraint is enforced during
9924 operand parsing so we don't need to do anything extra here. */
9925 neon_dyadic (NT_unsigned, N_SUF_32);
9929 do_neon_dyadic_if_i (void)
9931 neon_dyadic (NT_unsigned, N_IF_32);
9935 do_neon_dyadic_if_i_d (void)
9937 neon_dyadic (NT_unsigned, N_IF_32);
9941 do_neon_addsub_if_i (void)
9943 /* The "untyped" case can't happen. Do this to stop the "U" bit being
9944 affected if we specify unsigned args. */
9945 neon_dyadic (NT_untyped, N_IF_32 | N_I64);
9948 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
9950 V<op> A,B (A is operand 0, B is operand 2)
9955 so handle that case specially. */
9958 neon_exchange_operands (void)
9960 void *scratch = alloca (sizeof (inst.operands[0]));
9961 if (inst.operands[1].present)
9963 /* Swap operands[1] and operands[2]. */
9964 memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
9965 inst.operands[1] = inst.operands[2];
9966 memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
9970 inst.operands[1] = inst.operands[2];
9971 inst.operands[2] = inst.operands[0];
9976 neon_compare (unsigned regtypes, unsigned immtypes, int invert)
9978 if (inst.operands[2].isreg)
9981 neon_exchange_operands ();
9982 neon_dyadic (NT_unsigned, regtypes);
9986 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
9987 struct neon_type_el et = neon_check_type (2, rs, N_EQK, immtypes | N_KEY);
9989 inst.instruction = NEON_ENC_IMMED (inst.instruction);
9990 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
9991 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
9992 inst.instruction |= LOW4 (inst.operands[1].reg);
9993 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
9994 inst.instruction |= (rs == NS_QQI) << 6;
9995 inst.instruction |= (et.type == NT_float) << 10;
9996 inst.instruction |= neon_logbits (et.size) << 18;
9998 inst.instruction = neon_dp_fixup (inst.instruction);
10005 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, FALSE);
10009 do_neon_cmp_inv (void)
10011 neon_compare (N_SUF_32, N_S8 | N_S16 | N_S32 | N_F32, TRUE);
10017 neon_compare (N_IF_32, N_IF_32, FALSE);
/* For multiply instructions, we have the possibility of 16-bit or 32-bit
   scalars, which are encoded in 5 bits, M : Rm.
   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and
   the index in M.  */

static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = scalar >> 3;
  unsigned elno = scalar & 7;

  switch (elsize)
    {
    case 16:
      if (regno > 7 || elno > 3)
        goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      if (regno > 15 || elno > 1)
        goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      as_bad (_("Scalar out of range for multiply instruction"));
    }

  return 0;
}
10052 /* Encode multiply / multiply-accumulate scalar instructions. */
10055 neon_mul_mac (struct neon_type_el et, int ubit)
10057 unsigned scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
10058 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10059 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10060 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10061 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10062 inst.instruction |= LOW4 (scalar);
10063 inst.instruction |= HI1 (scalar) << 5;
10064 inst.instruction |= (et.type == NT_float) << 8;
10065 inst.instruction |= neon_logbits (et.size) << 20;
10066 inst.instruction |= (ubit != 0) << 24;
10068 inst.instruction = neon_dp_fixup (inst.instruction);
10072 do_neon_mac_maybe_scalar (void)
10074 if (inst.operands[2].isscalar)
10076 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10077 struct neon_type_el et = neon_check_type (3, rs,
10078 N_EQK, N_EQK, N_I16 | N_I32 | N_F32 | N_KEY);
10079 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10080 neon_mul_mac (et, rs == NS_QQS);
10083 do_neon_dyadic_if_i ();
10089 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10090 struct neon_type_el et = neon_check_type (3, rs,
10091 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
10092 neon_three_same (TRUE, rs == NS_QQQ, 0, et.size);
10095 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
10096 same types as the MAC equivalents. The polynomial type for this instruction
10097 is encoded the same as the integer type. */
10102 if (inst.operands[2].isscalar)
10103 do_neon_mac_maybe_scalar ();
10105 neon_dyadic (NT_poly, N_I8 | N_I16 | N_I32 | N_F32 | N_P8);
10109 do_neon_qdmulh (void)
10111 if (inst.operands[2].isscalar)
10113 enum neon_shape rs = neon_check_shape (NS_DDS_QQS);
10114 struct neon_type_el et = neon_check_type (3, rs,
10115 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
10116 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10117 neon_mul_mac (et, rs == NS_QQS);
10121 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10122 struct neon_type_el et = neon_check_type (3, rs,
10123 N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
10124 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10125 /* The U bit (rounding) comes from bit mask. */
10126 neon_three_same (TRUE, rs == NS_QQQ, 0, et.size);
10131 do_neon_fcmp_absolute (void)
10133 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10134 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10135 /* Size field comes from bit mask. */
10136 neon_three_same (TRUE, rs == NS_QQQ, 1, -1);
/* Inverted absolute float comparison: exchange operands then encode as the
   non-inverted form.  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
10147 do_neon_step (void)
10149 enum neon_shape rs = neon_check_shape (NS_DDD_QQQ);
10150 neon_check_type (3, rs, N_EQK, N_EQK, N_F32 | N_KEY);
10151 neon_three_same (TRUE, rs == NS_QQQ, 0, -1);
10155 do_neon_abs_neg (void)
10157 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10158 struct neon_type_el et = neon_check_type (3, rs,
10159 N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_F32 | N_KEY);
10160 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10161 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10162 inst.instruction |= LOW4 (inst.operands[1].reg);
10163 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10164 inst.instruction |= (rs == NS_QQ) << 6;
10165 inst.instruction |= (et.type == NT_float) << 10;
10166 inst.instruction |= neon_logbits (et.size) << 18;
10168 inst.instruction = neon_dp_fixup (inst.instruction);
10174 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10175 struct neon_type_el et = neon_check_type (2, rs,
10176 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10177 int imm = inst.operands[2].imm;
10178 constraint (imm < 0 || (unsigned)imm >= et.size,
10179 _("immediate out of range for insert"));
10180 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10186 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10187 struct neon_type_el et = neon_check_type (2, rs,
10188 N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10189 int imm = inst.operands[2].imm;
10190 constraint (imm < 1 || (unsigned)imm > et.size,
10191 _("immediate out of range for insert"));
10192 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, et.size - imm);
10196 do_neon_qshlu_imm (void)
10198 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10199 struct neon_type_el et = neon_check_type (2, rs,
10200 N_EQK | N_UNS, N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
10201 int imm = inst.operands[2].imm;
10202 constraint (imm < 0 || (unsigned)imm >= et.size,
10203 _("immediate out of range for shift"));
10204 /* Only encodes the 'U present' variant of the instruction.
10205 In this case, signed types have OP (bit 8) set to 0.
10206 Unsigned types have OP set to 1. */
10207 inst.instruction |= (et.type == NT_unsigned) << 8;
10208 /* The rest of the bits are the same as other immediate shifts. */
10209 neon_imm_shift (FALSE, 0, rs == NS_QQI, et, imm);
10213 do_neon_qmovn (void)
10215 struct neon_type_el et = neon_check_type (2, NS_DQ,
10216 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10217 /* Saturating move where operands can be signed or unsigned, and the
10218 destination has the same signedness. */
10219 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10220 if (et.type == NT_unsigned)
10221 inst.instruction |= 0xc0;
10223 inst.instruction |= 0x80;
10224 neon_two_same (0, 1, et.size / 2);
10228 do_neon_qmovun (void)
10230 struct neon_type_el et = neon_check_type (2, NS_DQ,
10231 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10232 /* Saturating move with unsigned results. Operands must be signed. */
10233 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10234 neon_two_same (0, 1, et.size / 2);
10238 do_neon_rshift_sat_narrow (void)
10240 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10241 or unsigned. If operands are unsigned, results must also be unsigned. */
10242 struct neon_type_el et = neon_check_type (2, NS_DQI,
10243 N_EQK | N_HLF, N_SU_16_64 | N_KEY);
10244 int imm = inst.operands[2].imm;
10245 /* This gets the bounds check, size encoding and immediate bits calculation
10249 /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
10250 VQMOVN.I<size> <Dd>, <Qm>. */
10253 inst.operands[2].present = 0;
10254 inst.instruction = N_MNEM_vqmovn;
10259 constraint (imm < 1 || (unsigned)imm > et.size,
10260 _("immediate out of range"));
10261 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
10265 do_neon_rshift_sat_narrow_u (void)
10267 /* FIXME: Types for narrowing. If operands are signed, results can be signed
10268 or unsigned. If operands are unsigned, results must also be unsigned. */
10269 struct neon_type_el et = neon_check_type (2, NS_DQI,
10270 N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
10271 int imm = inst.operands[2].imm;
10272 /* This gets the bounds check, size encoding and immediate bits calculation
10276 /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
10277 VQMOVUN.I<size> <Dd>, <Qm>. */
10280 inst.operands[2].present = 0;
10281 inst.instruction = N_MNEM_vqmovun;
10286 constraint (imm < 1 || (unsigned)imm > et.size,
10287 _("immediate out of range"));
10288 /* FIXME: The manual is kind of unclear about what value U should have in
10289 VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
10291 neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
10295 do_neon_movn (void)
10297 struct neon_type_el et = neon_check_type (2, NS_DQ,
10298 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10299 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10300 neon_two_same (0, 1, et.size / 2);
10304 do_neon_rshift_narrow (void)
10306 struct neon_type_el et = neon_check_type (2, NS_DQI,
10307 N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
10308 int imm = inst.operands[2].imm;
10309 /* This gets the bounds check, size encoding and immediate bits calculation
10313 /* If immediate is zero then we are a pseudo-instruction for
10314 VMOVN.I<size> <Dd>, <Qm> */
10317 inst.operands[2].present = 0;
10318 inst.instruction = N_MNEM_vmovn;
10323 constraint (imm < 1 || (unsigned)imm > et.size,
10324 _("immediate out of range for narrowing operation"));
10325 neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
10329 do_neon_shll (void)
10331 /* FIXME: Type checking when lengthening. */
10332 struct neon_type_el et = neon_check_type (2, NS_QDI,
10333 N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
10334 unsigned imm = inst.operands[2].imm;
10336 if (imm == et.size)
10338 /* Maximum shift variant. */
10339 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10340 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10341 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10342 inst.instruction |= LOW4 (inst.operands[1].reg);
10343 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10344 inst.instruction |= neon_logbits (et.size) << 18;
10346 inst.instruction = neon_dp_fixup (inst.instruction);
10350 /* A more-specific type check for non-max versions. */
10351 et = neon_check_type (2, NS_QDI,
10352 N_EQK | N_DBL, N_SU_32 | N_KEY);
10353 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10354 neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
10358 /* Check the various types for the VCVT instruction, and return the one that
10359 the current instruction is. */
10362 neon_cvt_flavour (enum neon_shape rs)
10364 #define CVT_VAR(C,X,Y) \
10365 et = neon_check_type (2, rs, (X), (Y)); \
10366 if (et.type != NT_invtype) \
10368 inst.error = NULL; \
10371 struct neon_type_el et;
10373 CVT_VAR (0, N_S32, N_F32);
10374 CVT_VAR (1, N_U32, N_F32);
10375 CVT_VAR (2, N_F32, N_S32);
10376 CVT_VAR (3, N_F32, N_U32);
10385 /* Fixed-point conversion with #0 immediate is encoded as an integer
10387 if (inst.operands[2].present && inst.operands[2].imm != 0)
10389 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10390 int flavour = neon_cvt_flavour (rs);
10391 unsigned immbits = 32 - inst.operands[2].imm;
10392 unsigned enctab[] = { 0x0000100, 0x1000100, 0x0, 0x1000000 };
10393 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10395 inst.instruction |= enctab[flavour];
10396 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10397 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10398 inst.instruction |= LOW4 (inst.operands[1].reg);
10399 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10400 inst.instruction |= (rs == NS_QQI) << 6;
10401 inst.instruction |= 1 << 21;
10402 inst.instruction |= immbits << 16;
10406 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10407 int flavour = neon_cvt_flavour (rs);
10408 unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080 };
10409 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10411 inst.instruction |= enctab[flavour];
10412 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10413 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10414 inst.instruction |= LOW4 (inst.operands[1].reg);
10415 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10416 inst.instruction |= (rs == NS_QQ) << 6;
10417 inst.instruction |= 2 << 18;
10419 inst.instruction = neon_dp_fixup (inst.instruction);
10423 neon_move_immediate (void)
10425 enum neon_shape rs = neon_check_shape (NS_DI_QI);
10426 struct neon_type_el et = neon_check_type (1, rs,
10427 N_I8 | N_I16 | N_I32 | N_I64 | N_F32);
10428 unsigned immlo, immhi = 0, immbits;
10431 /* We start out as an MVN instruction if OP = 1, MOV otherwise. */
10432 op = (inst.instruction & (1 << 5)) != 0;
10434 immlo = inst.operands[1].imm;
10435 if (inst.operands[1].regisimm)
10436 immhi = inst.operands[1].reg;
10438 constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
10439 _("immediate has bits set outside the operand size"));
10441 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
10444 /* Invert relevant bits only. */
10445 neon_invert_size (&immlo, &immhi, et.size);
10446 /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
10447 with one or the other; those cases are caught by
10448 neon_cmode_for_move_imm. */
10450 if ((cmode = neon_cmode_for_move_imm (immlo, immhi, &immbits, &op,
10453 inst.error = _("immediate out of range");
10458 inst.instruction &= ~(1 << 5);
10459 inst.instruction |= op << 5;
10461 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10462 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10463 inst.instruction |= (rs == NS_QI) << 6;
10464 inst.instruction |= cmode << 8;
10466 neon_write_immbits (immbits);
10472 if (inst.operands[1].isreg)
10474 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10476 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10477 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10478 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10479 inst.instruction |= LOW4 (inst.operands[1].reg);
10480 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10481 inst.instruction |= (rs == NS_QQ) << 6;
10485 inst.instruction = NEON_ENC_IMMED (inst.instruction);
10486 neon_move_immediate ();
10489 inst.instruction = neon_dp_fixup (inst.instruction);
10492 /* Encode instructions of form:
10494 |28/24|23|22|21 20|19 16|15 12|11 8|7|6|5|4|3 0|
10495 | U |x |D |size | Rn | Rd |x x x x|N|x|M|x| Rm |
10500 neon_mixed_length (struct neon_type_el et, unsigned size)
10502 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10503 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10504 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10505 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10506 inst.instruction |= LOW4 (inst.operands[2].reg);
10507 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
10508 inst.instruction |= (et.type == NT_unsigned) << 24;
10509 inst.instruction |= neon_logbits (size) << 20;
10511 inst.instruction = neon_dp_fixup (inst.instruction);
10515 do_neon_dyadic_long (void)
10517 /* FIXME: Type checking for lengthening op. */
10518 struct neon_type_el et = neon_check_type (3, NS_QDD,
10519 N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
10520 neon_mixed_length (et, et.size);
10524 do_neon_abal (void)
10526 struct neon_type_el et = neon_check_type (3, NS_QDD,
10527 N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
10528 neon_mixed_length (et, et.size);
10532 neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
10534 if (inst.operands[2].isscalar)
10536 struct neon_type_el et = neon_check_type (2, NS_QDS,
10537 N_EQK | N_DBL, regtypes | N_KEY);
10538 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10539 neon_mul_mac (et, et.type == NT_unsigned);
10543 struct neon_type_el et = neon_check_type (3, NS_QDD,
10544 N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
10545 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10546 neon_mixed_length (et, et.size);
10551 do_neon_mac_maybe_scalar_long (void)
10553 neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
10557 do_neon_dyadic_wide (void)
10559 struct neon_type_el et = neon_check_type (3, NS_QQD,
10560 N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
10561 neon_mixed_length (et, et.size);
10565 do_neon_dyadic_narrow (void)
10567 struct neon_type_el et = neon_check_type (3, NS_QDD,
10568 N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
10569 neon_mixed_length (et, et.size / 2);
10573 do_neon_mul_sat_scalar_long (void)
10575 neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
10579 do_neon_vmull (void)
10581 if (inst.operands[2].isscalar)
10582 do_neon_mac_maybe_scalar_long ();
10585 struct neon_type_el et = neon_check_type (3, NS_QDD,
10586 N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_KEY);
10587 if (et.type == NT_poly)
10588 inst.instruction = NEON_ENC_POLY (inst.instruction);
10590 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10591 /* For polynomial encoding, size field must be 0b00 and the U bit must be
10592 zero. Should be OK as-is. */
10593 neon_mixed_length (et, et.size);
10600 enum neon_shape rs = neon_check_shape (NS_DDDI_QQQI);
10601 struct neon_type_el et = neon_check_type (3, rs,
10602 N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
10603 unsigned imm = (inst.operands[3].imm * et.size) / 8;
10604 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10605 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10606 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10607 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10608 inst.instruction |= LOW4 (inst.operands[2].reg);
10609 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
10610 inst.instruction |= (rs == NS_QQQI) << 6;
10611 inst.instruction |= imm << 8;
10613 inst.instruction = neon_dp_fixup (inst.instruction);
10619 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10620 struct neon_type_el et = neon_check_type (2, rs,
10621 N_EQK, N_8 | N_16 | N_32 | N_KEY);
10622 unsigned op = (inst.instruction >> 7) & 3;
10623 /* N (width of reversed regions) is encoded as part of the bitmask. We
10624 extract it here to check the elements to be reversed are smaller.
10625 Otherwise we'd get a reserved instruction. */
10626 unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
10627 assert (elsize != 0);
10628 constraint (et.size >= elsize,
10629 _("elements must be smaller than reversal region"));
10630 neon_two_same (rs == NS_QQ, 1, et.size);
10636 if (inst.operands[1].isscalar)
10638 enum neon_shape rs = neon_check_shape (NS_DS_QS);
10639 struct neon_type_el et = neon_check_type (1, rs, N_8 | N_16 | N_32);
10640 unsigned sizebits = et.size >> 3;
10641 unsigned dm = inst.operands[1].reg >> 3;
10642 int logsize = neon_logbits (et.size);
10643 unsigned x = (inst.operands[1].reg & 7) << logsize;
10644 inst.instruction = NEON_ENC_SCALAR (inst.instruction);
10645 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10646 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10647 inst.instruction |= LOW4 (dm);
10648 inst.instruction |= HI1 (dm) << 5;
10649 inst.instruction |= (rs == NS_QS) << 6;
10650 inst.instruction |= x << 17;
10651 inst.instruction |= sizebits << 16;
10653 inst.instruction = neon_dp_fixup (inst.instruction);
10657 enum neon_shape rs = neon_check_shape (NS_DR_QR);
10658 struct neon_type_el et = neon_check_type (1, rs, N_8 | N_16 | N_32);
10659 unsigned save_cond = inst.instruction & 0xf0000000;
10660 /* Duplicate ARM register to lanes of vector. */
10661 inst.instruction = NEON_ENC_ARMREG (inst.instruction);
10664 case 8: inst.instruction |= 0x400000; break;
10665 case 16: inst.instruction |= 0x000020; break;
10666 case 32: inst.instruction |= 0x000000; break;
10669 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
10670 inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
10671 inst.instruction |= HI1 (inst.operands[0].reg) << 7;
10672 inst.instruction |= (rs == NS_QR) << 21;
10673 /* The encoding for this instruction is identical for the ARM and Thumb
10674 variants, except for the condition field. */
10676 inst.instruction |= 0xe0000000;
10678 inst.instruction |= save_cond;
10682 /* VMOV has particularly many variations. It can be one of:
10683 0. VMOV<c><q> <Qd>, <Qm>
10684 1. VMOV<c><q> <Dd>, <Dm>
10685 (Register operations, which are VORR with Rm = Rn.)
10686 2. VMOV<c><q>.<dt> <Qd>, #<imm>
10687 3. VMOV<c><q>.<dt> <Dd>, #<imm>
10689 4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
10690 (ARM register to scalar.)
10691 5. VMOV<c><q> <Dm>, <Rd>, <Rn>
10692 (Two ARM registers to vector.)
10693 6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
10694 (Scalar to ARM register.)
10695 7. VMOV<c><q> <Rd>, <Rn>, <Dm>
10696 (Vector to two ARM registers.)
10698 We should have just enough information to be able to disambiguate most of
10699 these, apart from "Two ARM registers to vector" and "Vector to two ARM
10700 registers" cases. For these, abuse the .regisimm operand field to signify a
10703 All the encoded bits are hardcoded by this function.
10705 FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
10706 can specify a type where it doesn't make sense to, and is ignored).
/* NOTE(review): the function header and the structural lines of the
   switch on the operand count (nargs) were lost in extraction; the
   numbered lines below are the surviving body of do_neon_mov.  */
10712 int nargs = inst.operands[0].present + inst.operands[1].present
10713 + inst.operands[2].present;
/* save_cond preserves the caller's condition bits; Thumb always uses the
   0xe (AL) encoding.  */
10714 unsigned save_cond = thumb_mode ? 0xe0000000 : inst.instruction & 0xf0000000;
10719 /* Cases 0, 1, 2, 3, 4, 6. */
10720 if (inst.operands[1].isscalar)
/* Case 6: scalar to ARM register (VMOV <Rd>, <Dn[x]>).  */
10723 struct neon_type_el et = neon_check_type (1, NS_IGNORE,
10724 N_S8 | N_S16 | N_U8 | N_U16 | N_32);
10725 unsigned logsize = neon_logbits (et.size);
10726 unsigned dn = inst.operands[1].reg >> 3;
10727 unsigned x = inst.operands[1].reg & 7;
10728 unsigned abcdebits = 0;
10730 constraint (x >= 64 / et.size, _("scalar index out of range"));
/* a:b:c:d:e field selects the element size/index and signedness.  */
10734 case 8: abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
10735 case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
10736 case 32: abcdebits = 0x00; break;
10740 abcdebits |= x << logsize;
10741 inst.instruction = save_cond;
10742 inst.instruction |= 0xe100b10;
10743 inst.instruction |= LOW4 (dn) << 16;
10744 inst.instruction |= HI1 (dn) << 7;
10745 inst.instruction |= inst.operands[0].reg << 12;
10746 inst.instruction |= (abcdebits & 3) << 5;
10747 inst.instruction |= (abcdebits >> 2) << 21;
10749 else if (inst.operands[1].isreg)
10751 /* Cases 0, 1, 4. */
10752 if (inst.operands[0].isscalar)
/* Case 4: ARM register to scalar (VMOV <Dn[x]>, <Rd>).  */
10755 unsigned bcdebits = 0;
10756 struct neon_type_el et = neon_check_type (1, NS_IGNORE,
10757 N_8 | N_16 | N_32);
10758 int logsize = neon_logbits (et.size);
10759 unsigned dn = inst.operands[0].reg >> 3;
10760 unsigned x = inst.operands[0].reg & 7;
10762 constraint (x >= 64 / et.size, _("scalar index out of range"));
10766 case 8: bcdebits = 0x8; break;
10767 case 16: bcdebits = 0x1; break;
10768 case 32: bcdebits = 0x0; break;
10772 bcdebits |= x << logsize;
10773 inst.instruction = save_cond;
10774 inst.instruction |= 0xe000b10;
10775 inst.instruction |= LOW4 (dn) << 16;
10776 inst.instruction |= HI1 (dn) << 7;
10777 inst.instruction |= inst.operands[1].reg << 12;
10778 inst.instruction |= (bcdebits & 3) << 5;
10779 inst.instruction |= (bcdebits >> 2) << 21;
/* Cases 0, 1: vector register to vector register, encoded as VORR with
   Rm = Rn.  */
10784 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10785 /* The architecture manual I have doesn't explicitly state which
10786 value the U bit should have for register->register moves, but
10787 the equivalent VORR instruction has U = 0, so do that. */
10788 inst.instruction = 0x0200110;
10789 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10790 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10791 inst.instruction |= LOW4 (inst.operands[1].reg);
10792 inst.instruction |= HI1 (inst.operands[1].reg) << 5;
10793 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10794 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10795 inst.instruction |= (rs == NS_QQ) << 6;
10797 inst.instruction = neon_dp_fixup (inst.instruction);
/* Cases 2, 3: immediate form, delegated to neon_move_immediate.  */
10803 inst.instruction = 0x0800010;
10804 neon_move_immediate ();
10805 inst.instruction = neon_dp_fixup (inst.instruction);
/* Three-operand cases 5 and 7, disambiguated via .regisimm (see the
   comment at the top of this function).  */
10811 if (inst.operands[0].regisimm)
/* Case 5: two ARM registers to vector.  */
10814 inst.instruction = save_cond;
10815 inst.instruction |= 0xc400b10;
10816 inst.instruction |= LOW4 (inst.operands[0].reg);
10817 inst.instruction |= HI1 (inst.operands[0].reg) << 5;
10818 inst.instruction |= inst.operands[1].reg << 12;
10819 inst.instruction |= inst.operands[2].reg << 16;
/* Case 7: vector to two ARM registers.  */
10824 inst.instruction = save_cond;
10825 inst.instruction |= 0xc500b10;
10826 inst.instruction |= inst.operands[0].reg << 12;
10827 inst.instruction |= inst.operands[1].reg << 16;
10828 inst.instruction |= LOW4 (inst.operands[2].reg);
10829 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
10839 do_neon_rshift_round_imm (void)
10841 enum neon_shape rs = neon_check_shape (NS_DDI_QQI);
10842 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
10843 int imm = inst.operands[2].imm;
10845 /* imm == 0 case is encoded as VMOV for V{R}SHR. */
10848 inst.operands[2].present = 0;
10853 constraint (imm < 1 || (unsigned)imm > et.size,
10854 _("immediate out of range for shift"));
10855 neon_imm_shift (TRUE, et.type == NT_unsigned, rs == NS_QQI, et,
10860 do_neon_movl (void)
10862 struct neon_type_el et = neon_check_type (2, NS_QD,
10863 N_EQK | N_DBL, N_SU_32 | N_KEY);
10864 unsigned sizebits = et.size >> 3;
10865 inst.instruction |= sizebits << 19;
10866 neon_two_same (0, et.type == NT_unsigned, -1);
10872 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10873 struct neon_type_el et = neon_check_type (2, rs,
10874 N_EQK, N_8 | N_16 | N_32 | N_KEY);
10875 inst.instruction = NEON_ENC_INTEGER (inst.instruction);
10876 neon_two_same (rs == NS_QQ, 1, et.size);
10880 do_neon_zip_uzp (void)
10882 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10883 struct neon_type_el et = neon_check_type (2, rs,
10884 N_EQK, N_8 | N_16 | N_32 | N_KEY);
10885 if (rs == NS_DD && et.size == 32)
10887 /* Special case: encode as VTRN.32 <Dd>, <Dm>. */
10888 inst.instruction = N_MNEM_vtrn;
10892 neon_two_same (rs == NS_QQ, 1, et.size);
10896 do_neon_sat_abs_neg (void)
10898 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10899 struct neon_type_el et = neon_check_type (2, rs,
10900 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
10901 neon_two_same (rs == NS_QQ, 1, et.size);
10905 do_neon_pair_long (void)
10907 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10908 struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
10909 /* Unsigned is encoded in OP field (bit 7) for these instruction. */
10910 inst.instruction |= (et.type == NT_unsigned) << 7;
10911 neon_two_same (rs == NS_QQ, 1, et.size);
10915 do_neon_recip_est (void)
10917 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10918 struct neon_type_el et = neon_check_type (2, rs,
10919 N_EQK | N_FLT, N_F32 | N_U32 | N_KEY);
10920 inst.instruction |= (et.type == NT_float) << 8;
10921 neon_two_same (rs == NS_QQ, 1, et.size);
10927 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10928 struct neon_type_el et = neon_check_type (2, rs,
10929 N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
10930 neon_two_same (rs == NS_QQ, 1, et.size);
10936 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10937 struct neon_type_el et = neon_check_type (2, rs,
10938 N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
10939 neon_two_same (rs == NS_QQ, 1, et.size);
10945 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10946 struct neon_type_el et = neon_check_type (2, rs,
10947 N_EQK | N_INT, N_8 | N_KEY);
10948 neon_two_same (rs == NS_QQ, 1, et.size);
10954 enum neon_shape rs = neon_check_shape (NS_DD_QQ);
10955 neon_two_same (rs == NS_QQ, 1, -1);
10959 do_neon_tbl_tbx (void)
10961 unsigned listlenbits;
10962 neon_check_type (1, NS_DLD, N_8);
10964 if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
10966 inst.error = _("bad list length for table lookup");
10970 listlenbits = inst.operands[1].imm - 1;
10971 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
10972 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
10973 inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
10974 inst.instruction |= HI1 (inst.operands[1].reg) << 7;
10975 inst.instruction |= LOW4 (inst.operands[2].reg);
10976 inst.instruction |= HI1 (inst.operands[2].reg) << 5;
10977 inst.instruction |= listlenbits << 8;
10979 inst.instruction = neon_dp_fixup (inst.instruction);
/* Encode VLDM/VSTM (Neon/VFP multiple-register load/store).  Operand 0 is
   the base register, operand 1 the D-register list (.imm = list length).
   Intervening original lines are missing from this extract.  */
10983 do_neon_ldm_stm (void)
10985 /* P, U and L bits are part of bitmask. */
10986 int is_dbmode = (inst.instruction & (1 << 24)) != 0;
/* Each D register occupies two words in the transfer-count field.  */
10987 unsigned offsetbits = inst.operands[1].imm * 2;
10989 constraint (is_dbmode && !inst.operands[0].writeback,
10990 _("writeback (!) must be used for VLDMDB and VSTMDB"));
10992 constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
10993 _("register list must contain at least 1 and at most 16 "
10996 inst.instruction |= inst.operands[0].reg << 16;
10997 inst.instruction |= inst.operands[0].writeback << 21;
10998 inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
10999 inst.instruction |= HI1 (inst.operands[1].reg) << 22;
11001 inst.instruction |= offsetbits;
/* NOTE(review): the guard for this 0xe0000000 (AL condition) OR is among
   the missing lines -- presumably applied only in Thumb mode; confirm.  */
11004 inst.instruction |= 0xe0000000;
/* Encode VLDR/VSTR (single extension-register load/store).  The immediate
   offset is a multiple of 4, held in 8 bits after scaling; PC-relative
   forms get a coprocessor-offset relocation instead.  Several original
   lines (braces, else arms, one constraint message) are missing here.  */
11008 do_neon_ldr_str (void)
11010 unsigned offsetbits;
/* Bit 20 is the L (load) bit in the opcode mask.  */
11012 int is_ldr = (inst.instruction & (1 << 20)) != 0;
11014 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11015 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11017 constraint (inst.reloc.pc_rel && !is_ldr,
11018 _("PC-relative addressing unavailable with VSTR"));
11020 constraint (!inst.reloc.pc_rel && inst.reloc.exp.X_op != O_constant,
11021 _("Immediate value must be a constant"));
/* Negative offsets are encoded as magnitude; the direction presumably
   goes into the U bit via offset_up (declared in missing lines).  */
11023 if (inst.reloc.exp.X_add_number < 0)
11026 offsetbits = -inst.reloc.exp.X_add_number / 4;
11029 offsetbits = inst.reloc.exp.X_add_number / 4;
11031 /* FIXME: Does this catch everything? */
11032 constraint (!inst.operands[1].isreg || !inst.operands[1].preind
11033 || inst.operands[1].postind || inst.operands[1].writeback
11034 || inst.operands[1].immisreg || inst.operands[1].shifted,
11036 constraint ((inst.operands[1].imm & 3) != 0,
11037 _("Offset must be a multiple of 4"));
/* After scaling the offset must fit in 8 bits.  */
11038 constraint (offsetbits != (offsetbits & 0xff),
11039 _("Immediate offset out of range"));
11041 inst.instruction |= inst.operands[1].reg << 16;
11042 inst.instruction |= offsetbits & 0xff;
11043 inst.instruction |= offset_up << 23;
/* NOTE(review): guard for this AL-condition OR is missing -- confirm.  */
11046 inst.instruction |= 0xe0000000;
/* PC-relative references get a resolvable CP-offset reloc (Thumb-2 or
   ARM flavour); otherwise the offset was encoded above and no reloc is
   needed.  */
11048 if (inst.reloc.pc_rel)
11051 inst.reloc.type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
11053 inst.reloc.type = BFD_RELOC_ARM_CP_OFF_IMM;
11056 inst.reloc.type = BFD_RELOC_UNUSED;
11059 /* "interleave" version also handles non-interleaving register VLD1/VST1
/* Encode the "multiple n-element structures" forms of VLD<n>/VST<n>.
   Validates the optional :<align> specifier against list length, then
   translates the parsed register-list descriptor into the instruction's
   "type" field via a lookup table.  Missing lines include the function
   signature decoration, switch cases and local declarations (idx,
   typebits).  */
11063 do_neon_ld_st_interleave (void)
11065 struct neon_type_el et = neon_check_type (1, NS_IGNORE,
11066 N_8 | N_16 | N_32 | N_64);
11067 unsigned alignbits = 0;
11069 /* The bits in this table go:
11070 0: register stride of one (0) or two (1)
11071 1,2: register list length, minus one (1, 2, 3, 4).
11072 3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
11073 We use -1 for invalid entries. */
11074 const int typetable[] =
11076 0x7, -1, 0xa, -1, 0x6, -1, 0x2, -1, /* VLD1 / VST1. */
11077 -1, -1, 0x8, 0x9, -1, -1, 0x3, -1, /* VLD2 / VST2. */
11078 -1, -1, -1, -1, 0x4, 0x5, -1, -1, /* VLD3 / VST3. */
11079 -1, -1, -1, -1, -1, -1, 0x0, 0x1 /* VLD4 / VST4. */
/* Alignment specifier is stored shifted left 8 bits in operand 1's imm;
   64-bit alignment encodes as 1, wider cases (case labels missing from
   this extract) reject 3-register lists.  */
11083 if (inst.operands[1].immisalign)
11084 switch (inst.operands[1].imm >> 8)
11086 case 64: alignbits = 1; break;
11088 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
11089 goto bad_alignment;
11093 if (NEON_REGLIST_LENGTH (inst.operands[0].imm) == 3)
11094 goto bad_alignment;
11099 inst.error = _("bad alignment");
11103 inst.instruction |= alignbits << 4;
11104 inst.instruction |= neon_logbits (et.size) << 6;
11106 /* Bits [4:6] of the immediate in a list specifier encode register stride
11107 (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
11108 VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
11109 up the right value for "type" in a table based on this value and the given
11110 list style, then stick it back. */
11111 idx = ((inst.operands[0].imm >> 4) & 7)
11112 | (((inst.instruction >> 8) & 3) << 3);
11114 typebits = typetable[idx];
11116 constraint (typebits == -1, _("bad list type for instruction"));
11118 inst.instruction &= ~0xf00;
11119 inst.instruction |= typebits << 8;
11122 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
11123 *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
11124 otherwise. The variable arguments are a list of pairs of legal (size, align)
11125 values, terminated with -1. */
/* Returns SUCCESS/FAIL (project constants); walks the varargs pair list
   until a (size, align) match or the -1 terminator.  The va_list
   declaration, loop brackets, early-return when no alignment was given,
   and the assignments setting *do_align are among the lines missing from
   this extract.  */
11128 neon_alignment_bit (int size, int align, int *do_align, ...)
11131 int result = FAIL, thissize, thisalign;
/* No :<align> given by the user: nothing to validate.  */
11133 if (!inst.operands[1].immisalign)
11139 va_start (ap, do_align);
11143 thissize = va_arg (ap, int);
/* -1 terminates the legal (size, align) pair list.  */
11144 if (thissize == -1)
11146 thisalign = va_arg (ap, int);
11148 if (size == thissize && align == thisalign)
11151 while (result != SUCCESS);
11155 if (result == SUCCESS)
11158 inst.error = _("unsupported alignment for instruction");
/* Encode the "single n-element structure to one lane" forms of
   VLD<n>/VST<n>.  Validates list length, lane index, stride, and the
   per-size alignment rules, then packs lane number and size into the
   opcode.  Switch braces, default case and error-return lines are missing
   from this extract.  */
11164 do_neon_ld_st_lane (void)
11166 struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
11167 int align_good, do_align = 0;
11168 int logsize = neon_logbits (et.size);
/* Alignment is stored shifted left 8 bits in operand 1's imm.  */
11169 int align = inst.operands[1].imm >> 8;
/* Bits [9:8] of the bitmask hold <n> minus one.  */
11170 int n = (inst.instruction >> 8) & 3;
11171 int max_el = 64 / et.size;
11173 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
11174 _("bad list length"));
11175 constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
11176 _("scalar index out of range"));
11177 constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
11179 _("stride of 2 unavailable when element size is 8"));
11183 case 0: /* VLD1 / VST1. */
11184 align_good = neon_alignment_bit (et.size, align, &do_align, 16, 16,
11186 if (align_good == FAIL)
11190 unsigned alignbits = 0;
11193 case 16: alignbits = 0x1; break;
11194 case 32: alignbits = 0x3; break;
11197 inst.instruction |= alignbits << 4;
11201 case 1: /* VLD2 / VST2. */
11202 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 16, 16, 32,
11204 if (align_good == FAIL)
/* VLD2/VST2 with alignment always sets the single alignment bit.  */
11207 inst.instruction |= 1 << 4;
11210 case 2: /* VLD3 / VST3. */
11211 constraint (inst.operands[1].immisalign,
11212 _("can't use alignment with this instruction"));
11215 case 3: /* VLD4 / VST4. */
11216 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
11217 16, 64, 32, 64, 32, 128, -1);
11218 if (align_good == FAIL)
11222 unsigned alignbits = 0;
11225 case 8: alignbits = 0x1; break;
11226 case 16: alignbits = 0x1; break;
11227 case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
11230 inst.instruction |= alignbits << 4;
11237 /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32. */
11238 if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11239 inst.instruction |= 1 << (4 + logsize);
11241 inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
11242 inst.instruction |= logsize << 10;
11245 /* Encode single n-element structure to all lanes VLD<n> instructions. */
/* The "load to all lanes" duplicating forms VLD1..VLD4.  Per-<n> cases
   check list length / alignment pairs and set the T (stride) bit and the
   size field.  Braces, breaks and error-return lines are missing from
   this extract.  */
11248 do_neon_ld_dup (void)
11250 struct neon_type_el et = neon_check_type (1, NS_IGNORE, N_8 | N_16 | N_32);
11251 int align_good, do_align = 0;
/* Bits [9:8] of the bitmask hold <n> minus one.  */
11253 switch ((inst.instruction >> 8) & 3)
11255 case 0: /* VLD1. */
11256 assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
11257 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
11258 &do_align, 16, 16, 32, 32, -1);
11259 if (align_good == FAIL)
/* VLD1 to all lanes accepts a list of 1 (case missing here) or 2
   registers; bit 5 distinguishes them.  */
11261 switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
11264 case 2: inst.instruction |= 1 << 5; break;
11265 default: inst.error = _("bad list length"); return;
11267 inst.instruction |= neon_logbits (et.size) << 6;
11270 case 1: /* VLD2. */
11271 align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
11272 &do_align, 8, 16, 16, 32, 32, 64, -1);
11273 if (align_good == FAIL)
11275 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
11276 _("bad list length"));
/* Bit 5 encodes register stride of 2.  */
11277 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11278 inst.instruction |= 1 << 5;
11279 inst.instruction |= neon_logbits (et.size) << 6;
11282 case 2: /* VLD3. */
11283 constraint (inst.operands[1].immisalign,
11284 _("can't use alignment with this instruction"));
11285 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
11286 _("bad list length"));
11287 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11288 inst.instruction |= 1 << 5;
11289 inst.instruction |= neon_logbits (et.size) << 6;
11292 case 3: /* VLD4. */
11294 int align = inst.operands[1].imm >> 8;
11295 align_good = neon_alignment_bit (et.size, align, &do_align, 8, 32,
11296 16, 64, 32, 64, 32, 128, -1);
11297 if (align_good == FAIL)
11299 constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
11300 _("bad list length"));
11301 if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
11302 inst.instruction |= 1 << 5;
/* 32-bit elements with :128 alignment use the special size encoding 3.  */
11303 if (et.size == 32 && align == 128)
11304 inst.instruction |= 0x3 << 6;
11306 inst.instruction |= neon_logbits (et.size) << 6;
/* Alignment bit (bit 4), as decided by neon_alignment_bit above.  */
11313 inst.instruction |= do_align << 4;
11316 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
11317 apart from bits [11:4]. */
/* Top-level dispatcher for all VLD<n>/VST<n> forms: picks interleave /
   dup / lane encoding from the parsed lane descriptor, then fills in the
   register fields, addressing mode (post-index register, writeback 0xd,
   or plain 0xf) and the leading opcode byte.  Case braces/breaks and the
   Thumb/ARM selection guard are missing from this extract.  */
11320 do_neon_ldx_stx (void)
11322 switch (NEON_LANE (inst.operands[0].imm))
11324 case NEON_INTERLEAVE_LANES:
11325 inst.instruction = NEON_ENC_INTERLV (inst.instruction);
11326 do_neon_ld_st_interleave ();
11329 case NEON_ALL_LANES:
11330 inst.instruction = NEON_ENC_DUP (inst.instruction);
/* Default case: a specific lane was given.  */
11335 inst.instruction = NEON_ENC_LANE (inst.instruction);
11336 do_neon_ld_st_lane ();
11339 /* L bit comes from bit mask. */
11340 inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
11341 inst.instruction |= HI1 (inst.operands[0].reg) << 22;
11342 inst.instruction |= inst.operands[1].reg << 16;
11344 if (inst.operands[1].postind)
11346 int postreg = inst.operands[1].imm & 0xf;
11347 constraint (!inst.operands[1].immisreg,
11348 _("post-index must be a register"));
/* SP (0xd) and PC (0xf) are reserved Rm encodings here.  */
11349 constraint (postreg == 0xd || postreg == 0xf,
11350 _("bad register for post-index"));
11351 inst.instruction |= postreg;
11353 else if (inst.operands[1].writeback)
/* Rm = 0xd means "writeback, no register offset".  */
11355 inst.instruction |= 0xd;
/* Rm = 0xf means "no writeback".  */
11358 inst.instruction |= 0xf;
/* NOTE(review): presumably 0xf9000000 is the Thumb encoding and
   0xf4000000 the ARM one; the selecting condition is missing here.  */
11361 inst.instruction |= 0xf9000000;
11363 inst.instruction |= 0xf4000000;
11367 /* Overall per-instruction processing. */
11369 /* We need to be able to fix up arbitrary expressions in some statements.
11370 This is so that we can handle symbols that are an arbitrary distance from
11371 the pc. The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
11372 which returns part of an address in a form which will be valid for
11373 a data instruction. We do this by pushing the expression into a symbol
11374 in the expr_section, and creating a fix for that. */
/* Create a fixup for EXP at FRAG/WHERE.  Simple expressions go straight
   to fix_new_exp; complex ones are wrapped in an expression symbol first.
   Most parameters and the selecting switch are missing from this
   extract.  */
11377 fix_new_arm (fragS * frag,
11392 new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
11396 new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
11401 /* Mark whether the fix is to a THUMB instruction, or an ARM
/* tc_fix_data records the instruction set of the fixed-up insn.  */
11403 new_fix->tc_fix_data = thumb_mode;
11406 /* Create a frg for an instruction requiring relaxation. */
/* Emit a Thumb instruction whose final width is not yet known: create a
   machine-dependent variant frag so md_convert_frag can widen it later.
   Local declarations (to, sym, offset) and case labels of the switch are
   missing from this extract.  */
11408 output_relax_insn (void)
11415 /* The size of the instruction is unknown, so tie the debug info to the
11416 start of the instruction. */
11417 dwarf2_emit_insn (0);
/* Split the reloc expression into a symbol + constant offset form that
   frag_var can carry.  */
11420 switch (inst.reloc.exp.X_op)
11423 sym = inst.reloc.exp.X_add_symbol;
11424 offset = inst.reloc.exp.X_add_number;
11428 offset = inst.reloc.exp.X_add_number;
11431 sym = make_expr_symbol (&inst.reloc.exp);
11435 to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
11436 inst.relax, sym, offset, NULL/*offset, opcode*/);
/* Write the narrow (16-bit) form; relaxation may grow it to 32 bits.  */
11437 md_number_to_chars (to, inst.instruction, THUMB_SIZE);
11440 /* Write a 32-bit thumb instruction to buf. */
/* Thumb-2 32-bit encodings are stored as two halfwords, most-significant
   halfword first, each in the target's byte order.  */
11442 put_thumb32_insn (char * buf, unsigned long insn)
11444 md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
11445 md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
/* Final emission of the assembled instruction in `inst': report any
   pending error, handle the relaxation path, write the bytes, attach the
   fixup, and emit debug info.  Early-return lines, the `char *to'
   declaration and some braces are missing from this extract.  */
11449 output_inst (const char * str)
/* A parse/encode stage recorded an error: diagnose and emit nothing.  */
11455 as_bad ("%s -- `%s'", inst.error, str);
/* Relaxable instruction: defer to the variant-frag path.  */
11459 output_relax_insn();
11462 if (inst.size == 0)
11465 to = frag_more (inst.size);
/* 32-bit Thumb instructions need the halfword-swapped writer.  */
11467 if (thumb_mode && (inst.size > THUMB_SIZE))
11469 assert (inst.size == (2 * THUMB_SIZE));
11470 put_thumb32_insn (to, inst.instruction);
/* NOTE(review): writing the same word twice presumably handles a
   two-word ARM pseudo-op; intervening lines are missing -- confirm.  */
11472 else if (inst.size > INSN_SIZE)
11474 assert (inst.size == (2 * INSN_SIZE));
11475 md_number_to_chars (to, inst.instruction, INSN_SIZE);
11476 md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
11479 md_number_to_chars (to, inst.instruction, inst.size);
11481 if (inst.reloc.type != BFD_RELOC_UNUSED)
11482 fix_new_arm (frag_now, to - frag_now->fr_literal,
11483 inst.size, & inst.reloc.exp, inst.reloc.pc_rel,
11487 dwarf2_emit_insn (inst.size);
11491 /* Parse a Neon type specifier. *STR should point at the leading '.'
11492 character. Does no verification at this stage that the type fits the opcode
11499 Can all be legally parsed by this function.
11501 Fills in neon_type struct pointer with parsed information, and updates STR
11502 to point after the parsed type specifier. Returns TRUE if this was a legal
11503 type, FALSE if not. */
/* Loop braces, the `ptr' initialisation, FAIL/SUCCESS return paths and
   the 64-bit size check line are missing from this extract.  */
11506 parse_neon_type (struct neon_type *type, char **str)
11513 while (type->elems < NEON_MAX_TYPE_ELS)
11515 enum neon_el_type thistype = NT_untyped;
11516 unsigned thissize = -1u;
11523 /* Just a size without an explicit type. */
11524 if (ISDIGIT (*ptr))
/* Single-letter element-type codes: i/f/p/s/u.  */
11529 case 'i': thistype = NT_integer; break;
11530 case 'f': thistype = NT_float; break;
11531 case 'p': thistype = NT_poly; break;
11532 case 's': thistype = NT_signed; break;
11533 case 'u': thistype = NT_unsigned; break;
11535 as_bad (_("Unexpected character `%c' in type specifier"), *ptr);
11541 /* .f is an abbreviation for .f32. */
11542 if (thistype == NT_float && !ISDIGIT (*ptr))
11547 thissize = strtoul (ptr, &ptr, 10);
11549 if (thissize != 8 && thissize != 16 && thissize != 32
11552 as_bad (_("Bad size %d in type specifier"), thissize);
/* Record this element and advance; the separator handling between
   elements is among the missing lines.  */
11559 type->el[type->elems].type = thistype;
11560 type->el[type->elems].size = thissize;
11570 /* Tag values used in struct asm_opcode's tag field. */
/* These tags tell opcode_lookup where (if anywhere) a conditional affix
   may appear in each mnemonic.  The enum keyword/brace lines are missing
   from this extract.  */
11573 OT_unconditional, /* Instruction cannot be conditionalized.
11574 The ARM condition field is still 0xE. */
11575 OT_unconditionalF, /* Instruction cannot be conditionalized
11576 and carries 0xF in its ARM condition field. */
11577 OT_csuffix, /* Instruction takes a conditional suffix. */
11578 OT_cinfix3, /* Instruction takes a conditional infix,
11579 beginning at character index 3. (In
11580 unified mode, it becomes a suffix.) */
11581 OT_cinfix3_legacy, /* Legacy instruction takes a conditional infix at
11582 character index 3, even in unified mode. Used for
11583 legacy instructions where suffix and infix forms
11584 may be ambiguous. */
11585 OT_csuf_or_in3, /* Instruction takes either a conditional
11586 suffix or an infix at character index 3. */
11587 OT_odd_infix_unc, /* This is the unconditional variant of an
11588 instruction that takes a conditional infix
11589 at an unusual position. In unified mode,
11590 this variant will accept a suffix. */
11591 OT_odd_infix_0 /* Values greater than or equal to OT_odd_infix_0
11592 are the conditional variants of instructions that
11593 take conditional infixes in unusual positions.
11594 The infix appears at character index
11595 (tag - OT_odd_infix_0). These are not accepted
11596 in unified mode. */
11599 /* Subroutine of md_assemble, responsible for looking up the primary
11600 opcode from the mnemonic the user wrote. STR points to the
11601 beginning of the mnemonic.
11603 This is not simply a hash table lookup, because of conditional
11604 variants. Most instructions have conditional variants, which are
11605 expressed with a _conditional affix_ to the mnemonic. If we were
11606 to encode each conditional variant as a literal string in the opcode
11607 table, it would have approximately 20,000 entries.
11609 Most mnemonics take this affix as a suffix, and in unified syntax,
11610 'most' is upgraded to 'all'. However, in the divided syntax, some
11611 instructions take the affix as an infix, notably the s-variants of
11612 the arithmetic instructions. Of those instructions, all but six
11613 have the infix appear after the third character of the mnemonic.
11615 Accordingly, the algorithm for looking up primary opcodes given
11618 1. Look up the identifier in the opcode table.
11619 If we find a match, go to step U.
11621 2. Look up the last two characters of the identifier in the
11622 conditions table. If we find a match, look up the first N-2
11623 characters of the identifier in the opcode table. If we
11624 find a match, go to step CE.
11626 3. Look up the fourth and fifth characters of the identifier in
11627 the conditions table. If we find a match, extract those
11628 characters from the identifier, and look up the remaining
11629 characters in the opcode table. If we find a match, go
11634 U. Examine the tag field of the opcode structure, in case this is
11635 one of the six instructions with its conditional infix in an
11636 unusual place. If it is, the tag tells us where to find the
11637 infix; look it up in the conditions table and set inst.cond
11638 accordingly. Otherwise, this is an unconditional instruction.
11639 Again set inst.cond accordingly. Return the opcode structure.
11641 CE. Examine the tag field to make sure this is an instruction that
11642 should receive a conditional suffix. If it is not, fail.
11643 Otherwise, set inst.cond from the suffix we already looked up,
11644 and return the opcode structure.
11646 CM. Examine the tag field to make sure this is an instruction that
11647 should receive a conditional infix after the third character.
11648 If it is not, fail. Otherwise, undo the edits to the current
11649 line of input and proceed as for case CE. */
/* NOTE(review): many lines (declarations of base/end/affix/save/offset,
   width-suffix parsing, several return statements and braces) are
   missing from this extract; the algorithm above is the authoritative
   description.  */
11651 static const struct asm_opcode *
11652 opcode_lookup (char **str)
11656 const struct asm_opcode *opcode;
11657 const struct asm_cond *cond;
11660 /* Scan up to the end of the mnemonic, which must end in white space,
11661 '.' (in unified mode only), or end of string. */
11662 for (base = end = *str; *end != '\0'; end++)
11663 if (*end == ' ' || (unified_syntax && *end == '.'))
11669 /* Handle a possible width suffix and/or Neon type suffix. */
/* '.n' selects the narrow (16-bit) encoding; '.w' handling is among the
   missing lines.  */
11676 else if (end[1] == 'n')
11681 inst.vectype.elems = 0;
11683 *str = end + offset;
11685 if (end[offset] == '.')
11687 /* See if we have a Neon type suffix. */
11688 if (!parse_neon_type (&inst.vectype, str))
11691 else if (end[offset] != '\0' && end[offset] != ' ')
11697 /* Look for unaffixed or special-case affixed mnemonic. */
11698 opcode = hash_find_n (arm_ops_hsh, base, end - base);
/* Step U: exact match; decode odd-position infix if tagged so.  */
11702 if (opcode->tag < OT_odd_infix_0)
11704 inst.cond = COND_ALWAYS;
11708 if (unified_syntax)
11709 as_warn (_("conditional infixes are deprecated in unified syntax"));
11710 affix = base + (opcode->tag - OT_odd_infix_0);
11711 cond = hash_find_n (arm_cond_hsh, affix, 2);
11714 inst.cond = cond->value;
11718 /* Cannot have a conditional suffix on a mnemonic of less than two
11720 if (end - base < 3)
11723 /* Look for suffixed mnemonic. */
11725 cond = hash_find_n (arm_cond_hsh, affix, 2);
11726 opcode = hash_find_n (arm_ops_hsh, base, affix - base);
11727 if (opcode && cond)
/* Step CE: stem + two-char condition suffix both matched.  */
11730 switch (opcode->tag)
11732 case OT_cinfix3_legacy:
11733 /* Ignore conditional suffixes matched on infix only mnemonics. */
11737 case OT_odd_infix_unc:
11738 if (!unified_syntax)
11740 /* else fall through */
11743 case OT_csuf_or_in3:
11744 inst.cond = cond->value;
11747 case OT_unconditional:
11748 case OT_unconditionalF:
11751 inst.cond = cond->value;
11755 /* delayed diagnostic */
11756 inst.error = BAD_COND;
11757 inst.cond = COND_ALWAYS;
11766 /* Cannot have a usual-position infix on a mnemonic of less than
11767 six characters (five would be a suffix). */
11768 if (end - base < 6)
11771 /* Look for infixed mnemonic in the usual position. */
11773 cond = hash_find_n (arm_cond_hsh, affix, 2);
/* Step CM: temporarily splice the two infix characters out of the
   buffer, look up the stem, then restore the buffer.  */
11777 memcpy (save, affix, 2);
11778 memmove (affix, affix + 2, (end - affix) - 2);
11779 opcode = hash_find_n (arm_ops_hsh, base, (end - base) - 2);
11780 memmove (affix + 2, affix, (end - affix) - 2);
11781 memcpy (affix, save, 2);
11783 if (opcode && (opcode->tag == OT_cinfix3 || opcode->tag == OT_csuf_or_in3
11784 || opcode->tag == OT_cinfix3_legacy))
11787 if (unified_syntax && opcode->tag == OT_cinfix3)
11788 as_warn (_("conditional infixes are deprecated in unified syntax"));
11790 inst.cond = cond->value;
/* Top-level per-statement assembly entry point called by GAS.  Looks up
   the opcode, then takes the Thumb or ARM path: feature checks, IT-block
   condition checks (Thumb), operand parsing, encoding, and architecture
   feature-set accounting.  Many lines (braces, returns, `p'/`cond'
   declarations, the thumb/ARM if-else skeleton) are missing from this
   extract.  */
11798 md_assemble (char *str)
11801 const struct asm_opcode * opcode;
11803 /* Align the previous label if needed. */
11804 if (last_label_seen != NULL)
11806 symbol_set_frag (last_label_seen, frag_now);
11807 S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
11808 S_SET_SEGMENT (last_label_seen, now_seg);
11811 memset (&inst, '\0', sizeof (inst));
11812 inst.reloc.type = BFD_RELOC_UNUSED;
11814 opcode = opcode_lookup (&p);
11817 /* It wasn't an instruction, but it might be a register alias of
11818 the form alias .req reg. */
11819 if (!create_register_alias (str, p))
11820 as_bad (_("bad instruction `%s'"), str);
/* --- Thumb path (guard lines missing from this extract). --- */
11827 arm_feature_set variant;
11829 variant = cpu_variant;
11830 /* Only allow coprocessor instructions on Thumb-2 capable devices. */
11831 if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
11832 ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
11833 /* Check that this instruction is supported for this CPU. */
11834 if (!opcode->tvariant
11835 || (thumb_mode == 1
11836 && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
11838 as_bad (_("selected processor does not support `%s'"), str);
11841 if (inst.cond != COND_ALWAYS && !unified_syntax
11842 && opcode->tencode != do_t_branch)
11844 as_bad (_("Thumb does not support conditional execution"));
11848 /* Check conditional suffixes. */
11849 if (current_it_mask)
/* Derive the condition this slot of the IT block mandates, then shift
   the mask along for the next instruction.  */
11852 cond = current_cc ^ ((current_it_mask >> 4) & 1) ^ 1;
11853 current_it_mask <<= 1;
11854 current_it_mask &= 0x1f;
11855 /* The BKPT instruction is unconditional even in an IT block. */
11857 && cond != inst.cond && opcode->tencode != do_t_bkpt)
11859 as_bad (_("incorrect condition in IT block"));
11863 else if (inst.cond != COND_ALWAYS && opcode->tencode != do_t_branch)
/* NOTE(review): "instrunction" is a typo for "instruction" in this
   user-visible diagnostic; cannot be corrected in a comment-only edit.  */
11865 as_bad (_("thumb conditional instrunction not in IT block"));
11869 mapping_state (MAP_THUMB);
11870 inst.instruction = opcode->tvalue;
11872 if (!parse_operands (p, opcode->operands))
11873 opcode->tencode ();
11875 /* Clear current_it_mask at the end of an IT block. */
11876 if (current_it_mask == 0x10)
11877 current_it_mask = 0;
11879 if (!(inst.error || inst.relax))
/* Encodings 0xe800-0xffff are the first halfword of a 32-bit Thumb-2
   instruction and must never appear alone.  */
11881 assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
11882 inst.size = (inst.instruction > 0xffff ? 4 : 2);
11883 if (inst.size_req && inst.size_req != inst.size)
11885 as_bad (_("cannot honor width suffix -- `%s'"), str);
11889 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
11890 *opcode->tvariant);
11891 /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
11892 set those bits when Thumb-2 32-bit instructions are seen. ie.
11893 anything other than bl/blx.
11894 This is overly pessimistic for relaxable instructions. */
11895 if ((inst.size == 4 && (inst.instruction & 0xf800e800) != 0xf000e800)
11897 ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
/* --- ARM path (else-branch guard missing from this extract). --- */
11902 /* Check that this instruction is supported for this CPU. */
11903 if (!opcode->avariant ||
11904 !ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant))
11906 as_bad (_("selected processor does not support `%s'"), str);
11911 as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
11915 mapping_state (MAP_ARM);
11916 inst.instruction = opcode->avalue;
11917 if (opcode->tag == OT_unconditionalF)
11918 inst.instruction |= 0xF << 28;
11920 inst.instruction |= inst.cond << 28;
11921 inst.size = INSN_SIZE;
11922 if (!parse_operands (p, opcode->operands))
11923 opcode->aencode ();
11924 /* Arm mode bx is marked as both v4T and v5 because it's still required
11925 on a hypothetical non-thumb v5 core. */
11926 if (ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v4t)
11927 || ARM_CPU_HAS_FEATURE (*opcode->avariant, arm_ext_v5))
11928 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
11930 ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
11931 *opcode->avariant);
11936 /* Various frobbings of labels and their addresses. */
/* Called by GAS at the start of each input line: forget the label seen
   on the previous line so md_assemble only realigns a truly preceding
   label.  */
11939 arm_start_line_hook (void)
11941 last_label_seen = NULL;
/* Label hook: remember SYM for md_assemble's realignment, tag it with
   the current instruction set (and interworking for COFF/ELF), and mark
   pending .thumb_func labels -- except local .L labels, for the reasons
   explained below.  Some brace/#endif lines are missing from this
   extract.  */
11945 arm_frob_label (symbolS * sym)
11947 last_label_seen = sym;
11949 ARM_SET_THUMB (sym, thumb_mode);
11951 #if defined OBJ_COFF || defined OBJ_ELF
11952 ARM_SET_INTERWORK (sym, support_interwork);
11955 /* Note - do not allow local symbols (.Lxxx) to be labeled
11956 as Thumb functions. This is because these labels, whilst
11957 they exist inside Thumb code, are not the entry points for
11958 possible ARM->Thumb calls. Also, these labels can be used
11959 as part of a computed goto or switch statement. eg gcc
11960 can generate code that looks like this:
11962 ldr r2, [pc, .Laaa]
11972 The first instruction loads the address of the jump table.
11973 The second instruction converts a table index into a byte offset.
11974 The third instruction gets the jump address out of the table.
11975 The fourth instruction performs the jump.
11977 If the address stored at .Laaa is that of a symbol which has the
11978 Thumb_Func bit set, then the linker will arrange for this address
11979 to have the bottom bit set, which in turn would mean that the
11980 address computation performed by the third instruction would end
11981 up with the bottom bit set. Since the ARM is capable of unaligned
11982 word loads, the instruction would then load the incorrect address
11983 out of the jump table, and chaos would ensue. */
11984 if (label_is_thumb_function_name
11985 && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
11986 && (bfd_get_section_flags (stdoutput, now_seg) & SEC_CODE) != 0)
11988 /* When the address of a Thumb function is taken the bottom
11989 bit of that address should be set. This will allow
11990 interworking between Arm and Thumb functions to work
11993 THUMB_SET_FUNC (sym, 1);
/* .thumb_func applies to the next label only; consume the flag.  */
11995 label_is_thumb_function_name = FALSE;
11999 dwarf2_emit_label (sym);
/* Recognise the "/data:" marker after a symbol in Thumb code; rewrite it
   in place so the "/data" suffix becomes part of the symbol name (paired
   with arm_canonicalize_symbol_name below).  Return lines are missing
   from this extract.  */
12004 arm_data_in_code (void)
12006 if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
12008 *input_line_pointer = '/';
12009 input_line_pointer += 5;
12010 *input_line_pointer = 0;
/* Strip the "/data" suffix (added by arm_data_in_code) from NAME when in
   Thumb mode, by truncating the string in place.  The `len' declaration
   and return statement are missing from this extract.  */
12018 arm_canonicalize_symbol_name (char * name)
12022 if (thumb_mode && (len = strlen (name)) > 5
12023 && streq (name + len - 5, "/data"))
12024 *(name + len - 5) = 0;
12029 /* Table of all register names defined by default. The user can
12030 define additional names with .req. Note that all register names
12031 should appear in both upper and lowercase variants. Some registers
12032 also have mixed-case names. */
/* Helper macros for building reg_names entries:
   REGDEF  - one entry: name string, number, type tag.
   REGNUM  - entry whose name is prefix##number.
   REGNUM2 - like REGNUM but the stored number is doubled (used for Q
             registers, which alias pairs of D registers).
   REGSET/REGSETH/REGSET2 - expand to 16 consecutive entries
             (0-15, 16-31, and doubled 0-15 respectively).  */
12034 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
12035 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
12036 #define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
12037 #define REGSET(p,t) \
12038 REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
12039 REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
12040 REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
12041 REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
12042 #define REGSETH(p,t) \
12043 REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
12044 REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
12045 REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
12046 REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
12047 #define REGSET2(p,t) \
12048 REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
12049 REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
12050 REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
12051 REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* Master register-name table: core registers and ATPCS/AAPCS aliases,
   coprocessor numbers and registers, FPA, VFP single/double, Neon D/Q,
   VFP control, Maverick DSP, iWMMXt, and XScale accumulator registers.
   The closing brace of the initializer is missing from this extract.  */
12053 static const struct reg_entry reg_names[] =
12055 /* ARM integer registers. */
12056 REGSET(r, RN), REGSET(R, RN),
12058 /* ATPCS synonyms. */
12059 REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
12060 REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
12061 REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),
12063 REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
12064 REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
12065 REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),
12067 /* Well-known aliases. */
12068 REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
12069 REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),
12071 REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
12072 REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),
12074 /* Coprocessor numbers. */
12075 REGSET(p, CP), REGSET(P, CP),
12077 /* Coprocessor register numbers. The "cr" variants are for backward
12079 REGSET(c, CN), REGSET(C, CN),
12080 REGSET(cr, CN), REGSET(CR, CN),
12082 /* FPA registers. */
12083 REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
12084 REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),
12086 REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
12087 REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),
12089 /* VFP SP registers. */
12090 REGSET(s,VFS), REGSET(S,VFS),
12091 REGSETH(s,VFS), REGSETH(S,VFS),
12093 /* VFP DP Registers. */
12094 REGSET(d,VFD), REGSET(D,VFD),
12095 /* Extra Neon DP registers. */
12096 REGSETH(d,VFD), REGSETH(D,VFD),
12098 /* Neon QP registers. */
12099 REGSET2(q,NQ), REGSET2(Q,NQ),
12101 /* VFP control registers. */
12102 REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
12103 REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
12105 /* Maverick DSP coprocessor registers. */
12106 REGSET(mvf,MVF), REGSET(mvd,MVD), REGSET(mvfx,MVFX), REGSET(mvdx,MVDX),
12107 REGSET(MVF,MVF), REGSET(MVD,MVD), REGSET(MVFX,MVFX), REGSET(MVDX,MVDX),
12109 REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
12110 REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
12111 REGDEF(dspsc,0,DSPSC),
12113 REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
12114 REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
12115 REGDEF(DSPSC,0,DSPSC),
12117 /* iWMMXt data registers - p0, c0-15. */
12118 REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),
12120 /* iWMMXt control registers - p1, c0-3. */
12121 REGDEF(wcid, 0,MMXWC), REGDEF(wCID, 0,MMXWC), REGDEF(WCID, 0,MMXWC),
12122 REGDEF(wcon, 1,MMXWC), REGDEF(wCon, 1,MMXWC), REGDEF(WCON, 1,MMXWC),
12123 REGDEF(wcssf, 2,MMXWC), REGDEF(wCSSF, 2,MMXWC), REGDEF(WCSSF, 2,MMXWC),
12124 REGDEF(wcasf, 3,MMXWC), REGDEF(wCASF, 3,MMXWC), REGDEF(WCASF, 3,MMXWC),
12126 /* iWMMXt scalar (constant/offset) registers - p1, c8-11. */
12127 REGDEF(wcgr0, 8,MMXWCG), REGDEF(wCGR0, 8,MMXWCG), REGDEF(WCGR0, 8,MMXWCG),
12128 REGDEF(wcgr1, 9,MMXWCG), REGDEF(wCGR1, 9,MMXWCG), REGDEF(WCGR1, 9,MMXWCG),
12129 REGDEF(wcgr2,10,MMXWCG), REGDEF(wCGR2,10,MMXWCG), REGDEF(WCGR2,10,MMXWCG),
12130 REGDEF(wcgr3,11,MMXWCG), REGDEF(wCGR3,11,MMXWCG), REGDEF(WCGR3,11,MMXWCG),
12132 /* XScale accumulator registers. */
12133 REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
12139 /* Table of all PSR suffixes. Bare "CPSR" and "SPSR" are handled
12140 within psr_required_here. */
/* Every permutation of the f/s/x/c field letters maps to the same OR of
   PSR_* field masks, so users may write the fields in any order.  Some
   single-flag entries and the closing brace are missing from this
   extract.  */
12141 static const struct asm_psr psrs[] =
12143 /* Backward compatibility notation. Note that "all" is no longer
12144 truly all possible PSR bits. */
12145 {"all", PSR_c | PSR_f},
12149 /* Individual flags. */
12154 /* Combinations of flags. */
12155 {"fs", PSR_f | PSR_s},
12156 {"fx", PSR_f | PSR_x},
12157 {"fc", PSR_f | PSR_c},
12158 {"sf", PSR_s | PSR_f},
12159 {"sx", PSR_s | PSR_x},
12160 {"sc", PSR_s | PSR_c},
12161 {"xf", PSR_x | PSR_f},
12162 {"xs", PSR_x | PSR_s},
12163 {"xc", PSR_x | PSR_c},
12164 {"cf", PSR_c | PSR_f},
12165 {"cs", PSR_c | PSR_s},
12166 {"cx", PSR_c | PSR_x},
12167 {"fsx", PSR_f | PSR_s | PSR_x},
12168 {"fsc", PSR_f | PSR_s | PSR_c},
12169 {"fxs", PSR_f | PSR_x | PSR_s},
12170 {"fxc", PSR_f | PSR_x | PSR_c},
12171 {"fcs", PSR_f | PSR_c | PSR_s},
12172 {"fcx", PSR_f | PSR_c | PSR_x},
12173 {"sfx", PSR_s | PSR_f | PSR_x},
12174 {"sfc", PSR_s | PSR_f | PSR_c},
12175 {"sxf", PSR_s | PSR_x | PSR_f},
12176 {"sxc", PSR_s | PSR_x | PSR_c},
12177 {"scf", PSR_s | PSR_c | PSR_f},
12178 {"scx", PSR_s | PSR_c | PSR_x},
12179 {"xfs", PSR_x | PSR_f | PSR_s},
12180 {"xfc", PSR_x | PSR_f | PSR_c},
12181 {"xsf", PSR_x | PSR_s | PSR_f},
12182 {"xsc", PSR_x | PSR_s | PSR_c},
12183 {"xcf", PSR_x | PSR_c | PSR_f},
12184 {"xcs", PSR_x | PSR_c | PSR_s},
12185 {"cfs", PSR_c | PSR_f | PSR_s},
12186 {"cfx", PSR_c | PSR_f | PSR_x},
12187 {"csf", PSR_c | PSR_s | PSR_f},
12188 {"csx", PSR_c | PSR_s | PSR_x},
12189 {"cxf", PSR_c | PSR_x | PSR_f},
12190 {"cxs", PSR_c | PSR_x | PSR_s},
12191 {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
12192 {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
12193 {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
12194 {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
12195 {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
12196 {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
12197 {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
12198 {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
12199 {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
12200 {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
12201 {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
12202 {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
12203 {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
12204 {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
12205 {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
12206 {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
12207 {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
12208 {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
12209 {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
12210 {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
12211 {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
12212 {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
12213 {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
12214 {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
12217 /* Table of V7M psr names. */
12218 static const struct asm_psr v7m_psrs[] =
12231 {"basepri_max", 18},
12236 /* Table of all shift-in-operand names. */
12237 static const struct asm_shift_name shift_names [] =
12239 { "asl", SHIFT_LSL }, { "ASL", SHIFT_LSL },
12240 { "lsl", SHIFT_LSL }, { "LSL", SHIFT_LSL },
12241 { "lsr", SHIFT_LSR }, { "LSR", SHIFT_LSR },
12242 { "asr", SHIFT_ASR }, { "ASR", SHIFT_ASR },
12243 { "ror", SHIFT_ROR }, { "ROR", SHIFT_ROR },
12244 { "rrx", SHIFT_RRX }, { "RRX", SHIFT_RRX }
12247 /* Table of all explicit relocation names. */
12249 static struct reloc_entry reloc_names[] =
12251 { "got", BFD_RELOC_ARM_GOT32 }, { "GOT", BFD_RELOC_ARM_GOT32 },
12252 { "gotoff", BFD_RELOC_ARM_GOTOFF }, { "GOTOFF", BFD_RELOC_ARM_GOTOFF },
12253 { "plt", BFD_RELOC_ARM_PLT32 }, { "PLT", BFD_RELOC_ARM_PLT32 },
12254 { "target1", BFD_RELOC_ARM_TARGET1 }, { "TARGET1", BFD_RELOC_ARM_TARGET1 },
12255 { "target2", BFD_RELOC_ARM_TARGET2 }, { "TARGET2", BFD_RELOC_ARM_TARGET2 },
12256 { "sbrel", BFD_RELOC_ARM_SBREL32 }, { "SBREL", BFD_RELOC_ARM_SBREL32 },
12257 { "tlsgd", BFD_RELOC_ARM_TLS_GD32}, { "TLSGD", BFD_RELOC_ARM_TLS_GD32},
12258 { "tlsldm", BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM", BFD_RELOC_ARM_TLS_LDM32},
12259 { "tlsldo", BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO", BFD_RELOC_ARM_TLS_LDO32},
12260 { "gottpoff",BFD_RELOC_ARM_TLS_IE32}, { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
12261 { "tpoff", BFD_RELOC_ARM_TLS_LE32}, { "TPOFF", BFD_RELOC_ARM_TLS_LE32}
12265 /* Table of all conditional affixes. 0xF is not defined as a condition code. */
12266 static const struct asm_cond conds[] =
12270 {"cs", 0x2}, {"hs", 0x2},
12271 {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
12285 static struct asm_barrier_opt barrier_opt_names[] =
12293 /* Table of ARM-format instructions. */
12295 /* Macros for gluing together operand strings. N.B. In all cases
12296 other than OPS0, the trailing OP_stop comes from default
12297 zero-initialization of the unspecified elements of the array. */
/* OPSn(...) expands to a braced initializer of n operand-parser codes;
   the OP_ prefix is token-pasted onto each argument, so OPS2(RR,SH)
   becomes { OP_RR, OP_SH, }.  */
12298 #define OPS0() { OP_stop, }
12299 #define OPS1(a) { OP_##a, }
12300 #define OPS2(a,b) { OP_##a,OP_##b, }
12301 #define OPS3(a,b,c) { OP_##a,OP_##b,OP_##c, }
12302 #define OPS4(a,b,c,d) { OP_##a,OP_##b,OP_##c,OP_##d, }
12303 #define OPS5(a,b,c,d,e) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
12304 #define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }
12306 /* These macros abstract out the exact format of the mnemonic table and
12307 save some repeated characters. */
12309 /* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix. */
/* Initializer fields, in order: mnemonic string, operand-code list,
   condition-placement tag (suffix here), ARM opcode, Thumb opcode,
   ARM architecture-variant mask, Thumb variant mask, ARM encoder
   function, Thumb encoder function.  */
12310 #define TxCE(mnem, op, top, nops, ops, ae, te) \
12311 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
12312 THUMB_VARIANT, do_##ae, do_##te }
12314 /* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
12315 a T_MNEM_xyz enumerator. */
12316 #define TCE(mnem, aop, top, nops, ops, ae, te) \
12317 TxCE(mnem, aop, 0x##top, nops, ops, ae, te)
12318 #define tCE(mnem, aop, top, nops, ops, ae, te) \
12319 TxCE(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12321 /* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
12322 infix after the third character. */
/* Same field layout as TxCE, but tagged OT_cinfix3 so the parser looks
   for the condition after the first three characters of the mnemonic
   (e.g. "ldr" + "eq" + "b").  */
12323 #define TxC3(mnem, op, top, nops, ops, ae, te) \
12324 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
12325 THUMB_VARIANT, do_##ae, do_##te }
/* Numeric-Thumb-opcode and T_MNEM-enumerator flavours, as for TCE/tCE.  */
12326 #define TC3(mnem, aop, top, nops, ops, ae, te) \
12327 TxC3(mnem, aop, 0x##top, nops, ops, ae, te)
12328 #define tC3(mnem, aop, top, nops, ops, ae, te) \
12329 TxC3(mnem, aop, T_MNEM_##top, nops, ops, ae, te)
12331 /* Mnemonic with a conditional infix in an unusual place. Each and every variant has to
12332 appear in the condition table. */
/* TxCM_ builds one table entry for mnemonic m1<m2>m3, where m2 is the
   (possibly empty) condition infix; sizeof(#m2) == 1 detects the empty
   string (just the NUL), in which case the entry is marked as having no
   condition at an odd position.  */
12333 #define TxCM_(m1, m2, m3, op, top, nops, ops, ae, te) \
12334 { #m1 #m2 #m3, OPS##nops ops, sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
12335 0x##op, top, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##te }
/* TxCM expands to 19 entries: the bare mnemonic plus one per condition
   code (with both "cs"/"hs" and "cc"/"ul"/"lo" aliases; "nv" is not
   generated).  */
12337 #define TxCM(m1, m2, op, top, nops, ops, ae, te) \
12338 TxCM_(m1, , m2, op, top, nops, ops, ae, te), \
12339 TxCM_(m1, eq, m2, op, top, nops, ops, ae, te), \
12340 TxCM_(m1, ne, m2, op, top, nops, ops, ae, te), \
12341 TxCM_(m1, cs, m2, op, top, nops, ops, ae, te), \
12342 TxCM_(m1, hs, m2, op, top, nops, ops, ae, te), \
12343 TxCM_(m1, cc, m2, op, top, nops, ops, ae, te), \
12344 TxCM_(m1, ul, m2, op, top, nops, ops, ae, te), \
12345 TxCM_(m1, lo, m2, op, top, nops, ops, ae, te), \
12346 TxCM_(m1, mi, m2, op, top, nops, ops, ae, te), \
12347 TxCM_(m1, pl, m2, op, top, nops, ops, ae, te), \
12348 TxCM_(m1, vs, m2, op, top, nops, ops, ae, te), \
12349 TxCM_(m1, vc, m2, op, top, nops, ops, ae, te), \
12350 TxCM_(m1, hi, m2, op, top, nops, ops, ae, te), \
12351 TxCM_(m1, ls, m2, op, top, nops, ops, ae, te), \
12352 TxCM_(m1, ge, m2, op, top, nops, ops, ae, te), \
12353 TxCM_(m1, lt, m2, op, top, nops, ops, ae, te), \
12354 TxCM_(m1, gt, m2, op, top, nops, ops, ae, te), \
12355 TxCM_(m1, le, m2, op, top, nops, ops, ae, te), \
12356 TxCM_(m1, al, m2, op, top, nops, ops, ae, te)
/* Numeric-Thumb-opcode and T_MNEM-enumerator flavours.  */
12358 #define TCM(m1,m2, aop, top, nops, ops, ae, te) \
12359 TxCM(m1,m2, aop, 0x##top, nops, ops, ae, te)
12360 #define tCM(m1,m2, aop, top, nops, ops, ae, te) \
12361 TxCM(m1,m2, aop, T_MNEM_##top, nops, ops, ae, te)
12363 /* Mnemonic that cannot be conditionalized. The ARM condition-code
12364 field is still 0xE. Many of the Thumb variants can be executed
12365 conditionally, so this is checked separately. */
12366 #define TUE(mnem, op, top, nops, ops, ae, te) \
12367 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
12368 THUMB_VARIANT, do_##ae, do_##te }
12370 /* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
12371 condition code field. */
12372 #define TUF(mnem, op, top, nops, ops, ae, te) \
12373 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
12374 THUMB_VARIANT, do_##ae, do_##te }
12376 /* ARM-only variants of all the above. */
/* Thumb opcode is 0, Thumb variant mask is 0 and the Thumb encoder is
   NULL: these mnemonics are rejected in Thumb mode.  */
12377 #define CE(mnem, op, nops, ops, ae) \
12378 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12380 #define C3(mnem, op, nops, ops, ae) \
12381 { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12383 /* Legacy mnemonics that always have conditional infix after the third
   character.  */
12385 #define CL(mnem, op, nops, ops, ae) \
12386 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
12387 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
12389 /* Coprocessor instructions. Isomorphic between Arm and Thumb-2. */
/* The Thumb-2 opcode is the ARM opcode with an 0xe (AL) condition nibble
   prepended, and the ARM variant mask/encoder are reused for Thumb.  */
12390 #define cCE(mnem, op, nops, ops, ae) \
12391 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
12393 /* Legacy coprocessor instructions where conditional infix and conditional
12394 suffix are ambiguous. For consistency this includes all FPA instructions,
12395 not just the potentially ambiguous ones. */
12396 #define cCL(mnem, op, nops, ops, ae) \
12397 { #mnem, OPS##nops ops, OT_cinfix3_legacy, \
12398 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
12400 /* Coprocessor, takes either a suffix or a position-3 infix
12401 (for an FPA corner case). */
12402 #define C3E(mnem, op, nops, ops, ae) \
12403 { #mnem, OPS##nops ops, OT_csuf_or_in3, \
12404 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae }
/* ARM-only equivalent of TxCM_: one entry for mnemonic m1<m2>m3 with the
   condition infix m2 at an odd position; sizeof(#m2) == 1 detects an
   empty infix.  No Thumb variant.  */
12406 #define xCM_(m1, m2, m3, op, nops, ops, ae) \
12407 { #m1 #m2 #m3, OPS##nops ops, \
12408 sizeof(#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof(#m1) - 1, \
12409 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL }
/* ARM-only equivalent of TxCM: the bare mnemonic plus every condition
   spelling (including the cs/hs and cc/ul/lo aliases).  */
12411 #define CM(m1, m2, op, nops, ops, ae) \
12412 xCM_(m1, , m2, op, nops, ops, ae), \
12413 xCM_(m1, eq, m2, op, nops, ops, ae), \
12414 xCM_(m1, ne, m2, op, nops, ops, ae), \
12415 xCM_(m1, cs, m2, op, nops, ops, ae), \
12416 xCM_(m1, hs, m2, op, nops, ops, ae), \
12417 xCM_(m1, cc, m2, op, nops, ops, ae), \
12418 xCM_(m1, ul, m2, op, nops, ops, ae), \
12419 xCM_(m1, lo, m2, op, nops, ops, ae), \
12420 xCM_(m1, mi, m2, op, nops, ops, ae), \
12421 xCM_(m1, pl, m2, op, nops, ops, ae), \
12422 xCM_(m1, vs, m2, op, nops, ops, ae), \
12423 xCM_(m1, vc, m2, op, nops, ops, ae), \
12424 xCM_(m1, hi, m2, op, nops, ops, ae), \
12425 xCM_(m1, ls, m2, op, nops, ops, ae), \
12426 xCM_(m1, ge, m2, op, nops, ops, ae), \
12427 xCM_(m1, lt, m2, op, nops, ops, ae), \
12428 xCM_(m1, gt, m2, op, nops, ops, ae), \
12429 xCM_(m1, le, m2, op, nops, ops, ae), \
12430 xCM_(m1, al, m2, op, nops, ops, ae)
/* ARM-only unconditional (condition field 0xE) and unconditional-F
   (condition field 0xF) mnemonics.  */
12432 #define UE(mnem, op, nops, ops, ae) \
12433 { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
12435 #define UF(mnem, op, nops, ops, ae) \
12436 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL }
12438 /* Neon data-processing. ARM versions are unconditional with cond=0xf.
12439 The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
12440 use the same encoding function for each. */
12441 #define NUF(mnem, op, nops, ops, enc) \
12442 { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op, \
12443 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
12445 /* Neon data processing, version which indirects through neon_enc_tab for
12446 the various overloaded versions of opcodes. */
/* Here the opcode fields hold an N_MNEM_xyz enumerator rather than raw
   encoding bits.  */
12447 #define nUF(mnem, op, nops, ops, enc) \
12448 { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM_##op, N_MNEM_##op, \
12449 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
12451 /* Neon insn with conditional suffix for the ARM version, non-overloaded
   types.  */
12453 #define NCE(mnem, op, nops, ops, enc) \
12454 { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x##op, ARM_VARIANT, \
12455 THUMB_VARIANT, do_##enc, do_##enc }
12457 /* Neon insn with conditional suffix for the ARM version, overloaded types. */
12458 #define nCE(mnem, op, nops, ops, enc) \
12459 { #mnem, OPS##nops ops, OT_csuffix, N_MNEM_##op, N_MNEM_##op, \
12460 ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc }
12464 /* Thumb-only, unconditional. */
12465 #define UT(mnem, op, nops, ops, te) TUE(mnem, 0, op, nops, ops, 0, te)
12467 static const struct asm_opcode insns[] =
12469 #define ARM_VARIANT &arm_ext_v1 /* Core ARM Instructions. */
12470 #define THUMB_VARIANT &arm_ext_v4t
12471 tCE(and, 0000000, and, 3, (RR, oRR, SH), arit, t_arit3c),
12472 tC3(ands, 0100000, ands, 3, (RR, oRR, SH), arit, t_arit3c),
12473 tCE(eor, 0200000, eor, 3, (RR, oRR, SH), arit, t_arit3c),
12474 tC3(eors, 0300000, eors, 3, (RR, oRR, SH), arit, t_arit3c),
12475 tCE(sub, 0400000, sub, 3, (RR, oRR, SH), arit, t_add_sub),
12476 tC3(subs, 0500000, subs, 3, (RR, oRR, SH), arit, t_add_sub),
12477 tCE(add, 0800000, add, 3, (RR, oRR, SH), arit, t_add_sub),
12478 tC3(adds, 0900000, adds, 3, (RR, oRR, SH), arit, t_add_sub),
12479 tCE(adc, 0a00000, adc, 3, (RR, oRR, SH), arit, t_arit3c),
12480 tC3(adcs, 0b00000, adcs, 3, (RR, oRR, SH), arit, t_arit3c),
12481 tCE(sbc, 0c00000, sbc, 3, (RR, oRR, SH), arit, t_arit3),
12482 tC3(sbcs, 0d00000, sbcs, 3, (RR, oRR, SH), arit, t_arit3),
12483 tCE(orr, 1800000, orr, 3, (RR, oRR, SH), arit, t_arit3c),
12484 tC3(orrs, 1900000, orrs, 3, (RR, oRR, SH), arit, t_arit3c),
12485 tCE(bic, 1c00000, bic, 3, (RR, oRR, SH), arit, t_arit3),
12486 tC3(bics, 1d00000, bics, 3, (RR, oRR, SH), arit, t_arit3),
12488 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
12489 for setting PSR flag bits. They are obsolete in V6 and do not
12490 have Thumb equivalents. */
12491 tCE(tst, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
12492 tC3(tsts, 1100000, tst, 2, (RR, SH), cmp, t_mvn_tst),
12493 CL(tstp, 110f000, 2, (RR, SH), cmp),
12494 tCE(cmp, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
12495 tC3(cmps, 1500000, cmp, 2, (RR, SH), cmp, t_mov_cmp),
12496 CL(cmpp, 150f000, 2, (RR, SH), cmp),
12497 tCE(cmn, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
12498 tC3(cmns, 1700000, cmn, 2, (RR, SH), cmp, t_mvn_tst),
12499 CL(cmnp, 170f000, 2, (RR, SH), cmp),
12501 tCE(mov, 1a00000, mov, 2, (RR, SH), mov, t_mov_cmp),
12502 tC3(movs, 1b00000, movs, 2, (RR, SH), mov, t_mov_cmp),
12503 tCE(mvn, 1e00000, mvn, 2, (RR, SH), mov, t_mvn_tst),
12504 tC3(mvns, 1f00000, mvns, 2, (RR, SH), mov, t_mvn_tst),
12506 tCE(ldr, 4100000, ldr, 2, (RR, ADDR), ldst, t_ldst),
12507 tC3(ldrb, 4500000, ldrb, 2, (RR, ADDR), ldst, t_ldst),
12508 tCE(str, 4000000, str, 2, (RR, ADDR), ldst, t_ldst),
12509 tC3(strb, 4400000, strb, 2, (RR, ADDR), ldst, t_ldst),
12511 tCE(stm, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
12512 tC3(stmia, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
12513 tC3(stmea, 8800000, stmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
12514 tCE(ldm, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
12515 tC3(ldmia, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
12516 tC3(ldmfd, 8900000, ldmia, 2, (RRw, REGLST), ldmstm, t_ldmstm),
12518 TCE(swi, f000000, df00, 1, (EXPi), swi, t_swi),
12519 TCE(svc, f000000, df00, 1, (EXPi), swi, t_swi),
12520 tCE(b, a000000, b, 1, (EXPr), branch, t_branch),
12521 TCE(bl, b000000, f000f800, 1, (EXPr), bl, t_branch23),
12524 tCE(adr, 28f0000, adr, 2, (RR, EXP), adr, t_adr),
12525 C3(adrl, 28f0000, 2, (RR, EXP), adrl),
12526 tCE(nop, 1a00000, nop, 1, (oI255c), nop, t_nop),
12528 /* Thumb-compatibility pseudo ops. */
12529 tCE(lsl, 1a00000, lsl, 3, (RR, oRR, SH), shift, t_shift),
12530 tC3(lsls, 1b00000, lsls, 3, (RR, oRR, SH), shift, t_shift),
12531 tCE(lsr, 1a00020, lsr, 3, (RR, oRR, SH), shift, t_shift),
12532 tC3(lsrs, 1b00020, lsrs, 3, (RR, oRR, SH), shift, t_shift),
12533 tCE(asr, 1a00040, asr, 3, (RR, oRR, SH), shift, t_shift),
12534 tC3(asrs, 1b00040, asrs, 3, (RR, oRR, SH), shift, t_shift),
12535 tCE(ror, 1a00060, ror, 3, (RR, oRR, SH), shift, t_shift),
12536 tC3(rors, 1b00060, rors, 3, (RR, oRR, SH), shift, t_shift),
12537 tCE(neg, 2600000, neg, 2, (RR, RR), rd_rn, t_neg),
12538 tC3(negs, 2700000, negs, 2, (RR, RR), rd_rn, t_neg),
12539 tCE(push, 92d0000, push, 1, (REGLST), push_pop, t_push_pop),
12540 tCE(pop, 8bd0000, pop, 1, (REGLST), push_pop, t_push_pop),
12542 #undef THUMB_VARIANT
12543 #define THUMB_VARIANT &arm_ext_v6
12544 TCE(cpy, 1a00000, 4600, 2, (RR, RR), rd_rm, t_cpy),
12546 /* V1 instructions with no Thumb analogue prior to V6T2. */
12547 #undef THUMB_VARIANT
12548 #define THUMB_VARIANT &arm_ext_v6t2
12549 TCE(rsb, 0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
12550 TC3(rsbs, 0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
12551 TCE(teq, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
12552 TC3(teqs, 1300000, ea900f00, 2, (RR, SH), cmp, t_mvn_tst),
12553 CL(teqp, 130f000, 2, (RR, SH), cmp),
12555 TC3(ldrt, 4300000, f8500e00, 2, (RR, ADDR), ldstt, t_ldstt),
12556 TC3(ldrbt, 4700000, f8100e00, 2, (RR, ADDR), ldstt, t_ldstt),
12557 TC3(strt, 4200000, f8400e00, 2, (RR, ADDR), ldstt, t_ldstt),
12558 TC3(strbt, 4600000, f8000e00, 2, (RR, ADDR), ldstt, t_ldstt),
12560 TC3(stmdb, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
12561 TC3(stmfd, 9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
12563 TC3(ldmdb, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
12564 TC3(ldmea, 9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
12566 /* V1 instructions with no Thumb analogue at all. */
12567 CE(rsc, 0e00000, 3, (RR, oRR, SH), arit),
12568 C3(rscs, 0f00000, 3, (RR, oRR, SH), arit),
12570 C3(stmib, 9800000, 2, (RRw, REGLST), ldmstm),
12571 C3(stmfa, 9800000, 2, (RRw, REGLST), ldmstm),
12572 C3(stmda, 8000000, 2, (RRw, REGLST), ldmstm),
12573 C3(stmed, 8000000, 2, (RRw, REGLST), ldmstm),
12574 C3(ldmib, 9900000, 2, (RRw, REGLST), ldmstm),
12575 C3(ldmed, 9900000, 2, (RRw, REGLST), ldmstm),
12576 C3(ldmda, 8100000, 2, (RRw, REGLST), ldmstm),
12577 C3(ldmfa, 8100000, 2, (RRw, REGLST), ldmstm),
12580 #define ARM_VARIANT &arm_ext_v2 /* ARM 2 - multiplies. */
12581 #undef THUMB_VARIANT
12582 #define THUMB_VARIANT &arm_ext_v4t
12583 tCE(mul, 0000090, mul, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
12584 tC3(muls, 0100090, muls, 3, (RRnpc, RRnpc, oRR), mul, t_mul),
12586 #undef THUMB_VARIANT
12587 #define THUMB_VARIANT &arm_ext_v6t2
12588 TCE(mla, 0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
12589 C3(mlas, 0300090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
12591 /* Generic coprocessor instructions. */
12592 TCE(cdp, e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
12593 TCE(ldc, c100000, ec100000, 3, (RCP, RCN, ADDR), lstc, lstc),
12594 TC3(ldcl, c500000, ec500000, 3, (RCP, RCN, ADDR), lstc, lstc),
12595 TCE(stc, c000000, ec000000, 3, (RCP, RCN, ADDR), lstc, lstc),
12596 TC3(stcl, c400000, ec400000, 3, (RCP, RCN, ADDR), lstc, lstc),
12597 TCE(mcr, e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
12598 TCE(mrc, e100010, ee100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
12601 #define ARM_VARIANT &arm_ext_v2s /* ARM 3 - swp instructions. */
12602 CE(swp, 1000090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
12603 C3(swpb, 1400090, 3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
12606 #define ARM_VARIANT &arm_ext_v3 /* ARM 6 Status register instructions. */
12607 TCE(mrs, 10f0000, f3ef8000, 2, (RR, PSR), mrs, t_mrs),
12608 TCE(msr, 120f000, f3808000, 2, (PSR, RR_EXi), msr, t_msr),
12611 #define ARM_VARIANT &arm_ext_v3m /* ARM 7M long multiplies. */
12612 TCE(smull, 0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
12613 CM(smull,s, 0d00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
12614 TCE(umull, 0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
12615 CM(umull,s, 0900090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
12616 TCE(smlal, 0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
12617 CM(smlal,s, 0f00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
12618 TCE(umlal, 0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
12619 CM(umlal,s, 0b00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
12622 #define ARM_VARIANT &arm_ext_v4 /* ARM Architecture 4. */
12623 #undef THUMB_VARIANT
12624 #define THUMB_VARIANT &arm_ext_v4t
12625 tC3(ldrh, 01000b0, ldrh, 2, (RR, ADDR), ldstv4, t_ldst),
12626 tC3(strh, 00000b0, strh, 2, (RR, ADDR), ldstv4, t_ldst),
12627 tC3(ldrsh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
12628 tC3(ldrsb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
12629 tCM(ld,sh, 01000f0, ldrsh, 2, (RR, ADDR), ldstv4, t_ldst),
12630 tCM(ld,sb, 01000d0, ldrsb, 2, (RR, ADDR), ldstv4, t_ldst),
12633 #define ARM_VARIANT &arm_ext_v4t_5
12634 /* ARM Architecture 4T. */
12635 /* Note: bx (and blx) are required on V5, even if the processor does
12636 not support Thumb. */
12637 TCE(bx, 12fff10, 4700, 1, (RR), bx, t_bx),
12640 #define ARM_VARIANT &arm_ext_v5 /* ARM Architecture 5T. */
12641 #undef THUMB_VARIANT
12642 #define THUMB_VARIANT &arm_ext_v5t
12643 /* Note: blx has 2 variants; the .value coded here is for
12644 BLX(2). Only this variant has conditional execution. */
12645 TCE(blx, 12fff30, 4780, 1, (RR_EXr), blx, t_blx),
12646 TUE(bkpt, 1200070, be00, 1, (oIffffb), bkpt, t_bkpt),
12648 #undef THUMB_VARIANT
12649 #define THUMB_VARIANT &arm_ext_v6t2
12650 TCE(clz, 16f0f10, fab0f080, 2, (RRnpc, RRnpc), rd_rm, t_clz),
12651 TUF(ldc2, c100000, fc100000, 3, (RCP, RCN, ADDR), lstc, lstc),
12652 TUF(ldc2l, c500000, fc500000, 3, (RCP, RCN, ADDR), lstc, lstc),
12653 TUF(stc2, c000000, fc000000, 3, (RCP, RCN, ADDR), lstc, lstc),
12654 TUF(stc2l, c400000, fc400000, 3, (RCP, RCN, ADDR), lstc, lstc),
12655 TUF(cdp2, e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp, cdp),
12656 TUF(mcr2, e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
12657 TUF(mrc2, e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b), co_reg, co_reg),
12660 #define ARM_VARIANT &arm_ext_v5exp /* ARM Architecture 5TExP. */
12661 TCE(smlabb, 1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
12662 TCE(smlatb, 10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
12663 TCE(smlabt, 10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
12664 TCE(smlatt, 10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
12666 TCE(smlawb, 1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
12667 TCE(smlawt, 12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smla, t_mla),
12669 TCE(smlalbb, 1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
12670 TCE(smlaltb, 14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
12671 TCE(smlalbt, 14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
12672 TCE(smlaltt, 14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc), smlal, t_mlal),
12674 TCE(smulbb, 1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
12675 TCE(smultb, 16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
12676 TCE(smulbt, 16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
12677 TCE(smultt, 16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
12679 TCE(smulwb, 12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
12680 TCE(smulwt, 12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
12682 TCE(qadd, 1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
12683 TCE(qdadd, 1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
12684 TCE(qsub, 1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
12685 TCE(qdsub, 1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc), rd_rm_rn, rd_rm_rn),
12688 #define ARM_VARIANT &arm_ext_v5e /* ARM Architecture 5TE. */
12689 TUF(pld, 450f000, f810f000, 1, (ADDR), pld, t_pld),
12690 TC3(ldrd, 00000d0, e9500000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
12691 TC3(strd, 00000f0, e9400000, 3, (RRnpc, oRRnpc, ADDR), ldrd, t_ldstd),
12693 TCE(mcrr, c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
12694 TCE(mrrc, c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
12697 #define ARM_VARIANT &arm_ext_v5j /* ARM Architecture 5TEJ. */
12698 TCE(bxj, 12fff20, f3c08f00, 1, (RR), bxj, t_bxj),
12701 #define ARM_VARIANT &arm_ext_v6 /* ARM V6. */
12702 #undef THUMB_VARIANT
12703 #define THUMB_VARIANT &arm_ext_v6
12704 TUF(cpsie, 1080000, b660, 2, (CPSF, oI31b), cpsi, t_cpsi),
12705 TUF(cpsid, 10c0000, b670, 2, (CPSF, oI31b), cpsi, t_cpsi),
12706 tCE(rev, 6bf0f30, rev, 2, (RRnpc, RRnpc), rd_rm, t_rev),
12707 tCE(rev16, 6bf0fb0, rev16, 2, (RRnpc, RRnpc), rd_rm, t_rev),
12708 tCE(revsh, 6ff0fb0, revsh, 2, (RRnpc, RRnpc), rd_rm, t_rev),
12709 tCE(sxth, 6bf0070, sxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
12710 tCE(uxth, 6ff0070, uxth, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
12711 tCE(sxtb, 6af0070, sxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
12712 tCE(uxtb, 6ef0070, uxtb, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
12713 TUF(setend, 1010000, b650, 1, (ENDI), setend, t_setend),
12715 #undef THUMB_VARIANT
12716 #define THUMB_VARIANT &arm_ext_v6t2
12717 TCE(ldrex, 1900f9f, e8500f00, 2, (RRnpc, ADDR), ldrex, t_ldrex),
12718 TUF(mcrr2, c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
12719 TUF(mrrc2, c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
12721 TCE(ssat, 6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat, t_ssat),
12722 TCE(usat, 6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat, t_usat),
12724 /* ARM V6 not included in V7M (eg. integer SIMD). */
12725 #undef THUMB_VARIANT
12726 #define THUMB_VARIANT &arm_ext_v6_notm
12727 TUF(cps, 1020000, f3af8100, 1, (I31b), imm0, t_cps),
12728 TCE(pkhbt, 6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll), pkhbt, t_pkhbt),
12729 TCE(pkhtb, 6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar), pkhtb, t_pkhtb),
12730 TCE(qadd16, 6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12731 TCE(qadd8, 6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12732 TCE(qaddsubx, 6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12733 TCE(qsub16, 6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12734 TCE(qsub8, 6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12735 TCE(qsubaddx, 6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12736 TCE(sadd16, 6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12737 TCE(sadd8, 6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12738 TCE(saddsubx, 6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12739 TCE(shadd16, 6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12740 TCE(shadd8, 6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12741 TCE(shaddsubx, 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12742 TCE(shsub16, 6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12743 TCE(shsub8, 6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12744 TCE(shsubaddx, 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12745 TCE(ssub16, 6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12746 TCE(ssub8, 6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12747 TCE(ssubaddx, 6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12748 TCE(uadd16, 6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12749 TCE(uadd8, 6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12750 TCE(uaddsubx, 6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12751 TCE(uhadd16, 6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12752 TCE(uhadd8, 6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12753 TCE(uhaddsubx, 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12754 TCE(uhsub16, 6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12755 TCE(uhsub8, 6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12756 TCE(uhsubaddx, 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12757 TCE(uqadd16, 6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12758 TCE(uqadd8, 6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12759 TCE(uqaddsubx, 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12760 TCE(uqsub16, 6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12761 TCE(uqsub8, 6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12762 TCE(uqsubaddx, 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12763 TCE(usub16, 6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12764 TCE(usub8, 6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12765 TCE(usubaddx, 6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12766 TUF(rfeia, 8900a00, e990c000, 1, (RRw), rfe, rfe),
12767 UF(rfeib, 9900a00, 1, (RRw), rfe),
12768 UF(rfeda, 8100a00, 1, (RRw), rfe),
12769 TUF(rfedb, 9100a00, e810c000, 1, (RRw), rfe, rfe),
12770 TUF(rfefd, 8900a00, e990c000, 1, (RRw), rfe, rfe),
12771 UF(rfefa, 9900a00, 1, (RRw), rfe),
12772 UF(rfeea, 8100a00, 1, (RRw), rfe),
12773 TUF(rfeed, 9100a00, e810c000, 1, (RRw), rfe, rfe),
12774 TCE(sxtah, 6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
12775 TCE(sxtab16, 6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
12776 TCE(sxtab, 6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
12777 TCE(sxtb16, 68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
12778 TCE(uxtah, 6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
12779 TCE(uxtab16, 6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
12780 TCE(uxtab, 6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
12781 TCE(uxtb16, 6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR), sxth, t_sxth),
12782 TCE(sel, 6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc), rd_rn_rm, t_simd),
12783 TCE(smlad, 7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
12784 TCE(smladx, 7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
12785 TCE(smlald, 7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
12786 TCE(smlaldx, 7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
12787 TCE(smlsd, 7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
12788 TCE(smlsdx, 7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
12789 TCE(smlsld, 7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
12790 TCE(smlsldx, 7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
12791 TCE(smmla, 7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
12792 TCE(smmlar, 7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
12793 TCE(smmls, 75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
12794 TCE(smmlsr, 75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
12795 TCE(smmul, 750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
12796 TCE(smmulr, 750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
12797 TCE(smuad, 700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
12798 TCE(smuadx, 700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
12799 TCE(smusd, 700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
12800 TCE(smusdx, 700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
12801 TUF(srsia, 8cd0500, e980c000, 1, (I31w), srs, srs),
12802 UF(srsib, 9cd0500, 1, (I31w), srs),
12803 UF(srsda, 84d0500, 1, (I31w), srs),
12804 TUF(srsdb, 94d0500, e800c000, 1, (I31w), srs, srs),
12805 TCE(ssat16, 6a00f30, f3200000, 3, (RRnpc, I16, RRnpc), ssat16, t_ssat16),
12806 TCE(strex, 1800f90, e8400000, 3, (RRnpc, RRnpc, ADDR), strex, t_strex),
12807 TCE(umaal, 0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal, t_mlal),
12808 TCE(usad8, 780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc), smul, t_simd),
12809 TCE(usada8, 7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
12810 TCE(usat16, 6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc), usat16, t_usat16),
12813 #define ARM_VARIANT &arm_ext_v6k
12814 #undef THUMB_VARIANT
12815 #define THUMB_VARIANT &arm_ext_v6k
12816 tCE(yield, 320f001, yield, 0, (), noargs, t_hint),
12817 tCE(wfe, 320f002, wfe, 0, (), noargs, t_hint),
12818 tCE(wfi, 320f003, wfi, 0, (), noargs, t_hint),
12819 tCE(sev, 320f004, sev, 0, (), noargs, t_hint),
12821 #undef THUMB_VARIANT
12822 #define THUMB_VARIANT &arm_ext_v6_notm
12823 TCE(ldrexd, 1b00f9f, e8d0007f, 3, (RRnpc, oRRnpc, RRnpcb), ldrexd, t_ldrexd),
12824 TCE(strexd, 1a00f90, e8c00070, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb), strexd, t_strexd),
12826 #undef THUMB_VARIANT
12827 #define THUMB_VARIANT &arm_ext_v6t2
12828 TCE(ldrexb, 1d00f9f, e8d00f4f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
12829 TCE(ldrexh, 1f00f9f, e8d00f5f, 2, (RRnpc, RRnpcb), rd_rn, rd_rn),
12830 TCE(strexb, 1c00f90, e8c00f40, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
12831 TCE(strexh, 1e00f90, e8c00f50, 3, (RRnpc, RRnpc, ADDR), strex, rm_rd_rn),
12832 TUF(clrex, 57ff01f, f3bf8f2f, 0, (), noargs, noargs),
12835 #define ARM_VARIANT &arm_ext_v6z
12836 TCE(smc, 1600070, f7f08000, 1, (EXPi), smc, t_smc),
12839 #define ARM_VARIANT &arm_ext_v6t2
12840 TCE(bfc, 7c0001f, f36f0000, 3, (RRnpc, I31, I32), bfc, t_bfc),
12841 TCE(bfi, 7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
12842 TCE(sbfx, 7a00050, f3400000, 4, (RR, RR, I31, I32), bfx, t_bfx),
12843 TCE(ubfx, 7e00050, f3c00000, 4, (RR, RR, I31, I32), bfx, t_bfx),
12845 TCE(mls, 0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
12846 TCE(movw, 3000000, f2400000, 2, (RRnpc, Iffff), mov16, t_mov16),
12847 TCE(movt, 3400000, f2c00000, 2, (RRnpc, Iffff), mov16, t_mov16),
12848 TCE(rbit, 3ff0f30, fa90f0a0, 2, (RR, RR), rd_rm, t_rbit),
12850 TC3(ldrht, 03000b0, f8300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
12851 TC3(ldrsht, 03000f0, f9300e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
12852 TC3(ldrsbt, 03000d0, f9100e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
12853 TC3(strht, 02000b0, f8200e00, 2, (RR, ADDR), ldsttv4, t_ldstt),
12855 UT(cbnz, b900, 2, (RR, EXP), t_czb),
12856 UT(cbz, b100, 2, (RR, EXP), t_czb),
12857 /* ARM does not really have an IT instruction. */
12858 TUE(it, 0, bf08, 1, (COND), it, t_it),
12859 TUE(itt, 0, bf0c, 1, (COND), it, t_it),
12860 TUE(ite, 0, bf04, 1, (COND), it, t_it),
12861 TUE(ittt, 0, bf0e, 1, (COND), it, t_it),
12862 TUE(itet, 0, bf06, 1, (COND), it, t_it),
12863 TUE(itte, 0, bf0a, 1, (COND), it, t_it),
12864 TUE(itee, 0, bf02, 1, (COND), it, t_it),
12865 TUE(itttt, 0, bf0f, 1, (COND), it, t_it),
12866 TUE(itett, 0, bf07, 1, (COND), it, t_it),
12867 TUE(ittet, 0, bf0b, 1, (COND), it, t_it),
12868 TUE(iteet, 0, bf03, 1, (COND), it, t_it),
12869 TUE(ittte, 0, bf0d, 1, (COND), it, t_it),
12870 TUE(itete, 0, bf05, 1, (COND), it, t_it),
12871 TUE(ittee, 0, bf09, 1, (COND), it, t_it),
12872 TUE(iteee, 0, bf01, 1, (COND), it, t_it),
12874 /* Thumb2 only instructions. */
12876 #define ARM_VARIANT NULL
12878 TCE(addw, 0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
12879 TCE(subw, 0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
12880 TCE(tbb, 0, e8d0f000, 1, (TB), 0, t_tb),
12881 TCE(tbh, 0, e8d0f010, 1, (TB), 0, t_tb),
12883 /* Thumb-2 hardware division instructions (R and M profiles only). */
12884 #undef THUMB_VARIANT
12885 #define THUMB_VARIANT &arm_ext_div
12886 TCE(sdiv, 0, fb90f0f0, 3, (RR, oRR, RR), 0, t_div),
12887 TCE(udiv, 0, fbb0f0f0, 3, (RR, oRR, RR), 0, t_div),
12889 /* ARM V7 instructions. */
12891 #define ARM_VARIANT &arm_ext_v7
12892 #undef THUMB_VARIANT
12893 #define THUMB_VARIANT &arm_ext_v7
12894 TUF(pli, 450f000, f910f000, 1, (ADDR), pli, t_pld),
12895 TCE(dbg, 320f0f0, f3af80f0, 1, (I15), dbg, t_dbg),
12896 TUF(dmb, 57ff050, f3bf8f50, 1, (oBARRIER), barrier, t_barrier),
12897 TUF(dsb, 57ff040, f3bf8f40, 1, (oBARRIER), barrier, t_barrier),
12898 TUF(isb, 57ff060, f3bf8f60, 1, (oBARRIER), barrier, t_barrier),
12901 #define ARM_VARIANT &fpu_fpa_ext_v1 /* Core FPA instruction set (V1). */
12902 cCE(wfs, e200110, 1, (RR), rd),
12903 cCE(rfs, e300110, 1, (RR), rd),
12904 cCE(wfc, e400110, 1, (RR), rd),
12905 cCE(rfc, e500110, 1, (RR), rd),
12907 cCL(ldfs, c100100, 2, (RF, ADDR), rd_cpaddr),
12908 cCL(ldfd, c108100, 2, (RF, ADDR), rd_cpaddr),
12909 cCL(ldfe, c500100, 2, (RF, ADDR), rd_cpaddr),
12910 cCL(ldfp, c508100, 2, (RF, ADDR), rd_cpaddr),
12912 cCL(stfs, c000100, 2, (RF, ADDR), rd_cpaddr),
12913 cCL(stfd, c008100, 2, (RF, ADDR), rd_cpaddr),
12914 cCL(stfe, c400100, 2, (RF, ADDR), rd_cpaddr),
12915 cCL(stfp, c408100, 2, (RF, ADDR), rd_cpaddr),
12917 cCL(mvfs, e008100, 2, (RF, RF_IF), rd_rm),
12918 cCL(mvfsp, e008120, 2, (RF, RF_IF), rd_rm),
12919 cCL(mvfsm, e008140, 2, (RF, RF_IF), rd_rm),
12920 cCL(mvfsz, e008160, 2, (RF, RF_IF), rd_rm),
12921 cCL(mvfd, e008180, 2, (RF, RF_IF), rd_rm),
12922 cCL(mvfdp, e0081a0, 2, (RF, RF_IF), rd_rm),
12923 cCL(mvfdm, e0081c0, 2, (RF, RF_IF), rd_rm),
12924 cCL(mvfdz, e0081e0, 2, (RF, RF_IF), rd_rm),
12925 cCL(mvfe, e088100, 2, (RF, RF_IF), rd_rm),
12926 cCL(mvfep, e088120, 2, (RF, RF_IF), rd_rm),
12927 cCL(mvfem, e088140, 2, (RF, RF_IF), rd_rm),
12928 cCL(mvfez, e088160, 2, (RF, RF_IF), rd_rm),
12930 cCL(mnfs, e108100, 2, (RF, RF_IF), rd_rm),
12931 cCL(mnfsp, e108120, 2, (RF, RF_IF), rd_rm),
12932 cCL(mnfsm, e108140, 2, (RF, RF_IF), rd_rm),
12933 cCL(mnfsz, e108160, 2, (RF, RF_IF), rd_rm),
12934 cCL(mnfd, e108180, 2, (RF, RF_IF), rd_rm),
12935 cCL(mnfdp, e1081a0, 2, (RF, RF_IF), rd_rm),
12936 cCL(mnfdm, e1081c0, 2, (RF, RF_IF), rd_rm),
12937 cCL(mnfdz, e1081e0, 2, (RF, RF_IF), rd_rm),
12938 cCL(mnfe, e188100, 2, (RF, RF_IF), rd_rm),
12939 cCL(mnfep, e188120, 2, (RF, RF_IF), rd_rm),
12940 cCL(mnfem, e188140, 2, (RF, RF_IF), rd_rm),
12941 cCL(mnfez, e188160, 2, (RF, RF_IF), rd_rm),
12943 cCL(abss, e208100, 2, (RF, RF_IF), rd_rm),
12944 cCL(abssp, e208120, 2, (RF, RF_IF), rd_rm),
12945 cCL(abssm, e208140, 2, (RF, RF_IF), rd_rm),
12946 cCL(abssz, e208160, 2, (RF, RF_IF), rd_rm),
12947 cCL(absd, e208180, 2, (RF, RF_IF), rd_rm),
12948 cCL(absdp, e2081a0, 2, (RF, RF_IF), rd_rm),
12949 cCL(absdm, e2081c0, 2, (RF, RF_IF), rd_rm),
12950 cCL(absdz, e2081e0, 2, (RF, RF_IF), rd_rm),
12951 cCL(abse, e288100, 2, (RF, RF_IF), rd_rm),
12952 cCL(absep, e288120, 2, (RF, RF_IF), rd_rm),
12953 cCL(absem, e288140, 2, (RF, RF_IF), rd_rm),
12954 cCL(absez, e288160, 2, (RF, RF_IF), rd_rm),
12956 cCL(rnds, e308100, 2, (RF, RF_IF), rd_rm),
12957 cCL(rndsp, e308120, 2, (RF, RF_IF), rd_rm),
12958 cCL(rndsm, e308140, 2, (RF, RF_IF), rd_rm),
12959 cCL(rndsz, e308160, 2, (RF, RF_IF), rd_rm),
12960 cCL(rndd, e308180, 2, (RF, RF_IF), rd_rm),
12961 cCL(rnddp, e3081a0, 2, (RF, RF_IF), rd_rm),
12962 cCL(rnddm, e3081c0, 2, (RF, RF_IF), rd_rm),
12963 cCL(rnddz, e3081e0, 2, (RF, RF_IF), rd_rm),
12964 cCL(rnde, e388100, 2, (RF, RF_IF), rd_rm),
12965 cCL(rndep, e388120, 2, (RF, RF_IF), rd_rm),
12966 cCL(rndem, e388140, 2, (RF, RF_IF), rd_rm),
12967 cCL(rndez, e388160, 2, (RF, RF_IF), rd_rm),
12969 cCL(sqts, e408100, 2, (RF, RF_IF), rd_rm),
12970 cCL(sqtsp, e408120, 2, (RF, RF_IF), rd_rm),
12971 cCL(sqtsm, e408140, 2, (RF, RF_IF), rd_rm),
12972 cCL(sqtsz, e408160, 2, (RF, RF_IF), rd_rm),
12973 cCL(sqtd, e408180, 2, (RF, RF_IF), rd_rm),
12974 cCL(sqtdp, e4081a0, 2, (RF, RF_IF), rd_rm),
12975 cCL(sqtdm, e4081c0, 2, (RF, RF_IF), rd_rm),
12976 cCL(sqtdz, e4081e0, 2, (RF, RF_IF), rd_rm),
12977 cCL(sqte, e488100, 2, (RF, RF_IF), rd_rm),
12978 cCL(sqtep, e488120, 2, (RF, RF_IF), rd_rm),
12979 cCL(sqtem, e488140, 2, (RF, RF_IF), rd_rm),
12980 cCL(sqtez, e488160, 2, (RF, RF_IF), rd_rm),
12982 cCL(logs, e508100, 2, (RF, RF_IF), rd_rm),
12983 cCL(logsp, e508120, 2, (RF, RF_IF), rd_rm),
12984 cCL(logsm, e508140, 2, (RF, RF_IF), rd_rm),
12985 cCL(logsz, e508160, 2, (RF, RF_IF), rd_rm),
12986 cCL(logd, e508180, 2, (RF, RF_IF), rd_rm),
12987 cCL(logdp, e5081a0, 2, (RF, RF_IF), rd_rm),
12988 cCL(logdm, e5081c0, 2, (RF, RF_IF), rd_rm),
12989 cCL(logdz, e5081e0, 2, (RF, RF_IF), rd_rm),
12990 cCL(loge, e588100, 2, (RF, RF_IF), rd_rm),
12991 cCL(logep, e588120, 2, (RF, RF_IF), rd_rm),
12992 cCL(logem, e588140, 2, (RF, RF_IF), rd_rm),
12993 cCL(logez, e588160, 2, (RF, RF_IF), rd_rm),
12995 cCL(lgns, e608100, 2, (RF, RF_IF), rd_rm),
12996 cCL(lgnsp, e608120, 2, (RF, RF_IF), rd_rm),
12997 cCL(lgnsm, e608140, 2, (RF, RF_IF), rd_rm),
12998 cCL(lgnsz, e608160, 2, (RF, RF_IF), rd_rm),
12999 cCL(lgnd, e608180, 2, (RF, RF_IF), rd_rm),
13000 cCL(lgndp, e6081a0, 2, (RF, RF_IF), rd_rm),
13001 cCL(lgndm, e6081c0, 2, (RF, RF_IF), rd_rm),
13002 cCL(lgndz, e6081e0, 2, (RF, RF_IF), rd_rm),
13003 cCL(lgne, e688100, 2, (RF, RF_IF), rd_rm),
13004 cCL(lgnep, e688120, 2, (RF, RF_IF), rd_rm),
13005 cCL(lgnem, e688140, 2, (RF, RF_IF), rd_rm),
13006 cCL(lgnez, e688160, 2, (RF, RF_IF), rd_rm),
13008 cCL(exps, e708100, 2, (RF, RF_IF), rd_rm),
13009 cCL(expsp, e708120, 2, (RF, RF_IF), rd_rm),
13010 cCL(expsm, e708140, 2, (RF, RF_IF), rd_rm),
13011 cCL(expsz, e708160, 2, (RF, RF_IF), rd_rm),
13012 cCL(expd, e708180, 2, (RF, RF_IF), rd_rm),
13013 cCL(expdp, e7081a0, 2, (RF, RF_IF), rd_rm),
13014 cCL(expdm, e7081c0, 2, (RF, RF_IF), rd_rm),
13015 cCL(expdz, e7081e0, 2, (RF, RF_IF), rd_rm),
13016 cCL(expe, e788100, 2, (RF, RF_IF), rd_rm),
13017 cCL(expep, e788120, 2, (RF, RF_IF), rd_rm),
13018 cCL(expem, e788140, 2, (RF, RF_IF), rd_rm),
13019  cCL(expez,	e788160, 2, (RF, RF_IF),     rd_rm),	/* was "expdz": duplicated L13015's mnemonic and left the extended-precision zero-rounding form of EXP unassemblable */
13021 cCL(sins, e808100, 2, (RF, RF_IF), rd_rm),
13022 cCL(sinsp, e808120, 2, (RF, RF_IF), rd_rm),
13023 cCL(sinsm, e808140, 2, (RF, RF_IF), rd_rm),
13024 cCL(sinsz, e808160, 2, (RF, RF_IF), rd_rm),
13025 cCL(sind, e808180, 2, (RF, RF_IF), rd_rm),
13026 cCL(sindp, e8081a0, 2, (RF, RF_IF), rd_rm),
13027 cCL(sindm, e8081c0, 2, (RF, RF_IF), rd_rm),
13028 cCL(sindz, e8081e0, 2, (RF, RF_IF), rd_rm),
13029 cCL(sine, e888100, 2, (RF, RF_IF), rd_rm),
13030 cCL(sinep, e888120, 2, (RF, RF_IF), rd_rm),
13031 cCL(sinem, e888140, 2, (RF, RF_IF), rd_rm),
13032 cCL(sinez, e888160, 2, (RF, RF_IF), rd_rm),
13034 cCL(coss, e908100, 2, (RF, RF_IF), rd_rm),
13035 cCL(cossp, e908120, 2, (RF, RF_IF), rd_rm),
13036 cCL(cossm, e908140, 2, (RF, RF_IF), rd_rm),
13037 cCL(cossz, e908160, 2, (RF, RF_IF), rd_rm),
13038 cCL(cosd, e908180, 2, (RF, RF_IF), rd_rm),
13039 cCL(cosdp, e9081a0, 2, (RF, RF_IF), rd_rm),
13040 cCL(cosdm, e9081c0, 2, (RF, RF_IF), rd_rm),
13041 cCL(cosdz, e9081e0, 2, (RF, RF_IF), rd_rm),
13042 cCL(cose, e988100, 2, (RF, RF_IF), rd_rm),
13043 cCL(cosep, e988120, 2, (RF, RF_IF), rd_rm),
13044 cCL(cosem, e988140, 2, (RF, RF_IF), rd_rm),
13045 cCL(cosez, e988160, 2, (RF, RF_IF), rd_rm),
13047 cCL(tans, ea08100, 2, (RF, RF_IF), rd_rm),
13048 cCL(tansp, ea08120, 2, (RF, RF_IF), rd_rm),
13049 cCL(tansm, ea08140, 2, (RF, RF_IF), rd_rm),
13050 cCL(tansz, ea08160, 2, (RF, RF_IF), rd_rm),
13051 cCL(tand, ea08180, 2, (RF, RF_IF), rd_rm),
13052 cCL(tandp, ea081a0, 2, (RF, RF_IF), rd_rm),
13053 cCL(tandm, ea081c0, 2, (RF, RF_IF), rd_rm),
13054 cCL(tandz, ea081e0, 2, (RF, RF_IF), rd_rm),
13055 cCL(tane, ea88100, 2, (RF, RF_IF), rd_rm),
13056 cCL(tanep, ea88120, 2, (RF, RF_IF), rd_rm),
13057 cCL(tanem, ea88140, 2, (RF, RF_IF), rd_rm),
13058 cCL(tanez, ea88160, 2, (RF, RF_IF), rd_rm),
13060 cCL(asns, eb08100, 2, (RF, RF_IF), rd_rm),
13061 cCL(asnsp, eb08120, 2, (RF, RF_IF), rd_rm),
13062 cCL(asnsm, eb08140, 2, (RF, RF_IF), rd_rm),
13063 cCL(asnsz, eb08160, 2, (RF, RF_IF), rd_rm),
13064 cCL(asnd, eb08180, 2, (RF, RF_IF), rd_rm),
13065 cCL(asndp, eb081a0, 2, (RF, RF_IF), rd_rm),
13066 cCL(asndm, eb081c0, 2, (RF, RF_IF), rd_rm),
13067 cCL(asndz, eb081e0, 2, (RF, RF_IF), rd_rm),
13068 cCL(asne, eb88100, 2, (RF, RF_IF), rd_rm),
13069 cCL(asnep, eb88120, 2, (RF, RF_IF), rd_rm),
13070 cCL(asnem, eb88140, 2, (RF, RF_IF), rd_rm),
13071 cCL(asnez, eb88160, 2, (RF, RF_IF), rd_rm),
13073 cCL(acss, ec08100, 2, (RF, RF_IF), rd_rm),
13074 cCL(acssp, ec08120, 2, (RF, RF_IF), rd_rm),
13075 cCL(acssm, ec08140, 2, (RF, RF_IF), rd_rm),
13076 cCL(acssz, ec08160, 2, (RF, RF_IF), rd_rm),
13077 cCL(acsd, ec08180, 2, (RF, RF_IF), rd_rm),
13078 cCL(acsdp, ec081a0, 2, (RF, RF_IF), rd_rm),
13079 cCL(acsdm, ec081c0, 2, (RF, RF_IF), rd_rm),
13080 cCL(acsdz, ec081e0, 2, (RF, RF_IF), rd_rm),
13081 cCL(acse, ec88100, 2, (RF, RF_IF), rd_rm),
13082 cCL(acsep, ec88120, 2, (RF, RF_IF), rd_rm),
13083 cCL(acsem, ec88140, 2, (RF, RF_IF), rd_rm),
13084 cCL(acsez, ec88160, 2, (RF, RF_IF), rd_rm),
13086 cCL(atns, ed08100, 2, (RF, RF_IF), rd_rm),
13087 cCL(atnsp, ed08120, 2, (RF, RF_IF), rd_rm),
13088 cCL(atnsm, ed08140, 2, (RF, RF_IF), rd_rm),
13089 cCL(atnsz, ed08160, 2, (RF, RF_IF), rd_rm),
13090 cCL(atnd, ed08180, 2, (RF, RF_IF), rd_rm),
13091 cCL(atndp, ed081a0, 2, (RF, RF_IF), rd_rm),
13092 cCL(atndm, ed081c0, 2, (RF, RF_IF), rd_rm),
13093 cCL(atndz, ed081e0, 2, (RF, RF_IF), rd_rm),
13094 cCL(atne, ed88100, 2, (RF, RF_IF), rd_rm),
13095 cCL(atnep, ed88120, 2, (RF, RF_IF), rd_rm),
13096 cCL(atnem, ed88140, 2, (RF, RF_IF), rd_rm),
13097 cCL(atnez, ed88160, 2, (RF, RF_IF), rd_rm),
13099 cCL(urds, ee08100, 2, (RF, RF_IF), rd_rm),
13100 cCL(urdsp, ee08120, 2, (RF, RF_IF), rd_rm),
13101 cCL(urdsm, ee08140, 2, (RF, RF_IF), rd_rm),
13102 cCL(urdsz, ee08160, 2, (RF, RF_IF), rd_rm),
13103 cCL(urdd, ee08180, 2, (RF, RF_IF), rd_rm),
13104 cCL(urddp, ee081a0, 2, (RF, RF_IF), rd_rm),
13105 cCL(urddm, ee081c0, 2, (RF, RF_IF), rd_rm),
13106 cCL(urddz, ee081e0, 2, (RF, RF_IF), rd_rm),
13107 cCL(urde, ee88100, 2, (RF, RF_IF), rd_rm),
13108 cCL(urdep, ee88120, 2, (RF, RF_IF), rd_rm),
13109 cCL(urdem, ee88140, 2, (RF, RF_IF), rd_rm),
13110 cCL(urdez, ee88160, 2, (RF, RF_IF), rd_rm),
13112 cCL(nrms, ef08100, 2, (RF, RF_IF), rd_rm),
13113 cCL(nrmsp, ef08120, 2, (RF, RF_IF), rd_rm),
13114 cCL(nrmsm, ef08140, 2, (RF, RF_IF), rd_rm),
13115 cCL(nrmsz, ef08160, 2, (RF, RF_IF), rd_rm),
13116 cCL(nrmd, ef08180, 2, (RF, RF_IF), rd_rm),
13117 cCL(nrmdp, ef081a0, 2, (RF, RF_IF), rd_rm),
13118 cCL(nrmdm, ef081c0, 2, (RF, RF_IF), rd_rm),
13119 cCL(nrmdz, ef081e0, 2, (RF, RF_IF), rd_rm),
13120 cCL(nrme, ef88100, 2, (RF, RF_IF), rd_rm),
13121 cCL(nrmep, ef88120, 2, (RF, RF_IF), rd_rm),
13122 cCL(nrmem, ef88140, 2, (RF, RF_IF), rd_rm),
13123 cCL(nrmez, ef88160, 2, (RF, RF_IF), rd_rm),
13125 cCL(adfs, e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
13126 cCL(adfsp, e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
13127 cCL(adfsm, e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
13128 cCL(adfsz, e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
13129 cCL(adfd, e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
13130 cCL(adfdp, e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13131 cCL(adfdm, e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13132 cCL(adfdz, e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13133 cCL(adfe, e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
13134 cCL(adfep, e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
13135 cCL(adfem, e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
13136 cCL(adfez, e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
13138 cCL(sufs, e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
13139 cCL(sufsp, e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
13140 cCL(sufsm, e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
13141 cCL(sufsz, e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
13142 cCL(sufd, e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
13143 cCL(sufdp, e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13144 cCL(sufdm, e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13145 cCL(sufdz, e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13146 cCL(sufe, e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
13147 cCL(sufep, e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
13148 cCL(sufem, e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
13149 cCL(sufez, e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
13151 cCL(rsfs, e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
13152 cCL(rsfsp, e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
13153 cCL(rsfsm, e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
13154 cCL(rsfsz, e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
13155 cCL(rsfd, e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
13156 cCL(rsfdp, e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13157 cCL(rsfdm, e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13158 cCL(rsfdz, e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13159 cCL(rsfe, e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
13160 cCL(rsfep, e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
13161 cCL(rsfem, e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
13162 cCL(rsfez, e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
13164 cCL(mufs, e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
13165 cCL(mufsp, e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
13166 cCL(mufsm, e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
13167 cCL(mufsz, e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
13168 cCL(mufd, e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
13169 cCL(mufdp, e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13170 cCL(mufdm, e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13171 cCL(mufdz, e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13172 cCL(mufe, e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
13173 cCL(mufep, e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
13174 cCL(mufem, e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
13175 cCL(mufez, e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
13177 cCL(dvfs, e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
13178 cCL(dvfsp, e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
13179 cCL(dvfsm, e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
13180 cCL(dvfsz, e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
13181 cCL(dvfd, e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
13182 cCL(dvfdp, e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13183 cCL(dvfdm, e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13184 cCL(dvfdz, e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13185 cCL(dvfe, e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
13186 cCL(dvfep, e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
13187 cCL(dvfem, e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
13188 cCL(dvfez, e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
13190 cCL(rdfs, e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
13191 cCL(rdfsp, e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
13192 cCL(rdfsm, e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
13193 cCL(rdfsz, e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
13194 cCL(rdfd, e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
13195 cCL(rdfdp, e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13196 cCL(rdfdm, e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13197 cCL(rdfdz, e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13198 cCL(rdfe, e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
13199 cCL(rdfep, e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
13200 cCL(rdfem, e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
13201 cCL(rdfez, e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
13203 cCL(pows, e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
13204 cCL(powsp, e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
13205 cCL(powsm, e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
13206 cCL(powsz, e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
13207 cCL(powd, e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
13208 cCL(powdp, e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13209 cCL(powdm, e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13210 cCL(powdz, e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13211 cCL(powe, e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
13212 cCL(powep, e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
13213 cCL(powem, e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
13214 cCL(powez, e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
13216 cCL(rpws, e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
13217 cCL(rpwsp, e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
13218 cCL(rpwsm, e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
13219 cCL(rpwsz, e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
13220 cCL(rpwd, e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
13221 cCL(rpwdp, e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13222 cCL(rpwdm, e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13223 cCL(rpwdz, e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13224 cCL(rpwe, e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
13225 cCL(rpwep, e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
13226 cCL(rpwem, e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
13227 cCL(rpwez, e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
13229 cCL(rmfs, e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
13230 cCL(rmfsp, e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
13231 cCL(rmfsm, e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
13232 cCL(rmfsz, e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
13233 cCL(rmfd, e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
13234 cCL(rmfdp, e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13235 cCL(rmfdm, e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13236 cCL(rmfdz, e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13237 cCL(rmfe, e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
13238 cCL(rmfep, e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
13239 cCL(rmfem, e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
13240 cCL(rmfez, e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
13242 cCL(fmls, e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
13243 cCL(fmlsp, e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
13244 cCL(fmlsm, e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
13245 cCL(fmlsz, e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
13246 cCL(fmld, e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
13247 cCL(fmldp, e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13248 cCL(fmldm, e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13249 cCL(fmldz, e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13250 cCL(fmle, e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
13251 cCL(fmlep, e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
13252 cCL(fmlem, e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
13253 cCL(fmlez, e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
13255 cCL(fdvs, ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13256 cCL(fdvsp, ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13257 cCL(fdvsm, ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13258 cCL(fdvsz, ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13259 cCL(fdvd, ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13260 cCL(fdvdp, ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13261 cCL(fdvdm, ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13262 cCL(fdvdz, ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13263 cCL(fdve, ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13264 cCL(fdvep, ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13265 cCL(fdvem, ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13266 cCL(fdvez, ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13268 cCL(frds, eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13269 cCL(frdsp, eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13270 cCL(frdsm, eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13271 cCL(frdsz, eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13272 cCL(frdd, eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13273 cCL(frddp, eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13274 cCL(frddm, eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13275 cCL(frddz, eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13276 cCL(frde, eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13277 cCL(frdep, eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13278 cCL(frdem, eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13279 cCL(frdez, eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13281 cCL(pols, ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
13282 cCL(polsp, ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
13283 cCL(polsm, ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
13284 cCL(polsz, ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
13285 cCL(pold, ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
13286 cCL(poldp, ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
13287 cCL(poldm, ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
13288 cCL(poldz, ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
13289 cCL(pole, ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
13290 cCL(polep, ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
13291 cCL(polem, ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
13292 cCL(polez, ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
13294 cCE(cmf, e90f110, 2, (RF, RF_IF), fpa_cmp),
13295 C3E(cmfe, ed0f110, 2, (RF, RF_IF), fpa_cmp),
13296 cCE(cnf, eb0f110, 2, (RF, RF_IF), fpa_cmp),
13297 C3E(cnfe, ef0f110, 2, (RF, RF_IF), fpa_cmp),
13299 cCL(flts, e000110, 2, (RF, RR), rn_rd),
13300 cCL(fltsp, e000130, 2, (RF, RR), rn_rd),
13301 cCL(fltsm, e000150, 2, (RF, RR), rn_rd),
13302 cCL(fltsz, e000170, 2, (RF, RR), rn_rd),
13303 cCL(fltd, e000190, 2, (RF, RR), rn_rd),
13304 cCL(fltdp, e0001b0, 2, (RF, RR), rn_rd),
13305 cCL(fltdm, e0001d0, 2, (RF, RR), rn_rd),
13306 cCL(fltdz, e0001f0, 2, (RF, RR), rn_rd),
13307 cCL(flte, e080110, 2, (RF, RR), rn_rd),
13308 cCL(fltep, e080130, 2, (RF, RR), rn_rd),
13309 cCL(fltem, e080150, 2, (RF, RR), rn_rd),
13310 cCL(fltez, e080170, 2, (RF, RR), rn_rd),
13312 /* The implementation of the FIX instruction is broken on some
13313 assemblers, in that it accepts a precision specifier as well as a
13314 rounding specifier, despite the fact that this is meaningless.
13315 To be more compatible, we accept it as well, though of course it
13316 does not set any bits. */
13317 cCE(fix, e100110, 2, (RR, RF), rd_rm),
13318 cCL(fixp, e100130, 2, (RR, RF), rd_rm),
13319 cCL(fixm, e100150, 2, (RR, RF), rd_rm),
13320 cCL(fixz, e100170, 2, (RR, RF), rd_rm),
13321 cCL(fixsp, e100130, 2, (RR, RF), rd_rm),
13322 cCL(fixsm, e100150, 2, (RR, RF), rd_rm),
13323 cCL(fixsz, e100170, 2, (RR, RF), rd_rm),
13324 cCL(fixdp, e100130, 2, (RR, RF), rd_rm),
13325 cCL(fixdm, e100150, 2, (RR, RF), rd_rm),
13326 cCL(fixdz, e100170, 2, (RR, RF), rd_rm),
13327 cCL(fixep, e100130, 2, (RR, RF), rd_rm),
13328 cCL(fixem, e100150, 2, (RR, RF), rd_rm),
13329 cCL(fixez, e100170, 2, (RR, RF), rd_rm),
13331 /* Instructions that were new with the real FPA, call them V2. */
13333 #define ARM_VARIANT &fpu_fpa_ext_v2
13334 cCE(lfm, c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13335 cCL(lfmfd, c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13336 cCL(lfmea, d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13337 cCE(sfm, c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13338 cCL(sfmfd, d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13339 cCL(sfmea, c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
13342 #define ARM_VARIANT &fpu_vfp_ext_v1xd /* VFP V1xD (single precision). */
13343 /* Moves and type conversions. */
13344 cCE(fcpys, eb00a40, 2, (RVS, RVS), vfp_sp_monadic),
13345 cCE(fmrs, e100a10, 2, (RR, RVS), vfp_reg_from_sp),
13346 cCE(fmsr, e000a10, 2, (RVS, RR), vfp_sp_from_reg),
13347 cCE(fmstat, ef1fa10, 0, (), noargs),
13348 cCE(fsitos, eb80ac0, 2, (RVS, RVS), vfp_sp_monadic),
13349 cCE(fuitos, eb80a40, 2, (RVS, RVS), vfp_sp_monadic),
13350 cCE(ftosis, ebd0a40, 2, (RVS, RVS), vfp_sp_monadic),
13351 cCE(ftosizs, ebd0ac0, 2, (RVS, RVS), vfp_sp_monadic),
13352 cCE(ftouis, ebc0a40, 2, (RVS, RVS), vfp_sp_monadic),
13353 cCE(ftouizs, ebc0ac0, 2, (RVS, RVS), vfp_sp_monadic),
13354 cCE(fmrx, ef00a10, 2, (RR, RVC), rd_rn),
13355 cCE(fmxr, ee00a10, 2, (RVC, RR), rn_rd),
13357 /* Memory operations. */
13358 cCE(flds, d100a00, 2, (RVS, ADDR), vfp_sp_ldst),
13359 cCE(fsts, d000a00, 2, (RVS, ADDR), vfp_sp_ldst),
13360 cCE(fldmias, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13361 cCE(fldmfds, c900a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13362 cCE(fldmdbs, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13363 cCE(fldmeas, d300a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13364 cCE(fldmiax, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13365 cCE(fldmfdx, c900b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13366 cCE(fldmdbx, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13367 cCE(fldmeax, d300b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13368 cCE(fstmias, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13369 cCE(fstmeas, c800a00, 2, (RRw, VRSLST), vfp_sp_ldstmia),
13370 cCE(fstmdbs, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13371 cCE(fstmfds, d200a00, 2, (RRw, VRSLST), vfp_sp_ldstmdb),
13372 cCE(fstmiax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13373 cCE(fstmeax, c800b00, 2, (RRw, VRDLST), vfp_xp_ldstmia),
13374 cCE(fstmdbx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13375 cCE(fstmfdx, d200b00, 2, (RRw, VRDLST), vfp_xp_ldstmdb),
13377 /* Monadic operations. */
13378 cCE(fabss, eb00ac0, 2, (RVS, RVS), vfp_sp_monadic),
13379 cCE(fnegs, eb10a40, 2, (RVS, RVS), vfp_sp_monadic),
13380 cCE(fsqrts, eb10ac0, 2, (RVS, RVS), vfp_sp_monadic),
13382 /* Dyadic operations. */
13383 cCE(fadds, e300a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13384 cCE(fsubs, e300a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13385 cCE(fmuls, e200a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13386 cCE(fdivs, e800a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13387 cCE(fmacs, e000a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13388 cCE(fmscs, e100a00, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13389 cCE(fnmuls, e200a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13390 cCE(fnmacs, e000a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13391 cCE(fnmscs, e100a40, 3, (RVS, RVS, RVS), vfp_sp_dyadic),
13394 cCE(fcmps, eb40a40, 2, (RVS, RVS), vfp_sp_monadic),
13395 cCE(fcmpzs, eb50a40, 1, (RVS), vfp_sp_compare_z),
13396 cCE(fcmpes, eb40ac0, 2, (RVS, RVS), vfp_sp_monadic),
13397 cCE(fcmpezs, eb50ac0, 1, (RVS), vfp_sp_compare_z),
13400 #define ARM_VARIANT &fpu_vfp_ext_v1 /* VFP V1 (Double precision). */
13401 /* Moves and type conversions. */
13402 cCE(fcpyd, eb00b40, 2, (RVD, RVD), vfp_dp_rd_rm),
13403 cCE(fcvtds, eb70ac0, 2, (RVD, RVS), vfp_dp_sp_cvt),
13404 cCE(fcvtsd, eb70bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13405 cCE(fmdhr, e200b10, 2, (RVD, RR), vfp_dp_rn_rd),
13406 cCE(fmdlr, e000b10, 2, (RVD, RR), vfp_dp_rn_rd),
13407 cCE(fmrdh, e300b10, 2, (RR, RVD), vfp_dp_rd_rn),
13408 cCE(fmrdl, e100b10, 2, (RR, RVD), vfp_dp_rd_rn),
13409 cCE(fsitod, eb80bc0, 2, (RVD, RVS), vfp_dp_sp_cvt),
13410 cCE(fuitod, eb80b40, 2, (RVD, RVS), vfp_dp_sp_cvt),
13411 cCE(ftosid, ebd0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
13412 cCE(ftosizd, ebd0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13413 cCE(ftouid, ebc0b40, 2, (RVS, RVD), vfp_sp_dp_cvt),
13414 cCE(ftouizd, ebc0bc0, 2, (RVS, RVD), vfp_sp_dp_cvt),
13416 /* Memory operations. */
13417 cCE(fldd, d100b00, 2, (RVD, ADDR), vfp_dp_ldst),
13418 cCE(fstd, d000b00, 2, (RVD, ADDR), vfp_dp_ldst),
13419 cCE(fldmiad, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
13420 cCE(fldmfdd, c900b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
13421 cCE(fldmdbd, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
13422 cCE(fldmead, d300b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
13423 cCE(fstmiad, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
13424 cCE(fstmead, c800b00, 2, (RRw, VRDLST), vfp_dp_ldstmia),
13425 cCE(fstmdbd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
13426 cCE(fstmfdd, d200b00, 2, (RRw, VRDLST), vfp_dp_ldstmdb),
13428 /* Monadic operations. */
13429 cCE(fabsd, eb00bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
13430 cCE(fnegd, eb10b40, 2, (RVD, RVD), vfp_dp_rd_rm),
13431 cCE(fsqrtd, eb10bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
13433 /* Dyadic operations. */
13434 cCE(faddd, e300b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13435 cCE(fsubd, e300b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13436 cCE(fmuld, e200b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13437 cCE(fdivd, e800b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13438 cCE(fmacd, e000b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13439 cCE(fmscd, e100b00, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13440 cCE(fnmuld, e200b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13441 cCE(fnmacd, e000b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13442 cCE(fnmscd, e100b40, 3, (RVD, RVD, RVD), vfp_dp_rd_rn_rm),
13445 cCE(fcmpd, eb40b40, 2, (RVD, RVD), vfp_dp_rd_rm),
13446 cCE(fcmpzd, eb50b40, 1, (RVD), vfp_dp_rd),
13447 cCE(fcmped, eb40bc0, 2, (RVD, RVD), vfp_dp_rd_rm),
13448 cCE(fcmpezd, eb50bc0, 1, (RVD), vfp_dp_rd),
13451 #define ARM_VARIANT &fpu_vfp_ext_v2
13452 cCE(fmsrr, c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
13453 cCE(fmrrs, c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
13454 cCE(fmdrr, c400b10, 3, (RVD, RR, RR), vfp_dp_rm_rd_rn),
13455 cCE(fmrrd, c500b10, 3, (RR, RR, RVD), vfp_dp_rd_rn_rm),
13457 #undef THUMB_VARIANT
13458 #define THUMB_VARIANT &fpu_neon_ext_v1
13460 #define ARM_VARIANT &fpu_neon_ext_v1
13461 /* Data processing with three registers of the same length. */
13462 /* integer ops, valid types S8 S16 S32 U8 U16 U32. */
13463 NUF(vaba, 0000710, 3, (RNDQ, RNDQ, RNDQ), neon_dyadic_i_su),
13464 NUF(vabaq, 0000710, 3, (RNQ, RNQ, RNQ), neon_dyadic_i_su),
13465 NUF(vhadd, 0000000, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
13466 NUF(vhaddq, 0000000, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
13467 NUF(vrhadd, 0000100, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
13468 NUF(vrhaddq, 0000100, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
13469 NUF(vhsub, 0000200, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i_su),
13470 NUF(vhsubq, 0000200, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i_su),
13471 /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64. */
13472 NUF(vqadd, 0000010, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
13473 NUF(vqaddq, 0000010, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
13474 NUF(vqsub, 0000210, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
13475 NUF(vqsubq, 0000210, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
13476 NUF(vrshl, 0000500, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
13477 NUF(vrshlq, 0000500, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
13478 NUF(vqrshl, 0000510, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_i64_su),
13479 NUF(vqrshlq, 0000510, 3, (RNQ, oRNQ, RNQ), neon_dyadic_i64_su),
13480 /* If not immediate, fall back to neon_dyadic_i64_su.
13481 shl_imm should accept I8 I16 I32 I64,
13482 qshl_imm should accept S8 S16 S32 S64 U8 U16 U32 U64. */
13483 nUF(vshl, vshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_shl_imm),
13484 nUF(vshlq, vshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_shl_imm),
13485 nUF(vqshl, vqshl, 3, (RNDQ, oRNDQ, RNDQ_I63b), neon_qshl_imm),
13486 nUF(vqshlq, vqshl, 3, (RNQ, oRNQ, RNDQ_I63b), neon_qshl_imm),
13487 /* Logic ops, types optional & ignored. */
13488 nUF(vand, vand, 2, (RNDQ, NILO), neon_logic),
13489 nUF(vandq, vand, 2, (RNQ, NILO), neon_logic),
13490 nUF(vbic, vbic, 2, (RNDQ, NILO), neon_logic),
13491 nUF(vbicq, vbic, 2, (RNQ, NILO), neon_logic),
13492 nUF(vorr, vorr, 2, (RNDQ, NILO), neon_logic),
13493 nUF(vorrq, vorr, 2, (RNQ, NILO), neon_logic),
13494 nUF(vorn, vorn, 2, (RNDQ, NILO), neon_logic),
13495 nUF(vornq, vorn, 2, (RNQ, NILO), neon_logic),
13496 nUF(veor, veor, 3, (RNDQ, oRNDQ, RNDQ), neon_logic),
13497 nUF(veorq, veor, 3, (RNQ, oRNQ, RNQ), neon_logic),
13498 /* Bitfield ops, untyped. */
13499 NUF(vbsl, 1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
13500 NUF(vbslq, 1100110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
13501 NUF(vbit, 1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
13502 NUF(vbitq, 1200110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
13503 NUF(vbif, 1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
13504 NUF(vbifq, 1300110, 3, (RNQ, RNQ, RNQ), neon_bitfield),
13505 /* Int and float variants, types S8 S16 S32 U8 U16 U32 F32. */
13506 nUF(vabd, vabd, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
13507 nUF(vabdq, vabd, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
13508 nUF(vmax, vmax, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
13509 nUF(vmaxq, vmax, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
13510 nUF(vmin, vmin, 3, (RNDQ, oRNDQ, RNDQ), neon_dyadic_if_su),
13511 nUF(vminq, vmin, 3, (RNQ, oRNQ, RNQ), neon_dyadic_if_su),
13512 /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
13513 back to neon_dyadic_if_su. */
13514 nUF(vcge, vcge, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
13515 nUF(vcgeq, vcge, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
13516 nUF(vcgt, vcgt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
13517 nUF(vcgtq, vcgt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp),
13518 nUF(vclt, vclt, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
13519 nUF(vcltq, vclt, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
13520 nUF(vcle, vcle, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
13521 nUF(vcleq, vcle, 3, (RNQ, oRNQ, RNDQ_I0), neon_cmp_inv),
13522 /* Comparison. Type I8 I16 I32 F32. Non-immediate -> neon_dyadic_if_i. */
13523 nUF(vceq, vceq, 3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
13524 nUF(vceqq, vceq, 3, (RNQ, oRNQ, RNDQ_I0), neon_ceq),
13525 /* As above, D registers only. */
13526 nUF(vpmax, vpmax, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
13527 nUF(vpmin, vpmin, 3, (RND, oRND, RND), neon_dyadic_if_su_d),
13528 /* Int and float variants, signedness unimportant. */
13529 /* If not scalar, fall back to neon_dyadic_if_i. */
13530 nUF(vmla, vmla, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
13531 nUF(vmlaq, vmla, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
13532 nUF(vmls, vmls, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mac_maybe_scalar),
13533 nUF(vmlsq, vmls, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mac_maybe_scalar),
13534 nUF(vpadd, vpadd, 3, (RND, oRND, RND), neon_dyadic_if_i_d),
13535 /* Add/sub take types I8 I16 I32 I64 F32. */
13536 nUF(vadd, vadd, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
13537 nUF(vaddq, vadd, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
13538 nUF(vsub, vsub, 3, (RNDQ, oRNDQ, RNDQ), neon_addsub_if_i),
13539 nUF(vsubq, vsub, 3, (RNQ, oRNQ, RNQ), neon_addsub_if_i),
13540 /* vtst takes sizes 8, 16, 32. */
13541 NUF(vtst, 0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
13542 NUF(vtstq, 0000810, 3, (RNQ, oRNQ, RNQ), neon_tst),
13543 /* VMUL takes I8 I16 I32 F32 P8. */
13544 nUF(vmul, vmul, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_mul),
13545 nUF(vmulq, vmul, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_mul),
13546 /* VQD{R}MULH takes S16 S32. */
13547 nUF(vqdmulh, vqdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
13548 nUF(vqdmulhq, vqdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
13549 nUF(vqrdmulh, vqrdmulh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qdmulh),
13550 nUF(vqrdmulhq, vqrdmulh, 3, (RNQ, oRNQ, RNDQ_RNSC), neon_qdmulh),
13551 NUF(vacge, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
13552 NUF(vacgeq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
13553 NUF(vacgt, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
13554 NUF(vacgtq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute),
13555 NUF(vaclt, 0000e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
13556 NUF(vacltq, 0000e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
13557 NUF(vacle, 0200e10, 3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
13558 NUF(vacleq, 0200e10, 3, (RNQ, oRNQ, RNQ), neon_fcmp_absolute_inv),
13559 NUF(vrecps, 0000f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
13560 NUF(vrecpsq, 0000f10, 3, (RNQ, oRNQ, RNQ), neon_step),
13561 NUF(vrsqrts, 0200f10, 3, (RNDQ, oRNDQ, RNDQ), neon_step),
13562 NUF(vrsqrtsq, 0200f10, 3, (RNQ, oRNQ, RNQ), neon_step),
13564 /* Two address, int/float. Types S8 S16 S32 F32. */
13565 NUF(vabs, 1b10300, 2, (RNDQ, RNDQ), neon_abs_neg),
13566 NUF(vabsq, 1b10300, 2, (RNQ, RNQ), neon_abs_neg),
13567 NUF(vneg, 1b10380, 2, (RNDQ, RNDQ), neon_abs_neg),
13568 NUF(vnegq, 1b10380, 2, (RNQ, RNQ), neon_abs_neg),
13570 /* Data processing with two registers and a shift amount. */
13571 /* Right shifts, and variants with rounding.
13572 Types accepted S8 S16 S32 S64 U8 U16 U32 U64. */
13573 NUF(vshr, 0800010, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
13574 NUF(vshrq, 0800010, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
13575 NUF(vrshr, 0800210, 3, (RNDQ, oRNDQ, I64z), neon_rshift_round_imm),
13576 NUF(vrshrq, 0800210, 3, (RNQ, oRNQ, I64z), neon_rshift_round_imm),
13577 NUF(vsra, 0800110, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
13578 NUF(vsraq, 0800110, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
13579 NUF(vrsra, 0800310, 3, (RNDQ, oRNDQ, I64), neon_rshift_round_imm),
13580 NUF(vrsraq, 0800310, 3, (RNQ, oRNQ, I64), neon_rshift_round_imm),
13581 /* Shift and insert. Sizes accepted 8 16 32 64. */
13582 NUF(vsli, 1800510, 3, (RNDQ, oRNDQ, I63), neon_sli),
13583 NUF(vsliq, 1800510, 3, (RNQ, oRNQ, I63), neon_sli),
13584 NUF(vsri, 1800410, 3, (RNDQ, oRNDQ, I64), neon_sri),
13585 NUF(vsriq, 1800410, 3, (RNQ, oRNQ, I64), neon_sri),
13586 /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64. */
13587 NUF(vqshlu, 1800610, 3, (RNDQ, oRNDQ, I63), neon_qshlu_imm),
13588 NUF(vqshluq, 1800610, 3, (RNQ, oRNQ, I63), neon_qshlu_imm),
13589 /* Right shift immediate, saturating & narrowing, with rounding variants.
13590 Types accepted S16 S32 S64 U16 U32 U64. */
13591 NUF(vqshrn, 0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
13592 NUF(vqrshrn, 0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
13593 /* As above, unsigned. Types accepted S16 S32 S64. */
13594 NUF(vqshrun, 0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
13595 NUF(vqrshrun, 0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
13596 /* Right shift narrowing. Types accepted I16 I32 I64. */
13597 NUF(vshrn, 0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
13598 NUF(vrshrn, 0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
13599 /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant. */
13600 nUF(vshll, vshll, 3, (RNQ, RND, I32), neon_shll),
13601 /* CVT with optional immediate for fixed-point variant. */
13602 nUF(vcvt, vcvt, 3, (RNDQ, RNDQ, oI32b), neon_cvt),
13603 nUF(vcvtq, vcvt, 3, (RNQ, RNQ, oI32b), neon_cvt),
13605 /* One register and an immediate value. All encoding special-cased! */
13606 NCE(vmov, 0, 1, (VMOV), neon_mov),
13607 NCE(vmovq, 0, 1, (VMOV), neon_mov),
13608 nUF(vmvn, vmvn, 2, (RNDQ, RNDQ_IMVNb), neon_mvn),
13609 nUF(vmvnq, vmvn, 2, (RNQ, RNDQ_IMVNb), neon_mvn),
13611 /* Data processing, three registers of different lengths. */
13612 /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32. */
13613 NUF(vabal, 0800500, 3, (RNQ, RND, RND), neon_abal),
13614 NUF(vabdl, 0800700, 3, (RNQ, RND, RND), neon_dyadic_long),
13615 NUF(vaddl, 0800000, 3, (RNQ, RND, RND), neon_dyadic_long),
13616 NUF(vsubl, 0800200, 3, (RNQ, RND, RND), neon_dyadic_long),
13617 /* If not scalar, fall back to neon_dyadic_long.
13618 Vector types as above, scalar types S16 S32 U16 U32. */
13619 nUF(vmlal, vmlal, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
13620 nUF(vmlsl, vmlsl, 3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
13621 /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32. */
13622 NUF(vaddw, 0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
13623 NUF(vsubw, 0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
13624 /* Dyadic, narrowing insns. Types I16 I32 I64. */
13625 NUF(vaddhn, 0800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
13626 NUF(vraddhn, 1800400, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
13627 NUF(vsubhn, 0800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
13628 NUF(vrsubhn, 1800600, 3, (RND, RNQ, RNQ), neon_dyadic_narrow),
13629 /* Saturating doubling multiplies. Types S16 S32. */
13630 nUF(vqdmlal, vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
13631 nUF(vqdmlsl, vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
13632 nUF(vqdmull, vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
13633 /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
13634 S16 S32 U16 U32. */
13635 nUF(vmull, vmull, 3, (RNQ, RND, RND_RNSC), neon_vmull),
13637 /* Extract. Size 8. */
13638 NUF(vext, 0b00000, 4, (RNDQ, oRNDQ, RNDQ, I7), neon_ext),
13639 NUF(vextq, 0b00000, 4, (RNQ, oRNQ, RNQ, I7), neon_ext),
13641 /* Two registers, miscellaneous. */
13642 /* Reverse. Sizes 8 16 32 (must be < size in opcode). */
13643 NUF(vrev64, 1b00000, 2, (RNDQ, RNDQ), neon_rev),
13644 NUF(vrev64q, 1b00000, 2, (RNQ, RNQ), neon_rev),
13645 NUF(vrev32, 1b00080, 2, (RNDQ, RNDQ), neon_rev),
13646 NUF(vrev32q, 1b00080, 2, (RNQ, RNQ), neon_rev),
13647 NUF(vrev16, 1b00100, 2, (RNDQ, RNDQ), neon_rev),
13648 NUF(vrev16q, 1b00100, 2, (RNQ, RNQ), neon_rev),
13649 /* Vector replicate. Sizes 8 16 32. */
13650 nCE(vdup, vdup, 2, (RNDQ, RR_RNSC), neon_dup),
13651 nCE(vdupq, vdup, 2, (RNQ, RR_RNSC), neon_dup),
13652 /* VMOVL. Types S8 S16 S32 U8 U16 U32. */
13653 NUF(vmovl, 0800a10, 2, (RNQ, RND), neon_movl),
13654 /* VMOVN. Types I16 I32 I64. */
13655 nUF(vmovn, vmovn, 2, (RND, RNQ), neon_movn),
13656 /* VQMOVN. Types S16 S32 S64 U16 U32 U64. */
13657 nUF(vqmovn, vqmovn, 2, (RND, RNQ), neon_qmovn),
13658 /* VQMOVUN. Types S16 S32 S64. */
13659 nUF(vqmovun, vqmovun, 2, (RND, RNQ), neon_qmovun),
13660 /* VZIP / VUZP. Sizes 8 16 32. */
13661 NUF(vzip, 1b20180, 2, (RNDQ, RNDQ), neon_zip_uzp),
13662 NUF(vzipq, 1b20180, 2, (RNQ, RNQ), neon_zip_uzp),
13663 NUF(vuzp, 1b20100, 2, (RNDQ, RNDQ), neon_zip_uzp),
13664 NUF(vuzpq, 1b20100, 2, (RNQ, RNQ), neon_zip_uzp),
13665 /* VQABS / VQNEG. Types S8 S16 S32. */
13666 NUF(vqabs, 1b00700, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
13667 NUF(vqabsq, 1b00700, 2, (RNQ, RNQ), neon_sat_abs_neg),
13668 NUF(vqneg, 1b00780, 2, (RNDQ, RNDQ), neon_sat_abs_neg),
13669 NUF(vqnegq, 1b00780, 2, (RNQ, RNQ), neon_sat_abs_neg),
13670 /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32. */
13671 NUF(vpadal, 1b00600, 2, (RNDQ, RNDQ), neon_pair_long),
13672 NUF(vpadalq, 1b00600, 2, (RNQ, RNQ), neon_pair_long),
13673 NUF(vpaddl, 1b00200, 2, (RNDQ, RNDQ), neon_pair_long),
13674 NUF(vpaddlq, 1b00200, 2, (RNQ, RNQ), neon_pair_long),
13675 /* Reciprocal estimates. Types U32 F32. */
13676 NUF(vrecpe, 1b30400, 2, (RNDQ, RNDQ), neon_recip_est),
13677 NUF(vrecpeq, 1b30400, 2, (RNQ, RNQ), neon_recip_est),
13678 NUF(vrsqrte, 1b30480, 2, (RNDQ, RNDQ), neon_recip_est),
13679 NUF(vrsqrteq, 1b30480, 2, (RNQ, RNQ), neon_recip_est),
13680 /* VCLS. Types S8 S16 S32. */
13681 NUF(vcls, 1b00400, 2, (RNDQ, RNDQ), neon_cls),
13682 NUF(vclsq, 1b00400, 2, (RNQ, RNQ), neon_cls),
13683 /* VCLZ. Types I8 I16 I32. */
13684 NUF(vclz, 1b00480, 2, (RNDQ, RNDQ), neon_clz),
13685 NUF(vclzq, 1b00480, 2, (RNQ, RNQ), neon_clz),
13686 /* VCNT. Size 8. */
13687 NUF(vcnt, 1b00500, 2, (RNDQ, RNDQ), neon_cnt),
13688 NUF(vcntq, 1b00500, 2, (RNQ, RNQ), neon_cnt),
13689 /* Two address, untyped. */
13690 NUF(vswp, 1b20000, 2, (RNDQ, RNDQ), neon_swp),
13691 NUF(vswpq, 1b20000, 2, (RNQ, RNQ), neon_swp),
13692 /* VTRN. Sizes 8 16 32. */
13693 nUF(vtrn, vtrn, 2, (RNDQ, RNDQ), neon_trn),
13694 nUF(vtrnq, vtrn, 2, (RNQ, RNQ), neon_trn),
13696 /* Table lookup. Size 8. */
13697 NUF(vtbl, 1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
13698 NUF(vtbx, 1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
13700 #undef THUMB_VARIANT
13701 #define THUMB_VARIANT &fpu_vfp_v3_or_neon_ext
13703 #define ARM_VARIANT &fpu_vfp_v3_or_neon_ext
13705 /* Load/store instructions. Available in Neon or VFPv3. */
13706 NCE(vldm, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
13707 NCE(vldmia, c900b00, 2, (RRw, NRDLST), neon_ldm_stm),
13708 NCE(vldmdb, d100b00, 2, (RRw, NRDLST), neon_ldm_stm),
13709 NCE(vstm, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
13710 NCE(vstmia, c800b00, 2, (RRw, NRDLST), neon_ldm_stm),
13711 NCE(vstmdb, d000b00, 2, (RRw, NRDLST), neon_ldm_stm),
13712 NCE(vldr, d100b00, 2, (RND, ADDR), neon_ldr_str),
13713 NCE(vstr, d000b00, 2, (RND, ADDR), neon_ldr_str),
13715 /* Neon element/structure load/store. */
13716 nUF(vld1, vld1, 2, (NSTRLST, ADDR), neon_ldx_stx),
13717 nUF(vst1, vst1, 2, (NSTRLST, ADDR), neon_ldx_stx),
13718 nUF(vld2, vld2, 2, (NSTRLST, ADDR), neon_ldx_stx),
13719 nUF(vst2, vst2, 2, (NSTRLST, ADDR), neon_ldx_stx),
13720 nUF(vld3, vld3, 2, (NSTRLST, ADDR), neon_ldx_stx),
13721 nUF(vst3, vst3, 2, (NSTRLST, ADDR), neon_ldx_stx),
13722 nUF(vld4, vld4, 2, (NSTRLST, ADDR), neon_ldx_stx),
13723 nUF(vst4, vst4, 2, (NSTRLST, ADDR), neon_ldx_stx),
13725 #undef THUMB_VARIANT
13726 #define THUMB_VARIANT &fpu_vfp_ext_v3
13728 #define ARM_VARIANT &fpu_vfp_ext_v3
13730 cCE(fconsts, eb00a00, 2, (RVS, I255), vfp_sp_const),
13731 cCE(fconstd, eb00b00, 2, (RVD, I255), vfp_dp_const),
13732 cCE(fshtos, eba0a40, 2, (RVS, I16z), vfp_sp_conv_16),
13733 cCE(fshtod, eba0b40, 2, (RVD, I16z), vfp_dp_conv_16),
13734 cCE(fsltos, eba0ac0, 2, (RVS, I32), vfp_sp_conv_32),
13735 cCE(fsltod, eba0bc0, 2, (RVD, I32), vfp_dp_conv_32),
13736 cCE(fuhtos, ebb0a40, 2, (RVS, I16z), vfp_sp_conv_16),
13737 cCE(fuhtod, ebb0b40, 2, (RVD, I16z), vfp_dp_conv_16),
13738 cCE(fultos, ebb0ac0, 2, (RVS, I32), vfp_sp_conv_32),
13739 cCE(fultod, ebb0bc0, 2, (RVD, I32), vfp_dp_conv_32),
13740 cCE(ftoshs, ebe0a40, 2, (RVS, I16z), vfp_sp_conv_16),
13741 cCE(ftoshd, ebe0b40, 2, (RVD, I16z), vfp_dp_conv_16),
13742 cCE(ftosls, ebe0ac0, 2, (RVS, I32), vfp_sp_conv_32),
13743 cCE(ftosld, ebe0bc0, 2, (RVD, I32), vfp_dp_conv_32),
13744 cCE(ftouhs, ebf0a40, 2, (RVS, I16z), vfp_sp_conv_16),
13745 cCE(ftouhd, ebf0b40, 2, (RVD, I16z), vfp_dp_conv_16),
13746 cCE(ftouls, ebf0ac0, 2, (RVS, I32), vfp_sp_conv_32),
13747 cCE(ftould, ebf0bc0, 2, (RVD, I32), vfp_dp_conv_32),
13749 #undef THUMB_VARIANT
13751 #define ARM_VARIANT &arm_cext_xscale /* Intel XScale extensions. */
13752 cCE(mia, e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
13753 cCE(miaph, e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
13754 cCE(miabb, e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
13755 cCE(miabt, e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
13756 cCE(miatb, e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
13757 cCE(miatt, e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
13758 cCE(mar, c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
13759 cCE(mra, c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
13762 #define ARM_VARIANT &arm_cext_iwmmxt /* Intel Wireless MMX technology. */
13763 cCE(tandcb, e13f130, 1, (RR), iwmmxt_tandorc),
13764 cCE(tandch, e53f130, 1, (RR), iwmmxt_tandorc),
13765 cCE(tandcw, e93f130, 1, (RR), iwmmxt_tandorc),
13766 cCE(tbcstb, e400010, 2, (RIWR, RR), rn_rd),
13767 cCE(tbcsth, e400050, 2, (RIWR, RR), rn_rd),
13768 cCE(tbcstw, e400090, 2, (RIWR, RR), rn_rd),
13769 cCE(textrcb, e130170, 2, (RR, I7), iwmmxt_textrc),
13770 cCE(textrch, e530170, 2, (RR, I7), iwmmxt_textrc),
13771 cCE(textrcw, e930170, 2, (RR, I7), iwmmxt_textrc),
13772 cCE(textrmub, e100070, 3, (RR, RIWR, I7), iwmmxt_textrm),
13773 cCE(textrmuh, e500070, 3, (RR, RIWR, I7), iwmmxt_textrm),
13774 cCE(textrmuw, e900070, 3, (RR, RIWR, I7), iwmmxt_textrm),
13775 cCE(textrmsb, e100078, 3, (RR, RIWR, I7), iwmmxt_textrm),
13776 cCE(textrmsh, e500078, 3, (RR, RIWR, I7), iwmmxt_textrm),
13777 cCE(textrmsw, e900078, 3, (RR, RIWR, I7), iwmmxt_textrm),
13778 cCE(tinsrb, e600010, 3, (RIWR, RR, I7), iwmmxt_tinsr),
13779 cCE(tinsrh, e600050, 3, (RIWR, RR, I7), iwmmxt_tinsr),
13780 cCE(tinsrw, e600090, 3, (RIWR, RR, I7), iwmmxt_tinsr),
13781 cCE(tmcr, e000110, 2, (RIWC, RR), rn_rd),
13782 cCE(tmcrr, c400000, 3, (RIWR, RR, RR), rm_rd_rn),
13783 cCE(tmia, e200010, 3, (RIWR, RR, RR), iwmmxt_tmia),
13784 cCE(tmiaph, e280010, 3, (RIWR, RR, RR), iwmmxt_tmia),
13785 cCE(tmiabb, e2c0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
13786 cCE(tmiabt, e2d0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
13787 cCE(tmiatb, e2e0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
13788 cCE(tmiatt, e2f0010, 3, (RIWR, RR, RR), iwmmxt_tmia),
13789 cCE(tmovmskb, e100030, 2, (RR, RIWR), rd_rn),
13790 cCE(tmovmskh, e500030, 2, (RR, RIWR), rd_rn),
13791 cCE(tmovmskw, e900030, 2, (RR, RIWR), rd_rn),
13792 cCE(tmrc, e100110, 2, (RR, RIWC), rd_rn),
13793 cCE(tmrrc, c500000, 3, (RR, RR, RIWR), rd_rn_rm),
13794 cCE(torcb, e13f150, 1, (RR), iwmmxt_tandorc),
13795 cCE(torch, e53f150, 1, (RR), iwmmxt_tandorc),
13796 cCE(torcw, e93f150, 1, (RR), iwmmxt_tandorc),
13797 cCE(waccb, e0001c0, 2, (RIWR, RIWR), rd_rn),
13798 cCE(wacch, e4001c0, 2, (RIWR, RIWR), rd_rn),
13799 cCE(waccw, e8001c0, 2, (RIWR, RIWR), rd_rn),
13800 cCE(waddbss, e300180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13801 cCE(waddb, e000180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13802 cCE(waddbus, e100180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13803 cCE(waddhss, e700180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13804 cCE(waddh, e400180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13805 cCE(waddhus, e500180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13806 cCE(waddwss, eb00180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13807 cCE(waddw, e800180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13808 cCE(waddwus, e900180, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13809 cCE(waligni, e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
13810 cCE(walignr0, e800020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13811 cCE(walignr1, e900020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13812 cCE(walignr2, ea00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13813 cCE(walignr3, eb00020, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13814 cCE(wand, e200000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13815 cCE(wandn, e300000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13816 cCE(wavg2b, e800000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13817 cCE(wavg2br, e900000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13818 cCE(wavg2h, ec00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13819 cCE(wavg2hr, ed00000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13820 cCE(wcmpeqb, e000060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13821 cCE(wcmpeqh, e400060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13822 cCE(wcmpeqw, e800060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13823 cCE(wcmpgtub, e100060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13824 cCE(wcmpgtuh, e500060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13825 cCE(wcmpgtuw, e900060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13826 cCE(wcmpgtsb, e300060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13827 cCE(wcmpgtsh, e700060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13828 cCE(wcmpgtsw, eb00060, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13829 cCE(wldrb, c100000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
13830 cCE(wldrh, c500000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
13831 cCE(wldrw, c100100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
13832 cCE(wldrd, c500100, 2, (RIWR, ADDR), iwmmxt_wldstd),
13833 cCE(wmacs, e600100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13834 cCE(wmacsz, e700100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13835 cCE(wmacu, e400100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13836 cCE(wmacuz, e500100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13837 cCE(wmadds, ea00100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13838 cCE(wmaddu, e800100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13839 cCE(wmaxsb, e200160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13840 cCE(wmaxsh, e600160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13841 cCE(wmaxsw, ea00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13842 cCE(wmaxub, e000160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13843 cCE(wmaxuh, e400160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13844 cCE(wmaxuw, e800160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13845 cCE(wminsb, e300160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13846 cCE(wminsh, e700160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13847 cCE(wminsw, eb00160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13848 cCE(wminub, e100160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13849 cCE(wminuh, e500160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13850 cCE(wminuw, e900160, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13851 cCE(wmov, e000000, 2, (RIWR, RIWR), iwmmxt_wmov),
13852 cCE(wmulsm, e300100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13853 cCE(wmulsl, e200100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13854 cCE(wmulum, e100100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13855 cCE(wmulul, e000100, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13856 cCE(wor, e000000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13857 cCE(wpackhss, e700080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13858 cCE(wpackhus, e500080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13859 cCE(wpackwss, eb00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13860 cCE(wpackwus, e900080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13861 cCE(wpackdss, ef00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13862 cCE(wpackdus, ed00080, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13863 cCE(wrorh, e700040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13864 cCE(wrorhg, e700148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
13865 cCE(wrorw, eb00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13866 cCE(wrorwg, eb00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
13867 cCE(wrord, ef00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13868 cCE(wrordg, ef00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
13869 cCE(wsadb, e000120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13870 cCE(wsadbz, e100120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13871 cCE(wsadh, e400120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13872 cCE(wsadhz, e500120, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13873 cCE(wshufh, e0001e0, 3, (RIWR, RIWR, I255), iwmmxt_wshufh),
13874 cCE(wsllh, e500040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13875 cCE(wsllhg, e500148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
13876 cCE(wsllw, e900040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13877 cCE(wsllwg, e900148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
13878 cCE(wslld, ed00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13879 cCE(wslldg, ed00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
13880 cCE(wsrah, e400040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13881 cCE(wsrahg, e400148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
13882 cCE(wsraw, e800040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13883 cCE(wsrawg, e800148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
13884 cCE(wsrad, ec00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13885 cCE(wsradg, ec00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
13886 cCE(wsrlh, e600040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13887 cCE(wsrlhg, e600148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
13888 cCE(wsrlw, ea00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13889 cCE(wsrlwg, ea00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
13890 cCE(wsrld, ee00040, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13891 cCE(wsrldg, ee00148, 3, (RIWR, RIWR, RIWG), rd_rn_rm),
13892 cCE(wstrb, c000000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
13893 cCE(wstrh, c400000, 2, (RIWR, ADDR), iwmmxt_wldstbh),
13894 cCE(wstrw, c000100, 2, (RIWR_RIWC, ADDR), iwmmxt_wldstw),
13895 cCE(wstrd, c400100, 2, (RIWR, ADDR), iwmmxt_wldstd),
13896 cCE(wsubbss, e3001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13897 cCE(wsubb, e0001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13898 cCE(wsubbus, e1001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13899 cCE(wsubhss, e7001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13900 cCE(wsubh, e4001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13901 cCE(wsubhus, e5001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13902 cCE(wsubwss, eb001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13903 cCE(wsubw, e8001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13904 cCE(wsubwus, e9001a0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13905 cCE(wunpckehub,e0000c0, 2, (RIWR, RIWR), rd_rn),
13906 cCE(wunpckehuh,e4000c0, 2, (RIWR, RIWR), rd_rn),
13907 cCE(wunpckehuw,e8000c0, 2, (RIWR, RIWR), rd_rn),
13908 cCE(wunpckehsb,e2000c0, 2, (RIWR, RIWR), rd_rn),
13909 cCE(wunpckehsh,e6000c0, 2, (RIWR, RIWR), rd_rn),
13910 cCE(wunpckehsw,ea000c0, 2, (RIWR, RIWR), rd_rn),
13911 cCE(wunpckihb, e1000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13912 cCE(wunpckihh, e5000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13913 cCE(wunpckihw, e9000c0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13914 cCE(wunpckelub,e0000e0, 2, (RIWR, RIWR), rd_rn),
13915 cCE(wunpckeluh,e4000e0, 2, (RIWR, RIWR), rd_rn),
13916 cCE(wunpckeluw,e8000e0, 2, (RIWR, RIWR), rd_rn),
13917 cCE(wunpckelsb,e2000e0, 2, (RIWR, RIWR), rd_rn),
13918 cCE(wunpckelsh,e6000e0, 2, (RIWR, RIWR), rd_rn),
13919 cCE(wunpckelsw,ea000e0, 2, (RIWR, RIWR), rd_rn),
13920 cCE(wunpckilb, e1000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13921 cCE(wunpckilh, e5000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13922 cCE(wunpckilw, e9000e0, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13923 cCE(wxor, e100000, 3, (RIWR, RIWR, RIWR), rd_rn_rm),
13924 cCE(wzero, e300000, 1, (RIWR), iwmmxt_wzero),
13927 #define ARM_VARIANT &arm_cext_maverick /* Cirrus Maverick instructions. */
13928 cCE(cfldrs, c100400, 2, (RMF, ADDR), rd_cpaddr),
13929 cCE(cfldrd, c500400, 2, (RMD, ADDR), rd_cpaddr),
13930 cCE(cfldr32, c100500, 2, (RMFX, ADDR), rd_cpaddr),
13931 cCE(cfldr64, c500500, 2, (RMDX, ADDR), rd_cpaddr),
13932 cCE(cfstrs, c000400, 2, (RMF, ADDR), rd_cpaddr),
13933 cCE(cfstrd, c400400, 2, (RMD, ADDR), rd_cpaddr),
13934 cCE(cfstr32, c000500, 2, (RMFX, ADDR), rd_cpaddr),
13935 cCE(cfstr64, c400500, 2, (RMDX, ADDR), rd_cpaddr),
13936 cCE(cfmvsr, e000450, 2, (RMF, RR), rn_rd),
13937 cCE(cfmvrs, e100450, 2, (RR, RMF), rd_rn),
13938 cCE(cfmvdlr, e000410, 2, (RMD, RR), rn_rd),
13939 cCE(cfmvrdl, e100410, 2, (RR, RMD), rd_rn),
13940 cCE(cfmvdhr, e000430, 2, (RMD, RR), rn_rd),
13941 cCE(cfmvrdh, e100430, 2, (RR, RMD), rd_rn),
13942 cCE(cfmv64lr, e000510, 2, (RMDX, RR), rn_rd),
13943 cCE(cfmvr64l, e100510, 2, (RR, RMDX), rd_rn),
13944 cCE(cfmv64hr, e000530, 2, (RMDX, RR), rn_rd),
13945 cCE(cfmvr64h, e100530, 2, (RR, RMDX), rd_rn),
13946 cCE(cfmval32, e200440, 2, (RMAX, RMFX), rd_rn),
13947 cCE(cfmv32al, e100440, 2, (RMFX, RMAX), rd_rn),
13948 cCE(cfmvam32, e200460, 2, (RMAX, RMFX), rd_rn),
13949 cCE(cfmv32am, e100460, 2, (RMFX, RMAX), rd_rn),
13950 cCE(cfmvah32, e200480, 2, (RMAX, RMFX), rd_rn),
13951 cCE(cfmv32ah, e100480, 2, (RMFX, RMAX), rd_rn),
13952 cCE(cfmva32, e2004a0, 2, (RMAX, RMFX), rd_rn),
13953 cCE(cfmv32a, e1004a0, 2, (RMFX, RMAX), rd_rn),
13954 cCE(cfmva64, e2004c0, 2, (RMAX, RMDX), rd_rn),
13955 cCE(cfmv64a, e1004c0, 2, (RMDX, RMAX), rd_rn),
13956 cCE(cfmvsc32, e2004e0, 2, (RMDS, RMDX), mav_dspsc),
13957 cCE(cfmv32sc, e1004e0, 2, (RMDX, RMDS), rd),
13958 cCE(cfcpys, e000400, 2, (RMF, RMF), rd_rn),
13959 cCE(cfcpyd, e000420, 2, (RMD, RMD), rd_rn),
13960 cCE(cfcvtsd, e000460, 2, (RMD, RMF), rd_rn),
13961 cCE(cfcvtds, e000440, 2, (RMF, RMD), rd_rn),
13962 cCE(cfcvt32s, e000480, 2, (RMF, RMFX), rd_rn),
13963 cCE(cfcvt32d, e0004a0, 2, (RMD, RMFX), rd_rn),
13964 cCE(cfcvt64s, e0004c0, 2, (RMF, RMDX), rd_rn),
13965 cCE(cfcvt64d, e0004e0, 2, (RMD, RMDX), rd_rn),
13966 cCE(cfcvts32, e100580, 2, (RMFX, RMF), rd_rn),
13967 cCE(cfcvtd32, e1005a0, 2, (RMFX, RMD), rd_rn),
13968 cCE(cftruncs32,e1005c0, 2, (RMFX, RMF), rd_rn),
13969 cCE(cftruncd32,e1005e0, 2, (RMFX, RMD), rd_rn),
13970 cCE(cfrshl32, e000550, 3, (RMFX, RMFX, RR), mav_triple),
13971 cCE(cfrshl64, e000570, 3, (RMDX, RMDX, RR), mav_triple),
13972 cCE(cfsh32, e000500, 3, (RMFX, RMFX, I63s), mav_shift),
13973 cCE(cfsh64, e200500, 3, (RMDX, RMDX, I63s), mav_shift),
13974 cCE(cfcmps, e100490, 3, (RR, RMF, RMF), rd_rn_rm),
13975 cCE(cfcmpd, e1004b0, 3, (RR, RMD, RMD), rd_rn_rm),
13976 cCE(cfcmp32, e100590, 3, (RR, RMFX, RMFX), rd_rn_rm),
13977 cCE(cfcmp64, e1005b0, 3, (RR, RMDX, RMDX), rd_rn_rm),
13978 cCE(cfabss, e300400, 2, (RMF, RMF), rd_rn),
13979 cCE(cfabsd, e300420, 2, (RMD, RMD), rd_rn),
13980 cCE(cfnegs, e300440, 2, (RMF, RMF), rd_rn),
13981 cCE(cfnegd, e300460, 2, (RMD, RMD), rd_rn),
13982 cCE(cfadds, e300480, 3, (RMF, RMF, RMF), rd_rn_rm),
13983 cCE(cfaddd, e3004a0, 3, (RMD, RMD, RMD), rd_rn_rm),
13984 cCE(cfsubs, e3004c0, 3, (RMF, RMF, RMF), rd_rn_rm),
13985 cCE(cfsubd, e3004e0, 3, (RMD, RMD, RMD), rd_rn_rm),
13986 cCE(cfmuls, e100400, 3, (RMF, RMF, RMF), rd_rn_rm),
13987 cCE(cfmuld, e100420, 3, (RMD, RMD, RMD), rd_rn_rm),
13988 cCE(cfabs32, e300500, 2, (RMFX, RMFX), rd_rn),
13989 cCE(cfabs64, e300520, 2, (RMDX, RMDX), rd_rn),
13990 cCE(cfneg32, e300540, 2, (RMFX, RMFX), rd_rn),
13991 cCE(cfneg64, e300560, 2, (RMDX, RMDX), rd_rn),
13992 cCE(cfadd32, e300580, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
13993 cCE(cfadd64, e3005a0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
13994 cCE(cfsub32, e3005c0, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
13995 cCE(cfsub64, e3005e0, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
13996 cCE(cfmul32, e100500, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
13997 cCE(cfmul64, e100520, 3, (RMDX, RMDX, RMDX), rd_rn_rm),
13998 cCE(cfmac32, e100540, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
13999 cCE(cfmsc32, e100560, 3, (RMFX, RMFX, RMFX), rd_rn_rm),
14000 cCE(cfmadd32, e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14001 cCE(cfmsub32, e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
14002 cCE(cfmadda32, e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14003 cCE(cfmsuba32, e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
14006 #undef THUMB_VARIANT
14033 /* MD interface: bits in the object file. */
14035 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
14036 for use in the a.out file, and store them in the array pointed to by buf.
14037 This knows about the endian-ness of the target machine and does
14038 THE RIGHT THING, whatever it is. Possible values for n are 1 (byte)
14039 2 (short) and 4 (long). Floating numbers are put out as a series of
14040 LITTLENUMS (shorts, here at least). */
14043 md_number_to_chars (char * buf, valueT val, int n)
14045 if (target_big_endian)
14046 number_to_chars_bigendian (buf, val, n);
14048 number_to_chars_littleendian (buf, val, n);
14052 md_chars_to_number (char * buf, int n)
14055 unsigned char * where = (unsigned char *) buf;
14057 if (target_big_endian)
14062 result |= (*where++ & 255);
14070 result |= (where[n] & 255);
14077 /* MD interface: Sections. */
14079 /* Estimate the size of a frag before relaxing. Assume everything fits in
14083 md_estimate_size_before_relax (fragS * fragp,
14084 segT segtype ATTRIBUTE_UNUSED)
14090 /* Convert a machine dependent frag. */
14093 md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
14095 unsigned long insn;
14096 unsigned long old_op;
14104 buf = fragp->fr_literal + fragp->fr_fix;
14106 old_op = bfd_get_16(abfd, buf);
14107 if (fragp->fr_symbol) {
14108 exp.X_op = O_symbol;
14109 exp.X_add_symbol = fragp->fr_symbol;
14111 exp.X_op = O_constant;
14113 exp.X_add_number = fragp->fr_offset;
14114 opcode = fragp->fr_subtype;
14117 case T_MNEM_ldr_pc:
14118 case T_MNEM_ldr_pc2:
14119 case T_MNEM_ldr_sp:
14120 case T_MNEM_str_sp:
14127 if (fragp->fr_var == 4)
14129 insn = THUMB_OP32(opcode);
14130 if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
14132 insn |= (old_op & 0x700) << 4;
14136 insn |= (old_op & 7) << 12;
14137 insn |= (old_op & 0x38) << 13;
14139 insn |= 0x00000c00;
14140 put_thumb32_insn (buf, insn);
14141 reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
14145 reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
14147 pc_rel = (opcode == T_MNEM_ldr_pc2);
14150 if (fragp->fr_var == 4)
14152 insn = THUMB_OP32 (opcode);
14153 insn |= (old_op & 0xf0) << 4;
14154 put_thumb32_insn (buf, insn);
14155 reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
14159 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14160 exp.X_add_number -= 4;
14168 if (fragp->fr_var == 4)
14170 int r0off = (opcode == T_MNEM_mov
14171 || opcode == T_MNEM_movs) ? 0 : 8;
14172 insn = THUMB_OP32 (opcode);
14173 insn = (insn & 0xe1ffffff) | 0x10000000;
14174 insn |= (old_op & 0x700) << r0off;
14175 put_thumb32_insn (buf, insn);
14176 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14180 reloc_type = BFD_RELOC_ARM_THUMB_IMM;
14185 if (fragp->fr_var == 4)
14187 insn = THUMB_OP32(opcode);
14188 put_thumb32_insn (buf, insn);
14189 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
14192 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
14196 if (fragp->fr_var == 4)
14198 insn = THUMB_OP32(opcode);
14199 insn |= (old_op & 0xf00) << 14;
14200 put_thumb32_insn (buf, insn);
14201 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
14204 reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
14207 case T_MNEM_add_sp:
14208 case T_MNEM_add_pc:
14209 case T_MNEM_inc_sp:
14210 case T_MNEM_dec_sp:
14211 if (fragp->fr_var == 4)
14213 /* ??? Choose between add and addw. */
14214 insn = THUMB_OP32 (opcode);
14215 insn |= (old_op & 0xf0) << 4;
14216 put_thumb32_insn (buf, insn);
14217 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14220 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14228 if (fragp->fr_var == 4)
14230 insn = THUMB_OP32 (opcode);
14231 insn |= (old_op & 0xf0) << 4;
14232 insn |= (old_op & 0xf) << 16;
14233 put_thumb32_insn (buf, insn);
14234 reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
14237 reloc_type = BFD_RELOC_ARM_THUMB_ADD;
14243 fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
14245 fixp->fx_file = fragp->fr_file;
14246 fixp->fx_line = fragp->fr_line;
14247 fragp->fr_fix += fragp->fr_var;
14250 /* Return the size of a relaxable immediate operand instruction.
14251 SHIFT and SIZE specify the form of the allowable immediate. */
/* NOTE(review): this excerpt elides several original lines (gaps in the
   embedded line numbers) -- the return type, braces and the final return
   statements are not visible here.  Code kept byte-identical.  */
14253 relax_immediate (fragS *fragp, int size, int shift)
14259 /* ??? Should be able to do better than this. */
/* A still-symbolic operand cannot be range-checked yet; presumably the
   (elided) return selects the worst-case 4-byte variant -- TODO confirm.  */
14260 if (fragp->fr_symbol)
/* LOW masks the bits that must be clear for the required alignment;
   MASK covers the SIZE-bit immediate field scaled by 1 << SHIFT.  */
14263 low = (1 << shift) - 1;
14264 mask = (1 << (shift + size)) - (1 << shift);
14265 offset = fragp->fr_offset;
14266 /* Force misaligned offsets to 32-bit variant. */
/* Offset has bits outside the representable field: needs the wide form.  */
14269 if (offset & ~mask)
14274 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
/* NOTE(review): several original lines are elided from this excerpt
   (gaps in the embedded numbering); the return statements and the
   subtraction of ADDR from VAL are not visible here.  */
14277 relax_adr (fragS *fragp, asection *sec)
14282 /* Assume worst case for symbols not known to be in the same section. */
14283 if (!S_IS_DEFINED(fragp->fr_symbol)
14284 || sec != S_GET_SEGMENT (fragp->fr_symbol))
/* VAL starts as the target address; ADDR is the PC value used by adr:
   instruction address + 4, rounded down to a word boundary.  */
14287 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
14288 addr = fragp->fr_address + fragp->fr_fix;
14289 addr = (addr + 4) & ~3;
14290 /* Fix the insn as the 4-byte version if the target address is not
14291 sufficiently aligned. This prevents an infinite loop when two
14292 instructions have contradictory range/alignment requirements. */
/* Narrow adr reaches 0..1020 bytes forward only.  */
14296 if (val < 0 || val > 1020)
14301 /* Return the size of a relaxable add/sub immediate instruction. */
14303 relax_addsub (fragS *fragp, asection *sec)
/* Re-read the narrow opcode already emitted into the frag to decide
   which immediate encoding is available.  (Local declarations are
   elided from this excerpt.)  */
14308 buf = fragp->fr_literal + fragp->fr_fix;
14309 op = bfd_get_16(sec->owner, buf);
/* If the two low 4-bit register fields match (source == destination,
   presumably -- confirm against the Thumb add/sub encodings), the 8-bit
   immediate form is usable; otherwise only the 3-bit immediate fits.  */
14310 if ((op & 0xf) == ((op >> 4) & 0xf))
14311 return relax_immediate (fragp, 8, 0);
14313 return relax_immediate (fragp, 3, 0);
14317 /* Return the size of a relaxable branch instruction. BITS is the
14318 size of the offset field in the narrow instruction. */
14321 relax_branch (fragS *fragp, asection *sec, int bits)
14327 /* Assume worst case for symbols not known to be in the same section. */
14328 if (!S_IS_DEFINED(fragp->fr_symbol)
14329 || sec != S_GET_SEGMENT (fragp->fr_symbol))
/* Branch offsets are relative to the PC, which is the instruction
   address plus 4 in Thumb state.  */
14332 val = S_GET_VALUE(fragp->fr_symbol) + fragp->fr_offset;
14333 addr = fragp->fr_address + fragp->fr_fix + 4;
14336 /* Offset is a signed value *2 */
/* LIMIT's computation is elided in this excerpt -- presumably 1 << bits;
   out-of-range targets fall through to the wide encoding.  */
14338 if (val >= limit || val < -limit)
14344 /* Relax a machine dependent frag. This returns the amount by which
14345 the current size of the frag should change. */
/* NOTE(review): many case labels, break statements and braces are
   elided from this excerpt (gaps in the embedded numbering); the code
   below is kept byte-identical.  */
14348 arm_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
14353 oldsize = fragp->fr_var;
/* fr_subtype records the narrow-form mnemonic this frag was emitted
   for; each relax_* helper returns the size the insn now needs.  */
14354 switch (fragp->fr_subtype)
14356 case T_MNEM_ldr_pc2:
14357 newsize = relax_adr(fragp, sec);
14359 case T_MNEM_ldr_pc:
14360 case T_MNEM_ldr_sp:
14361 case T_MNEM_str_sp:
14362 newsize = relax_immediate(fragp, 8, 2);
14366 newsize = relax_immediate(fragp, 5, 2);
14370 newsize = relax_immediate(fragp, 5, 1);
14374 newsize = relax_immediate(fragp, 5, 0);
14377 newsize = relax_adr(fragp, sec);
14383 newsize = relax_immediate(fragp, 8, 0);
14386 newsize = relax_branch(fragp, sec, 11);
14389 newsize = relax_branch(fragp, sec, 8);
14391 case T_MNEM_add_sp:
14392 case T_MNEM_add_pc:
14393 newsize = relax_immediate (fragp, 8, 2);
14395 case T_MNEM_inc_sp:
14396 case T_MNEM_dec_sp:
14397 newsize = relax_immediate (fragp, 7, 2);
14403 newsize = relax_addsub (fragp, sec);
/* NOTE(review): the condition guarding this early-convert path is
   elided; a negated fr_var appears to mark the frag as finalized
   before md_convert_frag -- TODO confirm against the full source.  */
14410 fragp->fr_var = -newsize;
14411 md_convert_frag (sec->owner, sec, fragp);
14413 return -(newsize + oldsize);
14415 fragp->fr_var = newsize;
14416 return newsize - oldsize;
14419 /* Round up a section size to the appropriate boundary. */
/* NOTE(review): the second parameter line (the SIZE argument) and the
   braces are elided from this excerpt.  */
14422 md_section_align (segT segment ATTRIBUTE_UNUSED,
14428 /* Round all sects to multiple of 4. */
14429 return (size + 3) & ~3;
14433 /* This is called from HANDLE_ALIGN in write.c. Fill in the contents
14434 of an rs_align_code fragment. */
14437 arm_handle_align (fragS * fragP)
/* Canonical no-op encodings in both byte orders: ARM "mov r0, r0"
   (0xe1a00000) and Thumb "mov r8, r8" (0x46c0).  */
14439 static char const arm_noop[4] = { 0x00, 0x00, 0xa0, 0xe1 };
14440 static char const thumb_noop[2] = { 0xc0, 0x46 };
14441 static char const arm_bigend_noop[4] = { 0xe1, 0xa0, 0x00, 0x00 };
14442 static char const thumb_bigend_noop[2] = { 0x46, 0xc0 };
14444 int bytes, fix, noop_size;
14448 if (fragP->fr_type != rs_align_code)
/* Number of padding bytes this alignment frag must supply.  */
14451 bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
14452 p = fragP->fr_literal + fragP->fr_fix;
14455 if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
14456 bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;
/* tc_frag_data is set in arm_init_frag: nonzero means the frag was
   emitted in Thumb mode, so use 2-byte no-ops.  */
14458 if (fragP->tc_frag_data)
14460 if (target_big_endian)
14461 noop = thumb_bigend_noop;
14464 noop_size = sizeof (thumb_noop);
14468 if (target_big_endian)
14469 noop = arm_bigend_noop;
14472 noop_size = sizeof (arm_noop);
/* Zero-pad up to the no-op's own size alignment first.  */
14475 if (bytes & (noop_size - 1))
14477 fix = bytes & (noop_size - 1);
14478 memset (p, 0, fix);
/* Then fill the remainder with whole no-op instructions.  */
14483 while (bytes >= noop_size)
14485 memcpy (p, noop, noop_size);
14487 bytes -= noop_size;
14491 fragP->fr_fix += fix;
14492 fragP->fr_var = noop_size;
14495 /* Called from md_do_align. Used to create an alignment
14496 frag in a code section. */
14499 arm_frag_align_code (int n, int max)
14503 /* We assume that there will never be a requirement
14504 to support alignments greater than 32 bytes. */
14505 if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
14506 as_fatal (_("alignments greater than 32 bytes not supported in .text sections."))
/* NOTE(review): some of frag_var's arguments (fill expression, symbol,
   offset) are elided from this excerpt.  */
14508 p = frag_var (rs_align_code,
14509 MAX_MEM_FOR_RS_ALIGN_CODE,
14511 (relax_substateT) max,
14518 /* Perform target specific initialisation of a frag. */
14521 arm_init_frag (fragS * fragP)
14523 /* Record whether this frag is in an ARM or a THUMB area. */
/* Consumed later by arm_handle_align to choose the right no-op size.  */
14524 fragP->tc_frag_data = thumb_mode;
14528 /* When we change sections we need to issue a new mapping symbol. */
14531 arm_elf_change_section (void)
14534 segment_info_type *seginfo;
14536 /* Link an unlinked unwind index table section to the .text section. */
14537 if (elf_section_type (now_seg) == SHT_ARM_EXIDX
14538 && elf_linked_to_section (now_seg) == NULL)
14539 elf_linked_to_section (now_seg) = text_section;
/* Only ordinary sections get mapping-symbol bookkeeping.  */
14541 if (!SEG_NORMAL (now_seg))
14544 flags = bfd_get_section_flags (stdoutput, now_seg);
14546 /* We can ignore sections that only contain debug info. */
14547 if ((flags & SEC_ALLOC) == 0)
14550 seginfo = seg_info (now_seg);
14551 mapstate = seginfo->tc_segment_info_data.mapstate;
/* NOTE(review): the rest of this function (the use made of mapstate and
   marked_pr_dependency) is elided from this excerpt.  */
14552 marked_pr_dependency = seginfo->tc_segment_info_data.marked_pr_dependency;
/* Map a section-name suffix to a special ELF section type; "exidx"
   selects the ARM exception-index type.  (The default-return path is
   elided from this excerpt.)  */
14556 arm_elf_section_type (const char * str, size_t len)
14558 if (len == 5 && strncmp (str, "exidx", 5) == 0)
14559 return SHT_ARM_EXIDX;
14564 /* Code to deal with unwinding tables. */
14566 static void add_unwind_adjustsp (offsetT);
14568 /* Generate any deferred unwind frame offset. */
14571 flush_pending_unwind (void)
/* Emit the accumulated stack adjustment as a real unwind opcode and
   clear it so it cannot be emitted twice.  */
14575 offset = unwind.pending_offset;
14576 unwind.pending_offset = 0;
14578 add_unwind_adjustsp (offset);
14581 /* Add an opcode to this list for this function. Two-byte opcodes should
14582 be passed as op[0] << 8 | op[1]. The list of opcodes is built in reverse
14586 add_unwind_opcode (valueT op, int length)
14588 /* Add any deferred stack adjustment. */
14589 if (unwind.pending_offset)
14590 flush_pending_unwind ();
14592 unwind.sp_restored = 0;
/* Grow the opcode buffer in ARM_OPCODE_CHUNK_SIZE steps as needed.  */
14594 if (unwind.opcode_count + length > unwind.opcode_alloc)
14596 unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
14597 if (unwind.opcodes)
14598 unwind.opcodes = xrealloc (unwind.opcodes,
14599 unwind.opcode_alloc);
14601 unwind.opcodes = xmalloc (unwind.opcode_alloc);
/* Append a byte; the loop over LENGTH bytes is elided in this excerpt.  */
14606 unwind.opcodes[unwind.opcode_count] = op & 0xff;
14608 unwind.opcode_count++;
14612 /* Add unwind opcodes to adjust the stack pointer. */
/* Opcode values below (0x3f, 0x40, 0x7f, 0xb2) match the ARM EHABI
   "vsp = vsp +/- imm" unwind opcode encodings -- see the EHABI spec.  */
14615 add_unwind_adjustsp (offsetT offset)
14619 if (offset > 0x200)
14621 /* We need at most 5 bytes to hold a 32-bit value in a uleb128. */
14626 /* Long form: 0xb2, uleb128. */
14627 /* This might not fit in a word so add the individual bytes,
14628 remembering the list is built in reverse order. */
14629 o = (valueT) ((offset - 0x204) >> 2);
14631 add_unwind_opcode (0, 1);
14633 /* Calculate the uleb128 encoding of the offset. */
/* NOTE(review): the uleb128 loop body is partly elided here.  */
14637 bytes[n] = o & 0x7f;
14643 /* Add the insn. */
14645 add_unwind_opcode (bytes[n - 1], 1);
14646 add_unwind_opcode (0xb2, 1);
14648 else if (offset > 0x100)
14650 /* Two short opcodes. */
14651 add_unwind_opcode (0x3f, 1);
14652 op = (offset - 0x104) >> 2;
14653 add_unwind_opcode (op, 1);
14655 else if (offset > 0)
14657 /* Short opcode. */
14658 op = (offset - 4) >> 2;
14659 add_unwind_opcode (op, 1);
14661 else if (offset < 0)
/* Negative adjustment: emit maximal 0x7f decrements, then the rest.
   (The offset negation/decrement statements are elided here.)  */
14664 while (offset > 0x100)
14666 add_unwind_opcode (0x7f, 1);
14669 op = ((offset - 4) >> 2) | 0x40;
14670 add_unwind_opcode (op, 1);
14674 /* Finish the list of unwind opcodes for this function. */
14676 finish_unwind_opcodes (void)
14680 if (unwind.fp_used)
14682 /* Adjust sp as necessary. */
14683 unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
14684 flush_pending_unwind ();
14686 /* After restoring sp from the frame pointer. */
/* 0x90 | reg is the EHABI "vsp = r[reg]" opcode, restoring vsp from
   the frame-pointer register.  */
14687 op = 0x90 | unwind.fp_reg;
14688 add_unwind_opcode (op, 1);
14691 flush_pending_unwind ();
14695 /* Start an exception table entry. If idx is nonzero this is an index table
/* NOTE(review): the IDX-selecting if/else and several braces are elided
   from this excerpt (gaps in the embedded numbering).  */
14699 start_unwind_section (const segT text_seg, int idx)
14701 const char * text_name;
14702 const char * prefix;
14703 const char * prefix_once;
14704 const char * group_name;
14708 size_t sec_name_len;
/* Index tables go in .ARM.exidx*, unwind descriptors in .ARM.extab*.  */
14715 prefix = ELF_STRING_ARM_unwind;
14716 prefix_once = ELF_STRING_ARM_unwind_once;
14717 type = SHT_ARM_EXIDX;
14721 prefix = ELF_STRING_ARM_unwind_info;
14722 prefix_once = ELF_STRING_ARM_unwind_info_once;
14723 type = SHT_PROGBITS;
14726 text_name = segment_name (text_seg);
14727 if (streq (text_name, ".text"))
/* Linkonce text sections get the "once" prefix with the section's own
   tail appended.  */
14730 if (strncmp (text_name, ".gnu.linkonce.t.",
14731 strlen (".gnu.linkonce.t.")) == 0)
14733 prefix = prefix_once;
14734 text_name += strlen (".gnu.linkonce.t.");
/* Build "<prefix><text_name>" as the unwind section name.  */
14737 prefix_len = strlen (prefix);
14738 text_len = strlen (text_name);
14739 sec_name_len = prefix_len + text_len;
14740 sec_name = xmalloc (sec_name_len + 1);
14741 memcpy (sec_name, prefix, prefix_len);
14742 memcpy (sec_name + prefix_len, text_name, text_len);
14743 sec_name[prefix_len + text_len] = '\0';
14749 /* Handle COMDAT group. */
14750 if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
14752 group_name = elf_group_name (text_seg);
14753 if (group_name == NULL)
14755 as_bad ("Group section `%s' has no group signature",
14756 segment_name (text_seg));
14757 ignore_rest_of_line ();
14760 flags |= SHF_GROUP;
14764 obj_elf_change_section (sec_name, type, flags, 0, group_name, linkonce, 0);
14766 /* Set the section link for index tables. */
14768 elf_linked_to_section (now_seg) = text_seg;
14772 /* Start an unwind table entry. HAVE_DATA is nonzero if we have additional
14773 personality routine data. Returns zero, or the index table value for
14774 an inline entry. */
/* NOTE(review): this long function has many lines elided in this
   excerpt (braces, loop bodies, size checks); code kept byte-identical.  */
14777 create_unwind_entry (int have_data)
14782 /* The current word of data. */
14784 /* The number of bytes left in this word. */
14787 finish_unwind_opcodes ();
14789 /* Remember the current text section. */
14790 unwind.saved_seg = now_seg;
14791 unwind.saved_subseg = now_subseg;
14793 start_unwind_section (now_seg, 0);
14795 if (unwind.personality_routine == NULL)
/* personality_index == -2 marks a .cantunwind frame.  */
14797 if (unwind.personality_index == -2)
/* NOTE(review): "handerdata" looks like a typo for "handlerdata" in
   this diagnostic; left unchanged (runtime string).  */
14800 as_bad (_("handerdata in cantunwind frame"));
14801 return 1; /* EXIDX_CANTUNWIND. */
14804 /* Use a default personality routine if none is specified. */
14805 if (unwind.personality_index == -1)
14807 if (unwind.opcode_count > 3)
14808 unwind.personality_index = 1;
14810 unwind.personality_index = 0;
14813 /* Space for the personality routine entry. */
14814 if (unwind.personality_index == 0)
14816 if (unwind.opcode_count > 3)
14817 as_bad (_("too many unwind opcodes for personality routine 0"));
14821 /* All the data is inline in the index table. */
14824 while (unwind.opcode_count > 0)
14826 unwind.opcode_count--;
14827 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
14831 /* Pad with "finish" opcodes. */
14833 data = (data << 8) | 0xb0;
14840 /* We get two opcodes "free" in the first word. */
14841 size = unwind.opcode_count - 2;
14844 /* An extra byte is required for the opcode count. */
14845 size = unwind.opcode_count + 1;
/* Round the byte count up to whole words.  */
14847 size = (size + 3) >> 2;
14849 as_bad (_("too many unwind opcodes"));
/* Table entries must be word-aligned.  */
14851 frag_align (2, 0, 0);
14852 record_alignment (now_seg, 2);
14853 unwind.table_entry = expr_build_dot ();
14855 /* Allocate the table entry. */
14856 ptr = frag_more ((size << 2) + 4);
14857 where = frag_now_fix () - ((size << 2) + 4);
14859 switch (unwind.personality_index)
14862 /* ??? Should this be a PLT generating relocation? */
14863 /* Custom personality routine. */
14864 fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
14865 BFD_RELOC_ARM_PREL31);
14870 /* Set the first byte to the number of additional words. */
14875 /* ABI defined personality routines. */
14877 /* Three opcodes bytes are packed into the first word. */
14884 /* The size and first two opcode bytes go in the first word. */
14885 data = ((0x80 + unwind.personality_index) << 8) | size;
14890 /* Should never happen. */
14894 /* Pack the opcodes into words (MSB first), reversing the list at the same
14896 while (unwind.opcode_count > 0)
14900 md_number_to_chars (ptr, data, 4);
14905 unwind.opcode_count--;
14907 data = (data << 8) | unwind.opcodes[unwind.opcode_count];
14910 /* Finish off the last word. */
14913 /* Pad with "finish" opcodes. */
/* 0xb0 is the EHABI "finish" opcode.  */
14915 data = (data << 8) | 0xb0;
14917 md_number_to_chars (ptr, data, 4);
14922 /* Add an empty descriptor if there is no user-specified data. */
14923 ptr = frag_more (4);
14924 md_number_to_chars (ptr, 0, 4);
14930 /* Convert REGNAME to a DWARF-2 register number.  Parses REGNAME as a
   core register via arm_reg_parse; the (elided) tail presumably maps
   a failed parse to -1 -- TODO confirm against the full source.  */
14933 tc_arm_regname_to_dw2regnum (const char *regname)
/* Fix: the address-of operator in "&regname" had been corrupted by a
   character-encoding error into the single character '®' ("&reg" ->
   '®'); restore the intended "&regname" argument.  */
14935 int reg = arm_reg_parse ((char **) &regname, REG_TYPE_RN, NULL);
14943 /* Initialize the DWARF-2 unwind information for this procedure. */
14946 tc_arm_frame_initial_instructions (void)
/* On entry the CFA is SP + 0.  */
14948 cfi_add_CFA_def_cfa (REG_SP, 0);
14950 #endif /* OBJ_ELF */
14953 /* MD interface: Symbol and relocation handling. */
14955 /* Return the address within the segment that a PC-relative fixup is
14956 relative to. For ARM, PC-relative fixups applied to instructions
14957 are generally relative to the location of the fixup plus 8 bytes.
14958 Thumb branches are offset by 4, and Thumb loads relative to PC
14959 require special handling. */
14962 md_pcrel_from_section (fixS * fixP, segT seg)
14964 offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
14966 /* If this is pc-relative and we are going to emit a relocation
14967 then we just want to put out any pipeline compensation that the linker
14968 will need. Otherwise we want to use the calculated base. */
/* NOTE(review): the first operand of this && (presumably fixP->fx_pcrel)
   is elided from this excerpt.  */
14970 && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
14971 || arm_force_relocation (fixP)))
14974 switch (fixP->fx_r_type)
14976 /* PC relative addressing on the Thumb is slightly odd as the
14977 bottom two bits of the PC are forced to zero for the
14978 calculation. This happens *after* application of the
14979 pipeline offset. However, Thumb adrl already adjusts for
14980 this, so we need not do it again. */
14981 case BFD_RELOC_ARM_THUMB_ADD:
14984 case BFD_RELOC_ARM_THUMB_OFFSET:
14985 case BFD_RELOC_ARM_T32_OFFSET_IMM:
14986 case BFD_RELOC_ARM_T32_ADD_PC12:
14987 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
/* Thumb PC-relative loads: PC reads as Align(insn + 4, 4).  */
14988 return (base + 4) & ~3;
14990 /* Thumb branches are simply offset by +4. */
14991 case BFD_RELOC_THUMB_PCREL_BRANCH7:
14992 case BFD_RELOC_THUMB_PCREL_BRANCH9:
14993 case BFD_RELOC_THUMB_PCREL_BRANCH12:
14994 case BFD_RELOC_THUMB_PCREL_BRANCH20:
14995 case BFD_RELOC_THUMB_PCREL_BRANCH23:
14996 case BFD_RELOC_THUMB_PCREL_BRANCH25:
14997 case BFD_RELOC_THUMB_PCREL_BLX:
15000 /* ARM mode branches are offset by +8. However, the Windows CE
15001 loader expects the relocation not to take this into account. */
15002 case BFD_RELOC_ARM_PCREL_BRANCH:
15003 case BFD_RELOC_ARM_PCREL_CALL:
15004 case BFD_RELOC_ARM_PCREL_JUMP:
15005 case BFD_RELOC_ARM_PCREL_BLX:
15006 case BFD_RELOC_ARM_PLT32:
15013 /* ARM mode loads relative to PC are also offset by +8. Unlike
15014 branches, the Windows CE loader *does* expect the relocation
15015 to take this into account. */
15016 case BFD_RELOC_ARM_OFFSET_IMM:
15017 case BFD_RELOC_ARM_OFFSET_IMM8:
15018 case BFD_RELOC_ARM_HWLITERAL:
15019 case BFD_RELOC_ARM_LITERAL:
15020 case BFD_RELOC_ARM_CP_OFF_IMM:
15024 /* Other PC-relative relocations are un-offset. */
15030 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
15031 Otherwise we have no need to default values of symbols. */
15034 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
/* Cheap two-character prescreen before the full string compare.  */
15037 if (name[0] == '_' && name[1] == 'G'
15038 && streq (name, GLOBAL_OFFSET_TABLE_NAME))
/* Lazily create the GOT symbol on first reference; the GOT_symbol
   NULL-check guarding this is elided from the excerpt.  */
15042 if (symbol_find (name))
15043 as_bad ("GOT already in the symbol table");
15045 GOT_symbol = symbol_new (name, undefined_section,
15046 (valueT) 0, & zero_address_frag);
15056 /* Subroutine of md_apply_fix. Check to see if an immediate can be
15057 computed as two separate immediate values, added together. We
15058 already know that this value cannot be computed by just one ARM
15061 static unsigned int
15062 validate_immediate_twopart (unsigned int val,
15063 unsigned int * highpart)
/* ARM data-processing immediates are an 8-bit value rotated right by an
   even amount.  Find the first even rotation I that exposes a nonzero
   low byte, then encode the remaining byte(s) into *HIGHPART with the
   complementary rotation.  */
15068 for (i = 0; i < 32; i += 2)
15069 if (((a = rotate_left (val, i)) & 0xff) != 0)
/* Second significant byte is bits 8-15: rotation i + 24.  */
15075 * highpart = (a >> 8) | ((i + 24) << 7);
15077 else if (a & 0xff0000)
/* Three significant byte groups cannot be split in two: the elided
   branch presumably fails -- TODO confirm.  */
15079 if (a & 0xff000000)
15081 * highpart = (a >> 16) | ((i + 16) << 7);
15085 assert (a & 0xff000000);
15086 * highpart = (a >> 24) | ((i + 8) << 7);
/* Low byte plus its rotation form the first immediate.  */
15089 return (a & 0xff) | (i << 7);
/* Range-check a load/store offset immediate: HWSE nonzero selects the
   8-bit (halfword/signed-extend form, presumably) limit of 255,
   otherwise the 12-bit limit of 4095.  The return statements are
   elided from this excerpt.  */
15096 validate_offset_imm (unsigned int val, int hwse)
15098 if ((hwse && val > 255) || val > 4095)
15103 /* Subroutine of md_apply_fix. Do those data_ops which can take a
15104 negative immediate constant by altering the instruction. A bit of
15109 by inverting the second operand, and
15112 by negating the second operand. */
/* NOTE(review): the switch keyword, several "value = negated/inverted"
   assignments, and break statements are elided from this excerpt.  */
15115 negate_data_op (unsigned long * instruction,
15116 unsigned long value)
15119 unsigned long negated, inverted;
/* Pre-compute both alternative encodings of the constant.  */
15121 negated = encode_arm_immediate (-value);
15122 inverted = encode_arm_immediate (~value);
15124 op = (*instruction >> DATA_OP_SHIFT) & 0xf;
15127 /* First negates. */
15128 case OPCODE_SUB: /* ADD <-> SUB */
15129 new_inst = OPCODE_ADD;
15134 new_inst = OPCODE_SUB;
15138 case OPCODE_CMP: /* CMP <-> CMN */
15139 new_inst = OPCODE_CMN;
15144 new_inst = OPCODE_CMP;
15148 /* Now Inverted ops. */
15149 case OPCODE_MOV: /* MOV <-> MVN */
15150 new_inst = OPCODE_MVN;
15155 new_inst = OPCODE_MOV;
15159 case OPCODE_AND: /* AND <-> BIC */
15160 new_inst = OPCODE_BIC;
15165 new_inst = OPCODE_AND;
15169 case OPCODE_ADC: /* ADC <-> SBC */
15170 new_inst = OPCODE_SBC;
15175 new_inst = OPCODE_ADC;
15179 /* We cannot do anything. */
15184 if (value == (unsigned) FAIL)
/* Swap in the complementary opcode, keeping all other fields.  */
15187 *instruction &= OPCODE_MASK;
15188 *instruction |= new_inst << DATA_OP_SHIFT;
15192 /* Like negate_data_op, but for Thumb-2. */
/* NOTE(review): the switch keyword, the "value = negated/inverted"
   assignments, break statements and the Rd special-casing are elided
   from this excerpt.  */
15194 static unsigned int
15195 thumb32_negate_data_op (offsetT *instruction, offsetT value)
15199 offsetT negated, inverted;
/* Pre-compute both alternative encodings of the constant.  */
15201 negated = encode_thumb32_immediate (-value);
15202 inverted = encode_thumb32_immediate (~value);
15204 rd = (*instruction >> 8) & 0xf;
15205 op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
15208 /* ADD <-> SUB. Includes CMP <-> CMN. */
15209 case T2_OPCODE_SUB:
15210 new_inst = T2_OPCODE_ADD;
15214 case T2_OPCODE_ADD:
15215 new_inst = T2_OPCODE_SUB;
15219 /* ORR <-> ORN. Includes MOV <-> MVN. */
15220 case T2_OPCODE_ORR:
15221 new_inst = T2_OPCODE_ORN;
15225 case T2_OPCODE_ORN:
15226 new_inst = T2_OPCODE_ORR;
15230 /* AND <-> BIC. TST has no inverted equivalent. */
15231 case T2_OPCODE_AND:
15232 new_inst = T2_OPCODE_BIC;
15239 case T2_OPCODE_BIC:
15240 new_inst = T2_OPCODE_AND;
/* ADC <-> SBC use the negated form.  */
15245 case T2_OPCODE_ADC:
15246 new_inst = T2_OPCODE_SBC;
15250 case T2_OPCODE_SBC:
15251 new_inst = T2_OPCODE_ADC;
15255 /* We cannot do anything. */
/* Swap in the complementary opcode, keeping all other fields.  */
15263 *instruction &= T2_OPCODE_MASK;
15264 *instruction |= new_inst << T2_DATA_OP_SHIFT;
15268 /* Read a 32-bit thumb instruction from buf. */
15269 static unsigned long
15270 get_thumb32_insn (char * buf)
15272 unsigned long insn;
/* Thumb-2 instructions are stored as two halfwords, most significant
   halfword first.  */
15273 insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
15274 insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
15280 md_apply_fix (fixS * fixP,
15284 offsetT value = * valP;
15286 unsigned int newimm;
15287 unsigned long temp;
15289 char * buf = fixP->fx_where + fixP->fx_frag->fr_literal;
15291 assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
15293 /* Note whether this will delete the relocation. */
15294 if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
15297 /* On a 64-bit host, silently truncate 'value' to 32 bits for
15298 consistency with the behavior on 32-bit hosts. Remember value
15300 value &= 0xffffffff;
15301 value ^= 0x80000000;
15302 value -= 0x80000000;
15305 fixP->fx_addnumber = value;
15307 /* Same treatment for fixP->fx_offset. */
15308 fixP->fx_offset &= 0xffffffff;
15309 fixP->fx_offset ^= 0x80000000;
15310 fixP->fx_offset -= 0x80000000;
15312 switch (fixP->fx_r_type)
15314 case BFD_RELOC_NONE:
15315 /* This will need to go in the object file. */
15319 case BFD_RELOC_ARM_IMMEDIATE:
15320 /* We claim that this fixup has been processed here,
15321 even if in fact we generate an error because we do
15322 not have a reloc for it, so tc_gen_reloc will reject it. */
15326 && ! S_IS_DEFINED (fixP->fx_addsy))
15328 as_bad_where (fixP->fx_file, fixP->fx_line,
15329 _("undefined symbol %s used as an immediate value"),
15330 S_GET_NAME (fixP->fx_addsy));
15334 newimm = encode_arm_immediate (value);
15335 temp = md_chars_to_number (buf, INSN_SIZE);
15337 /* If the instruction will fail, see if we can fix things up by
15338 changing the opcode. */
15339 if (newimm == (unsigned int) FAIL
15340 && (newimm = negate_data_op (&temp, value)) == (unsigned int) FAIL)
15342 as_bad_where (fixP->fx_file, fixP->fx_line,
15343 _("invalid constant (%lx) after fixup"),
15344 (unsigned long) value);
15348 newimm |= (temp & 0xfffff000);
15349 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
15352 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
15354 unsigned int highpart = 0;
15355 unsigned int newinsn = 0xe1a00000; /* nop. */
15357 newimm = encode_arm_immediate (value);
15358 temp = md_chars_to_number (buf, INSN_SIZE);
15360 /* If the instruction will fail, see if we can fix things up by
15361 changing the opcode. */
15362 if (newimm == (unsigned int) FAIL
15363 && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
15365 /* No ? OK - try using two ADD instructions to generate
15367 newimm = validate_immediate_twopart (value, & highpart);
15369 /* Yes - then make sure that the second instruction is
15371 if (newimm != (unsigned int) FAIL)
15373 /* Still No ? Try using a negated value. */
15374 else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
15375 temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
15376 /* Otherwise - give up. */
15379 as_bad_where (fixP->fx_file, fixP->fx_line,
15380 _("unable to compute ADRL instructions for PC offset of 0x%lx"),
15385 /* Replace the first operand in the 2nd instruction (which
15386 is the PC) with the destination register. We have
15387 already added in the PC in the first instruction and we
15388 do not want to do it again. */
15389 newinsn &= ~ 0xf0000;
15390 newinsn |= ((newinsn & 0x0f000) << 4);
15393 newimm |= (temp & 0xfffff000);
15394 md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
15396 highpart |= (newinsn & 0xfffff000);
15397 md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
15401 case BFD_RELOC_ARM_OFFSET_IMM:
15402 if (!fixP->fx_done && seg->use_rela_p)
15405 case BFD_RELOC_ARM_LITERAL:
15411 if (validate_offset_imm (value, 0) == FAIL)
15413 if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
15414 as_bad_where (fixP->fx_file, fixP->fx_line,
15415 _("invalid literal constant: pool needs to be closer"));
15417 as_bad_where (fixP->fx_file, fixP->fx_line,
15418 _("bad immediate value for offset (%ld)"),
15423 newval = md_chars_to_number (buf, INSN_SIZE);
15424 newval &= 0xff7ff000;
15425 newval |= value | (sign ? INDEX_UP : 0);
15426 md_number_to_chars (buf, newval, INSN_SIZE);
15429 case BFD_RELOC_ARM_OFFSET_IMM8:
15430 case BFD_RELOC_ARM_HWLITERAL:
15436 if (validate_offset_imm (value, 1) == FAIL)
15438 if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
15439 as_bad_where (fixP->fx_file, fixP->fx_line,
15440 _("invalid literal constant: pool needs to be closer"));
15442 as_bad (_("bad immediate value for half-word offset (%ld)"),
15447 newval = md_chars_to_number (buf, INSN_SIZE);
15448 newval &= 0xff7ff0f0;
15449 newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
15450 md_number_to_chars (buf, newval, INSN_SIZE);
15453 case BFD_RELOC_ARM_T32_OFFSET_U8:
15454 if (value < 0 || value > 1020 || value % 4 != 0)
15455 as_bad_where (fixP->fx_file, fixP->fx_line,
15456 _("bad immediate value for offset (%ld)"), (long) value);
15459 newval = md_chars_to_number (buf+2, THUMB_SIZE);
15461 md_number_to_chars (buf+2, newval, THUMB_SIZE);
15464 case BFD_RELOC_ARM_T32_OFFSET_IMM:
15465 /* This is a complicated relocation used for all varieties of Thumb32
15466 load/store instruction with immediate offset:
15468 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
15469 *4, optional writeback(W)
15470 (doubleword load/store)
15472 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
15473 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
15474 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
15475 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
15476 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
15478 Uppercase letters indicate bits that are already encoded at
15479 this point. Lowercase letters are our problem. For the
15480 second block of instructions, the secondary opcode nybble
15481 (bits 8..11) is present, and bit 23 is zero, even if this is
15482 a PC-relative operation. */
15483 newval = md_chars_to_number (buf, THUMB_SIZE);
15485 newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
15487 if ((newval & 0xf0000000) == 0xe0000000)
15489 /* Doubleword load/store: 8-bit offset, scaled by 4. */
15491 newval |= (1 << 23);
15494 if (value % 4 != 0)
15496 as_bad_where (fixP->fx_file, fixP->fx_line,
15497 _("offset not a multiple of 4"));
15503 as_bad_where (fixP->fx_file, fixP->fx_line,
15504 _("offset out of range"));
15509 else if ((newval & 0x000f0000) == 0x000f0000)
15511 /* PC-relative, 12-bit offset. */
15513 newval |= (1 << 23);
15518 as_bad_where (fixP->fx_file, fixP->fx_line,
15519 _("offset out of range"));
15524 else if ((newval & 0x00000100) == 0x00000100)
15526 /* Writeback: 8-bit, +/- offset. */
15528 newval |= (1 << 9);
15533 as_bad_where (fixP->fx_file, fixP->fx_line,
15534 _("offset out of range"));
15539 else if ((newval & 0x00000f00) == 0x00000e00)
15541 /* T-instruction: positive 8-bit offset. */
15542 if (value < 0 || value > 0xff)
15544 as_bad_where (fixP->fx_file, fixP->fx_line,
15545 _("offset out of range"));
15553 /* Positive 12-bit or negative 8-bit offset. */
15557 newval |= (1 << 23);
15567 as_bad_where (fixP->fx_file, fixP->fx_line,
15568 _("offset out of range"));
15575 md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
15576 md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
15579 case BFD_RELOC_ARM_SHIFT_IMM:
15580 newval = md_chars_to_number (buf, INSN_SIZE);
15581 if (((unsigned long) value) > 32
15583 && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
15585 as_bad_where (fixP->fx_file, fixP->fx_line,
15586 _("shift expression is too large"));
15591 /* Shifts of zero must be done as lsl. */
15593 else if (value == 32)
15595 newval &= 0xfffff07f;
15596 newval |= (value & 0x1f) << 7;
15597 md_number_to_chars (buf, newval, INSN_SIZE);
15600 case BFD_RELOC_ARM_T32_IMMEDIATE:
15601 case BFD_RELOC_ARM_T32_IMM12:
15602 case BFD_RELOC_ARM_T32_ADD_PC12:
15603 /* We claim that this fixup has been processed here,
15604 even if in fact we generate an error because we do
15605 not have a reloc for it, so tc_gen_reloc will reject it. */
15609 && ! S_IS_DEFINED (fixP->fx_addsy))
15611 as_bad_where (fixP->fx_file, fixP->fx_line,
15612 _("undefined symbol %s used as an immediate value"),
15613 S_GET_NAME (fixP->fx_addsy));
15617 newval = md_chars_to_number (buf, THUMB_SIZE);
15619 newval |= md_chars_to_number (buf+2, THUMB_SIZE);
15621 /* FUTURE: Implement analogue of negate_data_op for T32. */
15622 if (fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE)
15624 newimm = encode_thumb32_immediate (value);
15625 if (newimm == (unsigned int) FAIL)
15626 newimm = thumb32_negate_data_op (&newval, value);
15630 /* 12 bit immediate for addw/subw. */
15634 newval ^= 0x00a00000;
15637 newimm = (unsigned int) FAIL;
15642 if (newimm == (unsigned int)FAIL)
15644 as_bad_where (fixP->fx_file, fixP->fx_line,
15645 _("invalid constant (%lx) after fixup"),
15646 (unsigned long) value);
15650 newval |= (newimm & 0x800) << 15;
15651 newval |= (newimm & 0x700) << 4;
15652 newval |= (newimm & 0x0ff);
15654 md_number_to_chars (buf, (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
15655 md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
15658 case BFD_RELOC_ARM_SMC:
15659 if (((unsigned long) value) > 0xffff)
15660 as_bad_where (fixP->fx_file, fixP->fx_line,
15661 _("invalid smc expression"));
15662 newval = md_chars_to_number (buf, INSN_SIZE);
15663 newval |= (value & 0xf) | ((value & 0xfff0) << 4);
15664 md_number_to_chars (buf, newval, INSN_SIZE);
15667 case BFD_RELOC_ARM_SWI:
15668 if (fixP->tc_fix_data != 0)
15670 if (((unsigned long) value) > 0xff)
15671 as_bad_where (fixP->fx_file, fixP->fx_line,
15672 _("invalid swi expression"));
15673 newval = md_chars_to_number (buf, THUMB_SIZE);
15675 md_number_to_chars (buf, newval, THUMB_SIZE);
15679 if (((unsigned long) value) > 0x00ffffff)
15680 as_bad_where (fixP->fx_file, fixP->fx_line,
15681 _("invalid swi expression"));
15682 newval = md_chars_to_number (buf, INSN_SIZE);
15684 md_number_to_chars (buf, newval, INSN_SIZE);
15688 case BFD_RELOC_ARM_MULTI:
15689 if (((unsigned long) value) > 0xffff)
15690 as_bad_where (fixP->fx_file, fixP->fx_line,
15691 _("invalid expression in load/store multiple"));
15692 newval = value | md_chars_to_number (buf, INSN_SIZE);
15693 md_number_to_chars (buf, newval, INSN_SIZE);
15697 case BFD_RELOC_ARM_PCREL_CALL:
15698 newval = md_chars_to_number (buf, INSN_SIZE);
15699 if ((newval & 0xf0000000) == 0xf0000000)
15703 goto arm_branch_common;
15705 case BFD_RELOC_ARM_PCREL_JUMP:
15706 case BFD_RELOC_ARM_PLT32:
15708 case BFD_RELOC_ARM_PCREL_BRANCH:
15710 goto arm_branch_common;
15712 case BFD_RELOC_ARM_PCREL_BLX:
15715 /* We are going to store value (shifted right by two) in the
15716 instruction, in a 24 bit, signed field. Bits 26 through 32 either
15717 all clear or all set and bit 0 must be clear. For B/BL bit 1 must
15718 also be be clear. */
15720 as_bad_where (fixP->fx_file, fixP->fx_line,
15721 _("misaligned branch destination"));
15722 if ((value & (offsetT)0xfe000000) != (offsetT)0
15723 && (value & (offsetT)0xfe000000) != (offsetT)0xfe000000)
15724 as_bad_where (fixP->fx_file, fixP->fx_line,
15725 _("branch out of range"));
15727 if (fixP->fx_done || !seg->use_rela_p)
15729 newval = md_chars_to_number (buf, INSN_SIZE);
15730 newval |= (value >> 2) & 0x00ffffff;
15731 /* Set the H bit on BLX instructions. */
15735 newval |= 0x01000000;
15737 newval &= ~0x01000000;
15739 md_number_to_chars (buf, newval, INSN_SIZE);
15743 case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CZB */
15744 /* CZB can only branch forward. */
15746 as_bad_where (fixP->fx_file, fixP->fx_line,
15747 _("branch out of range"));
15749 if (fixP->fx_done || !seg->use_rela_p)
15751 newval = md_chars_to_number (buf, THUMB_SIZE);
15752 newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
15753 md_number_to_chars (buf, newval, THUMB_SIZE);
15757 case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch. */
15758 if ((value & ~0xff) && ((value & ~0xff) != ~0xff))
15759 as_bad_where (fixP->fx_file, fixP->fx_line,
15760 _("branch out of range"));
15762 if (fixP->fx_done || !seg->use_rela_p)
15764 newval = md_chars_to_number (buf, THUMB_SIZE);
15765 newval |= (value & 0x1ff) >> 1;
15766 md_number_to_chars (buf, newval, THUMB_SIZE);
15770 case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch. */
15771 if ((value & ~0x7ff) && ((value & ~0x7ff) != ~0x7ff))
15772 as_bad_where (fixP->fx_file, fixP->fx_line,
15773 _("branch out of range"));
15775 if (fixP->fx_done || !seg->use_rela_p)
15777 newval = md_chars_to_number (buf, THUMB_SIZE);
15778 newval |= (value & 0xfff) >> 1;
15779 md_number_to_chars (buf, newval, THUMB_SIZE);
15783 case BFD_RELOC_THUMB_PCREL_BRANCH20:
15784 if ((value & ~0x1fffff) && ((value & ~0x1fffff) != ~0x1fffff))
15785 as_bad_where (fixP->fx_file, fixP->fx_line,
15786 _("conditional branch out of range"));
15788 if (fixP->fx_done || !seg->use_rela_p)
15791 addressT S, J1, J2, lo, hi;
15793 S = (value & 0x00100000) >> 20;
15794 J2 = (value & 0x00080000) >> 19;
15795 J1 = (value & 0x00040000) >> 18;
15796 hi = (value & 0x0003f000) >> 12;
15797 lo = (value & 0x00000ffe) >> 1;
15799 newval = md_chars_to_number (buf, THUMB_SIZE);
15800 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
15801 newval |= (S << 10) | hi;
15802 newval2 |= (J1 << 13) | (J2 << 11) | lo;
15803 md_number_to_chars (buf, newval, THUMB_SIZE);
15804 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
15808 case BFD_RELOC_THUMB_PCREL_BLX:
15809 case BFD_RELOC_THUMB_PCREL_BRANCH23:
15810 if ((value & ~0x3fffff) && ((value & ~0x3fffff) != ~0x3fffff))
15811 as_bad_where (fixP->fx_file, fixP->fx_line,
15812 _("branch out of range"));
15814 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
15815 /* For a BLX instruction, make sure that the relocation is rounded up
15816 to a word boundary. This follows the semantics of the instruction
15817 which specifies that bit 1 of the target address will come from bit
15818 1 of the base address. */
15819 value = (value + 1) & ~ 1;
15821 if (fixP->fx_done || !seg->use_rela_p)
15825 newval = md_chars_to_number (buf, THUMB_SIZE);
15826 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
15827 newval |= (value & 0x7fffff) >> 12;
15828 newval2 |= (value & 0xfff) >> 1;
15829 md_number_to_chars (buf, newval, THUMB_SIZE);
15830 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
15834 case BFD_RELOC_THUMB_PCREL_BRANCH25:
15835 if ((value & ~0x1ffffff) && ((value & ~0x1ffffff) != ~0x1ffffff))
15836 as_bad_where (fixP->fx_file, fixP->fx_line,
15837 _("branch out of range"));
15839 if (fixP->fx_done || !seg->use_rela_p)
15842 addressT S, I1, I2, lo, hi;
15844 S = (value & 0x01000000) >> 24;
15845 I1 = (value & 0x00800000) >> 23;
15846 I2 = (value & 0x00400000) >> 22;
15847 hi = (value & 0x003ff000) >> 12;
15848 lo = (value & 0x00000ffe) >> 1;
15853 newval = md_chars_to_number (buf, THUMB_SIZE);
15854 newval2 = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
15855 newval |= (S << 10) | hi;
15856 newval2 |= (I1 << 13) | (I2 << 11) | lo;
15857 md_number_to_chars (buf, newval, THUMB_SIZE);
15858 md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
15863 if (fixP->fx_done || !seg->use_rela_p)
15864 md_number_to_chars (buf, value, 1);
15868 if (fixP->fx_done || !seg->use_rela_p)
15869 md_number_to_chars (buf, value, 2);
15873 case BFD_RELOC_ARM_TLS_GD32:
15874 case BFD_RELOC_ARM_TLS_LE32:
15875 case BFD_RELOC_ARM_TLS_IE32:
15876 case BFD_RELOC_ARM_TLS_LDM32:
15877 case BFD_RELOC_ARM_TLS_LDO32:
15878 S_SET_THREAD_LOCAL (fixP->fx_addsy);
15881 case BFD_RELOC_ARM_GOT32:
15882 case BFD_RELOC_ARM_GOTOFF:
15883 case BFD_RELOC_ARM_TARGET2:
15884 if (fixP->fx_done || !seg->use_rela_p)
15885 md_number_to_chars (buf, 0, 4);
15889 case BFD_RELOC_RVA:
15891 case BFD_RELOC_ARM_TARGET1:
15892 case BFD_RELOC_ARM_ROSEGREL32:
15893 case BFD_RELOC_ARM_SBREL32:
15894 case BFD_RELOC_32_PCREL:
15895 if (fixP->fx_done || !seg->use_rela_p)
15896 md_number_to_chars (buf, value, 4);
15900 case BFD_RELOC_ARM_PREL31:
15901 if (fixP->fx_done || !seg->use_rela_p)
15903 newval = md_chars_to_number (buf, 4) & 0x80000000;
15904 if ((value ^ (value >> 1)) & 0x40000000)
15906 as_bad_where (fixP->fx_file, fixP->fx_line,
15907 _("rel31 relocation overflow"));
15909 newval |= value & 0x7fffffff;
15910 md_number_to_chars (buf, newval, 4);
15915 case BFD_RELOC_ARM_CP_OFF_IMM:
15916 case BFD_RELOC_ARM_T32_CP_OFF_IMM:
15917 if (value < -1023 || value > 1023 || (value & 3))
15918 as_bad_where (fixP->fx_file, fixP->fx_line,
15919 _("co-processor offset out of range"));
15924 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
15925 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
15926 newval = md_chars_to_number (buf, INSN_SIZE);
15928 newval = get_thumb32_insn (buf);
15929 newval &= 0xff7fff00;
15930 newval |= (value >> 2) | (sign ? INDEX_UP : 0);
15932 newval &= ~WRITE_BACK;
15933 if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
15934 || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
15935 md_number_to_chars (buf, newval, INSN_SIZE);
15937 put_thumb32_insn (buf, newval);
15940 case BFD_RELOC_ARM_CP_OFF_IMM_S2:
15941 case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
15942 if (value < -255 || value > 255)
15943 as_bad_where (fixP->fx_file, fixP->fx_line,
15944 _("co-processor offset out of range"));
15945 goto cp_off_common;
15947 case BFD_RELOC_ARM_THUMB_OFFSET:
15948 newval = md_chars_to_number (buf, THUMB_SIZE);
15949 /* Exactly what ranges, and where the offset is inserted depends
15950 on the type of instruction, we can establish this from the
15952 switch (newval >> 12)
15954 case 4: /* PC load. */
15955 /* Thumb PC loads are somewhat odd, bit 1 of the PC is
15956 forced to zero for these loads; md_pcrel_from has already
15957 compensated for this. */
15959 as_bad_where (fixP->fx_file, fixP->fx_line,
15960 _("invalid offset, target not word aligned (0x%08lX)"),
15961 (((unsigned long) fixP->fx_frag->fr_address
15962 + (unsigned long) fixP->fx_where) & ~3)
15963 + (unsigned long) value);
15965 if (value & ~0x3fc)
15966 as_bad_where (fixP->fx_file, fixP->fx_line,
15967 _("invalid offset, value too big (0x%08lX)"),
15970 newval |= value >> 2;
15973 case 9: /* SP load/store. */
15974 if (value & ~0x3fc)
15975 as_bad_where (fixP->fx_file, fixP->fx_line,
15976 _("invalid offset, value too big (0x%08lX)"),
15978 newval |= value >> 2;
15981 case 6: /* Word load/store. */
15983 as_bad_where (fixP->fx_file, fixP->fx_line,
15984 _("invalid offset, value too big (0x%08lX)"),
15986 newval |= value << 4; /* 6 - 2. */
15989 case 7: /* Byte load/store. */
15991 as_bad_where (fixP->fx_file, fixP->fx_line,
15992 _("invalid offset, value too big (0x%08lX)"),
15994 newval |= value << 6;
15997 case 8: /* Halfword load/store. */
15999 as_bad_where (fixP->fx_file, fixP->fx_line,
16000 _("invalid offset, value too big (0x%08lX)"),
16002 newval |= value << 5; /* 6 - 1. */
16006 as_bad_where (fixP->fx_file, fixP->fx_line,
16007 "Unable to process relocation for thumb opcode: %lx",
16008 (unsigned long) newval);
16011 md_number_to_chars (buf, newval, THUMB_SIZE);
16014 case BFD_RELOC_ARM_THUMB_ADD:
16015 /* This is a complicated relocation, since we use it for all of
16016 the following immediate relocations:
16020 9bit ADD/SUB SP word-aligned
16021 10bit ADD PC/SP word-aligned
16023 The type of instruction being processed is encoded in the
16030 newval = md_chars_to_number (buf, THUMB_SIZE);
16032 int rd = (newval >> 4) & 0xf;
16033 int rs = newval & 0xf;
16034 int subtract = !!(newval & 0x8000);
16036 /* Check for HI regs, only very restricted cases allowed:
16037 Adjusting SP, and using PC or SP to get an address. */
16038 if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
16039 || (rs > 7 && rs != REG_SP && rs != REG_PC))
16040 as_bad_where (fixP->fx_file, fixP->fx_line,
16041 _("invalid Hi register with immediate"));
16043 /* If value is negative, choose the opposite instruction. */
16047 subtract = !subtract;
16049 as_bad_where (fixP->fx_file, fixP->fx_line,
16050 _("immediate value out of range"));
16055 if (value & ~0x1fc)
16056 as_bad_where (fixP->fx_file, fixP->fx_line,
16057 _("invalid immediate for stack address calculation"));
16058 newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
16059 newval |= value >> 2;
16061 else if (rs == REG_PC || rs == REG_SP)
16063 if (subtract || value & ~0x3fc)
16064 as_bad_where (fixP->fx_file, fixP->fx_line,
16065 _("invalid immediate for address calculation (value = 0x%08lX)"),
16066 (unsigned long) value);
16067 newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
16069 newval |= value >> 2;
16074 as_bad_where (fixP->fx_file, fixP->fx_line,
16075 _("immediate value out of range"));
16076 newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
16077 newval |= (rd << 8) | value;
16082 as_bad_where (fixP->fx_file, fixP->fx_line,
16083 _("immediate value out of range"));
16084 newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
16085 newval |= rd | (rs << 3) | (value << 6);
16088 md_number_to_chars (buf, newval, THUMB_SIZE);
16091 case BFD_RELOC_ARM_THUMB_IMM:
16092 newval = md_chars_to_number (buf, THUMB_SIZE);
16093 if (value < 0 || value > 255)
16094 as_bad_where (fixP->fx_file, fixP->fx_line,
16095 _("invalid immediate: %ld is too large"),
16098 md_number_to_chars (buf, newval, THUMB_SIZE);
16101 case BFD_RELOC_ARM_THUMB_SHIFT:
16102 /* 5bit shift value (0..32). LSL cannot take 32. */
16103 newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
16104 temp = newval & 0xf800;
16105 if (value < 0 || value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
16106 as_bad_where (fixP->fx_file, fixP->fx_line,
16107 _("invalid shift value: %ld"), (long) value);
16108 /* Shifts of zero must be encoded as LSL. */
16110 newval = (newval & 0x003f) | T_OPCODE_LSL_I;
16111 /* Shifts of 32 are encoded as zero. */
16112 else if (value == 32)
16114 newval |= value << 6;
16115 md_number_to_chars (buf, newval, THUMB_SIZE);
16118 case BFD_RELOC_VTABLE_INHERIT:
16119 case BFD_RELOC_VTABLE_ENTRY:
16123 case BFD_RELOC_UNUSED:
16125 as_bad_where (fixP->fx_file, fixP->fx_line,
16126 _("bad relocation fixup type (%d)"), fixP->fx_r_type);
16130 /* Translate internal representation of relocation info to BFD target
/* Build an arelent for FIXP in SECTION: pick the BFD reloc code from
   fixp->fx_r_type (handling PC-relative variants) and reject
   internal-only relocations that should have been resolved at assembly
   time.  NOTE(review): this excerpt is sampled -- declarations, some
   case labels, braces and return statements of the original function
   are not visible here.  */
16134 tc_gen_reloc (asection *section, fixS *fixp)
16137 bfd_reloc_code_real_type code;
16139 reloc = xmalloc (sizeof (arelent));
16141 reloc->sym_ptr_ptr = xmalloc (sizeof (asymbol *));
16142 *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
16143 reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
/* For RELA targets remove the PC bias from the addend; for REL targets
   record the reloc's own address in fx_offset instead.  */
16145 if (fixp->fx_pcrel)
16147 if (section->use_rela_p)
16148 fixp->fx_offset -= md_pcrel_from_section (fixp, section);
16150 fixp->fx_offset = reloc->address;
16152 reloc->addend = fixp->fx_offset;
16154 switch (fixp->fx_r_type)
/* Plain data relocs: switch to the PC-relative code when needed.  */
16157 if (fixp->fx_pcrel)
16159 code = BFD_RELOC_8_PCREL;
16164 if (fixp->fx_pcrel)
16166 code = BFD_RELOC_16_PCREL;
16171 if (fixp->fx_pcrel)
16173 code = BFD_RELOC_32_PCREL;
/* These relocation types pass through to the output unchanged.  */
16177 case BFD_RELOC_NONE:
16178 case BFD_RELOC_ARM_PCREL_BRANCH:
16179 case BFD_RELOC_ARM_PCREL_BLX:
16180 case BFD_RELOC_RVA:
16181 case BFD_RELOC_THUMB_PCREL_BRANCH7:
16182 case BFD_RELOC_THUMB_PCREL_BRANCH9:
16183 case BFD_RELOC_THUMB_PCREL_BRANCH12:
16184 case BFD_RELOC_THUMB_PCREL_BRANCH20:
16185 case BFD_RELOC_THUMB_PCREL_BRANCH23:
16186 case BFD_RELOC_THUMB_PCREL_BRANCH25:
16187 case BFD_RELOC_THUMB_PCREL_BLX:
16188 case BFD_RELOC_VTABLE_ENTRY:
16189 case BFD_RELOC_VTABLE_INHERIT:
16190 code = fixp->fx_r_type;
16193 case BFD_RELOC_ARM_LITERAL:
16194 case BFD_RELOC_ARM_HWLITERAL:
16195 /* If this is called then a literal has
16196 been referenced across a section boundary. */
16197 as_bad_where (fixp->fx_file, fixp->fx_line,
16198 _("literal referenced across section boundary"));
16202 case BFD_RELOC_ARM_GOT32:
16203 case BFD_RELOC_ARM_GOTOFF:
16204 case BFD_RELOC_ARM_PLT32:
16205 case BFD_RELOC_ARM_TARGET1:
16206 case BFD_RELOC_ARM_ROSEGREL32:
16207 case BFD_RELOC_ARM_SBREL32:
16208 case BFD_RELOC_ARM_PREL31:
16209 case BFD_RELOC_ARM_TARGET2:
16210 case BFD_RELOC_ARM_TLS_LE32:
16211 case BFD_RELOC_ARM_TLS_LDO32:
16212 case BFD_RELOC_ARM_PCREL_CALL:
16213 case BFD_RELOC_ARM_PCREL_JUMP:
16214 code = fixp->fx_r_type;
16217 case BFD_RELOC_ARM_TLS_GD32:
16218 case BFD_RELOC_ARM_TLS_IE32:
16219 case BFD_RELOC_ARM_TLS_LDM32:
16220 /* BFD will include the symbol's address in the addend.
16221 But we don't want that, so subtract it out again here. */
16222 if (!S_IS_COMMON (fixp->fx_addsy))
16223 reloc->addend -= (*reloc->sym_ptr_ptr)->value;
16224 code = fixp->fx_r_type;
/* Internal relocations must have been fully resolved during assembly;
   reaching these cases means a symbol escaped this file.  */
16228 case BFD_RELOC_ARM_IMMEDIATE:
16229 as_bad_where (fixp->fx_file, fixp->fx_line,
16230 _("internal relocation (type: IMMEDIATE) not fixed up"));
16233 case BFD_RELOC_ARM_ADRL_IMMEDIATE:
16234 as_bad_where (fixp->fx_file, fixp->fx_line,
16235 _("ADRL used for a symbol not defined in the same file"));
16238 case BFD_RELOC_ARM_OFFSET_IMM:
16239 if (section->use_rela_p)
16241 code = fixp->fx_r_type;
16245 if (fixp->fx_addsy != NULL
16246 && !S_IS_DEFINED (fixp->fx_addsy)
16247 && S_IS_LOCAL (fixp->fx_addsy))
16249 as_bad_where (fixp->fx_file, fixp->fx_line,
16250 _("undefined local label `%s'"),
16251 S_GET_NAME (fixp->fx_addsy));
16255 as_bad_where (fixp->fx_file, fixp->fx_line,
16256 _("internal_relocation (type: OFFSET_IMM) not fixed up"));
/* Anything else cannot be represented in the output; report it with a
   readable name.  */
16263 switch (fixp->fx_r_type)
16265 case BFD_RELOC_NONE: type = "NONE"; break;
16266 case BFD_RELOC_ARM_OFFSET_IMM8: type = "OFFSET_IMM8"; break;
16267 case BFD_RELOC_ARM_SHIFT_IMM: type = "SHIFT_IMM"; break;
16268 case BFD_RELOC_ARM_SMC: type = "SMC"; break;
16269 case BFD_RELOC_ARM_SWI: type = "SWI"; break;
16270 case BFD_RELOC_ARM_MULTI: type = "MULTI"; break;
16271 case BFD_RELOC_ARM_CP_OFF_IMM: type = "CP_OFF_IMM"; break;
16272 case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
16273 case BFD_RELOC_ARM_THUMB_ADD: type = "THUMB_ADD"; break;
16274 case BFD_RELOC_ARM_THUMB_SHIFT: type = "THUMB_SHIFT"; break;
16275 case BFD_RELOC_ARM_THUMB_IMM: type = "THUMB_IMM"; break;
16276 case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
16277 default: type = _("<unknown>"); break;
16279 as_bad_where (fixp->fx_file, fixp->fx_line,
16280 _("cannot represent %s relocation in this object file format"),
/* A 32-bit reference against GOT_symbol (presumably
   _GLOBAL_OFFSET_TABLE_ -- TODO confirm) is turned into a GOTPC
   relocation with the place's own address as addend.  */
16287 if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
16289 && fixp->fx_addsy == GOT_symbol)
16291 code = BFD_RELOC_ARM_GOTPC;
16292 reloc->addend = fixp->fx_offset = reloc->address;
16296 reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
16298 if (reloc->howto == NULL)
16300 as_bad_where (fixp->fx_file, fixp->fx_line,
16301 _("cannot represent %s relocation in this object file format"),
16302 bfd_get_reloc_code_name (code));
16306 /* HACK: Since arm ELF uses Rel instead of Rela, encode the
16307 vtable entry to be used in the relocation's section offset. */
16308 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
16309 reloc->address = fixp->fx_offset;
16314 /* This fix_new is called by cons via TC_CONS_FIX_NEW. */
/* Select a data relocation type for a SIZE-byte cons directive
   (8/16/32/64 bits) and emit the fix via fix_new_exp.
   NOTE(review): this excerpt is sampled; the parameter list and the
   switch's case labels are not visible here.  */
16317 cons_fix_new_arm (fragS * frag,
16322 bfd_reloc_code_real_type type;
16326 FIXME: @@ Should look at CPU word size. */
16330 type = BFD_RELOC_8;
16333 type = BFD_RELOC_16;
16337 type = BFD_RELOC_32;
16340 type = BFD_RELOC_64;
16344 fix_new_exp (frag, where, (int) size, exp, pcrel, type);
16347 #if defined OBJ_COFF || defined OBJ_ELF
/* Fix-validation hook.  Redirects a Thumb BL/BLX (BRANCH23) aimed at a
   defined non-Thumb function to that function's Thumb entry point.
   NOTE(review): excerpt is sampled; the function's braces and return
   are not visible here.  */
16349 arm_validate_fix (fixS * fixP)
16351 /* If the destination of the branch is a defined symbol which does not have
16352 the THUMB_FUNC attribute, then we must be calling a function which has
16353 the (interfacearm) attribute. We look for the Thumb entry point to that
16354 function and change the branch to refer to that function instead. */
16355 if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
16356 && fixP->fx_addsy != NULL
16357 && S_IS_DEFINED (fixP->fx_addsy)
16358 && ! THUMB_IS_FUNC (fixP->fx_addsy))
16360 fixP->fx_addsy = find_real_start (fixP->fx_addsy);
/* Decide whether FIXP must be kept as a relocation for the linker
   instead of being resolved by the assembler; falls through to
   generic_force_reloc for everything not special-cased here.  */
16366 arm_force_relocation (struct fix * fixp)
16368 #if defined (OBJ_COFF) && defined (TE_PE)
16369 if (fixp->fx_r_type == BFD_RELOC_RVA)
16373 /* Resolve these relocations even if the symbol is extern or weak. */
16374 if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
16375 || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
16376 || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
16377 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
16378 || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
16379 || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12)
16382 return generic_force_reloc (fixp);
16386 /* This is a little hack to help the gas/arm/adrl.s test. It prevents
16387 local labels from being added to the output symbol table when they
16388 are used with the ADRL pseudo op. The ADRL relocation should always
16389 be resolved before the binary is emitted, so it is safe to say that
16390 it is adjustable. */
/* Variant of arm_fix_adjustable used when the ELF/COFF version below is
   not built: only ADRL fixes are treated as adjustable (see the comment
   above).  NOTE(review): the return statements of this function are
   sampled out of this view.  */
16393 arm_fix_adjustable (fixS * fixP)
16395 if (fixP->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE)
16402 /* Relocations against Thumb function names must be left unadjusted,
16403 so that the linker can use this information to correctly set the
16404 bottom bit of their addresses. The MIPS version of this function
16405 also prevents relocations that are mips-16 specific, but I do not
16406 know why it does this.
16409 There is one other problem that ought to be addressed here, but
16410 which currently is not: Taking the address of a label (rather
16411 than a function) and then later jumping to that address. Such
16412 addresses also ought to have their bottom bit set (assuming that
16413 they reside in Thumb code), but at the moment they will not. */
/* ELF/COFF variant: decide whether FIXP may be adjusted to be relative
   to its section symbol.  Thumb function symbols, vtable relocs and
   GOT/PLT/TLS relocs keep their named symbol.
   NOTE(review): excerpt is sampled; the return statements between the
   tests are not visible here.  */
16416 arm_fix_adjustable (fixS * fixP)
16418 if (fixP->fx_addsy == NULL)
/* The linker needs to see the Thumb-ness of the target symbol so it
   can set the low bit of the address.  */
16421 if (THUMB_IS_FUNC (fixP->fx_addsy)
16422 && fixP->fx_subsy == NULL)
16425 /* We need the symbol name for the VTABLE entries. */
16426 if ( fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
16427 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
16430 /* Don't allow symbols to be discarded on GOT related relocs. */
16431 if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
16432 || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
16433 || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
16434 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
16435 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
16436 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
16437 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
16438 || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
16439 || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
/* Return the BFD target vector name for the configured environment
   (Symbian / VxWorks / generic) and endianness.
   NOTE(review): the leading #ifdef line of this chain is sampled out of
   this view.  */
16446 elf32_arm_target_format (void)
16449 return (target_big_endian
16450 ? "elf32-bigarm-symbian"
16451 : "elf32-littlearm-symbian");
16452 #elif defined (TE_VXWORKS)
16453 return (target_big_endian
16454 ? "elf32-bigarm-vxworks"
16455 : "elf32-littlearm-vxworks");
16457 if (target_big_endian)
16458 return "elf32-bigarm";
16460 return "elf32-littlearm";
/* ELF symbol-frobbing hook: defer to the generic ELF implementation.  */
16465 armelf_frob_symbol (symbolS * symp,
16468 elf_frob_symbol (symp, puntp);
16472 /* MD interface: Finalization. */
16474 /* A good place to do this, although this was probably not intended
16475 for this kind of use. We need to dump the literal pool before
16476 references are made to a null symbol pointer. */
/* Flush every pending literal pool into its owning (sub)section.
   NOTE(review): the enclosing function header was sampled out of this
   view -- presumably the MD finalization hook (arm_cleanup); confirm
   against the full file.  */
16481 literal_pool * pool;
16483 for (pool = list_of_pools; pool; pool = pool->next)
16485 /* Put it at the end of the relevant section. */
16486 subseg_set (pool->section, pool->sub_section);
16488 arm_elf_change_section ();
16494 /* Adjust the symbol table. This marks Thumb symbols as distinct from
/* Walk the symbol table and tag Thumb symbols so the object format
   preserves their Thumb-ness: COFF storage classes in the first pass,
   ELF st_info (STT_ARM_TFUNC / STT_ARM_16BIT) in the second.
   NOTE(review): excerpt is sampled; the OBJ_COFF/OBJ_ELF preprocessor
   guards and several braces/case labels are not visible here.  */
16498 arm_adjust_symtab (void)
16503 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
16505 if (ARM_IS_THUMB (sym))
16507 if (THUMB_IS_FUNC (sym))
16509 /* Mark the symbol as a Thumb function. */
16510 if ( S_GET_STORAGE_CLASS (sym) == C_STAT
16511 || S_GET_STORAGE_CLASS (sym) == C_LABEL) /* This can happen! */
16512 S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);
16514 else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
16515 S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
16517 as_bad (_("%s: unexpected function type: %d"),
16518 S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
16520 else switch (S_GET_STORAGE_CLASS (sym))
16523 S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
16526 S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
16529 S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
/* Interworking-capable symbols get all COFF n_flags bits set.  */
16537 if (ARM_IS_INTERWORK (sym))
16538 coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
/* Second pass (ELF): encode Thumb-ness in the symbol's st_info,
   skipping the $a/$t/$d mapping symbols.  */
16545 for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
16547 if (ARM_IS_THUMB (sym))
16549 elf_symbol_type * elf_sym;
16551 elf_sym = elf_symbol (symbol_get_bfdsym (sym));
16552 bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);
16554 if (! bfd_is_arm_mapping_symbol_name (elf_sym->symbol.name))
16556 /* If it's a .thumb_func, declare it as so,
16557 otherwise tag label as .code 16. */
16558 if (THUMB_IS_FUNC (sym))
16559 elf_sym->internal_elf_sym.st_info =
16560 ELF_ST_INFO (bind, STT_ARM_TFUNC);
16562 elf_sym->internal_elf_sym.st_info =
16563 ELF_ST_INFO (bind, STT_ARM_16BIT);
16570 /* MD interface: Initialization. */
/* Pre-parse the floating point constant strings in fp_const[] into
   internal flonum form (fp_values[]) so they can be matched quickly
   during assembly.  NOTE(review): the abort on parse failure is sampled
   out of this view.  */
16573 set_constant_flonums (void)
16577 for (i = 0; i < NUM_FLOAT_VALS; i++)
16578 if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
/* Body of the MD initialization hook (md_begin): build the opcode /
   condition / shift / PSR / register / reloc / barrier hash tables,
   resolve the CPU and FPU selection from command-line options, set the
   object file's private flags and record the BFD machine type.
   NOTE(review): the function header and many interior lines (braces,
   #else/#endif lines, some assignments) are sampled out of this view.  */
16588 if ( (arm_ops_hsh = hash_new ()) == NULL
16589 || (arm_cond_hsh = hash_new ()) == NULL
16590 || (arm_shift_hsh = hash_new ()) == NULL
16591 || (arm_psr_hsh = hash_new ()) == NULL
16592 || (arm_v7m_psr_hsh = hash_new ()) == NULL
16593 || (arm_reg_hsh = hash_new ()) == NULL
16594 || (arm_reloc_hsh = hash_new ()) == NULL
16595 || (arm_barrier_opt_hsh = hash_new ()) == NULL)
16596 as_fatal (_("virtual memory exhausted"));
/* Populate each hash table from its static table.  */
16598 for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
16599 hash_insert (arm_ops_hsh, insns[i].template, (PTR) (insns + i));
16600 for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
16601 hash_insert (arm_cond_hsh, conds[i].template, (PTR) (conds + i));
16602 for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
16603 hash_insert (arm_shift_hsh, shift_names[i].name, (PTR) (shift_names + i));
16604 for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
16605 hash_insert (arm_psr_hsh, psrs[i].template, (PTR) (psrs + i));
16606 for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
16607 hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template, (PTR) (v7m_psrs + i));
16608 for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
16609 hash_insert (arm_reg_hsh, reg_names[i].name, (PTR) (reg_names + i));
16611 i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
16613 hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template,
16614 (PTR) (barrier_opt_names + i));
16616 for (i = 0; i < sizeof (reloc_names) / sizeof (struct reloc_entry); i++)
16617 hash_insert (arm_reloc_hsh, reloc_names[i].name, (PTR) (reloc_names + i));
16620 set_constant_flonums ();
16622 /* Set the cpu variant based on the command-line options. We prefer
16623 -mcpu= over -march= if both are set (as for GCC); and we prefer
16624 -mfpu= over any other way of setting the floating point unit.
16625 Use of legacy options with new options is faulted. */
16628 if (mcpu_cpu_opt || march_cpu_opt)
16629 as_bad (_("use of old and new-style options to set CPU type"));
16631 mcpu_cpu_opt = legacy_cpu;
16633 else if (!mcpu_cpu_opt)
16634 mcpu_cpu_opt = march_cpu_opt;
16639 as_bad (_("use of old and new-style options to set FPU type"));
16641 mfpu_opt = legacy_fpu;
16643 else if (!mfpu_opt)
16645 #if !(defined (TE_LINUX) || defined (TE_NetBSD) || defined (TE_VXWORKS))
16646 /* Some environments specify a default FPU. If they don't, infer it
16647 from the processor. */
16649 mfpu_opt = mcpu_fpu_opt;
16651 mfpu_opt = march_fpu_opt;
16653 mfpu_opt = &fpu_default;
16660 mfpu_opt = &fpu_default;
16661 else if (ARM_CPU_HAS_FEATURE (*mcpu_fpu_opt, arm_ext_v5))
16662 mfpu_opt = &fpu_arch_vfp_v2;
16664 mfpu_opt = &fpu_arch_fpa;
/* Fall back to the configured default CPU, or accept any
   architecture, when none was selected explicitly.  */
16670 mcpu_cpu_opt = &cpu_default;
16671 selected_cpu = cpu_default;
16675 selected_cpu = *mcpu_cpu_opt;
16677 mcpu_cpu_opt = &arm_arch_any;
16680 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
16682 arm_arch_used = thumb_arch_used = arm_arch_none;
16684 #if defined OBJ_COFF || defined OBJ_ELF
16686 unsigned int flags = 0;
16688 #if defined OBJ_ELF
16689 flags = meabi_flags;
16691 switch (meabi_flags)
16693 case EF_ARM_EABI_UNKNOWN:
16695 /* Set the flags in the private structure. */
16696 if (uses_apcs_26) flags |= F_APCS26;
16697 if (support_interwork) flags |= F_INTERWORK;
16698 if (uses_apcs_float) flags |= F_APCS_FLOAT;
16699 if (pic_code) flags |= F_PIC;
16700 if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
16701 flags |= F_SOFT_FLOAT;
16703 switch (mfloat_abi_opt)
16705 case ARM_FLOAT_ABI_SOFT:
16706 case ARM_FLOAT_ABI_SOFTFP:
16707 flags |= F_SOFT_FLOAT;
16710 case ARM_FLOAT_ABI_HARD:
16711 if (flags & F_SOFT_FLOAT)
16712 as_bad (_("hard-float conflicts with specified fpu"));
16716 /* Using pure-endian doubles (even if soft-float). */
16717 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
16718 flags |= F_VFP_FLOAT;
16720 #if defined OBJ_ELF
16721 if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
16722 flags |= EF_ARM_MAVERICK_FLOAT;
16725 case EF_ARM_EABI_VER4:
16726 case EF_ARM_EABI_VER5:
16727 /* No additional flags to set. */
16734 bfd_set_private_flags (stdoutput, flags);
16736 /* We have run out of flags in the COFF header to encode the
16737 status of ATPCS support, so instead we create a dummy,
16738 empty, debug section called .arm.atpcs. */
16743 sec = bfd_make_section (stdoutput, ".arm.atpcs");
16747 bfd_set_section_flags
16748 (stdoutput, sec, SEC_READONLY | SEC_DEBUGGING /* | SEC_HAS_CONTENTS */);
16749 bfd_set_section_size (stdoutput, sec, 0);
16750 bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
/* Map the merged feature set onto a BFD machine number, most
   specific extension first.  */
16756 /* Record the CPU type as well. */
16757 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
16758 mach = bfd_mach_arm_iWMMXt;
16759 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
16760 mach = bfd_mach_arm_XScale;
16761 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
16762 mach = bfd_mach_arm_ep9312;
16763 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
16764 mach = bfd_mach_arm_5TE;
16765 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
16767 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
16768 mach = bfd_mach_arm_5T;
16770 mach = bfd_mach_arm_5;
16772 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
16774 if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
16775 mach = bfd_mach_arm_4T;
16777 mach = bfd_mach_arm_4;
16779 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
16780 mach = bfd_mach_arm_3M;
16781 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
16782 mach = bfd_mach_arm_3;
16783 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
16784 mach = bfd_mach_arm_2a;
16785 else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
16786 mach = bfd_mach_arm_2;
16788 mach = bfd_mach_arm_unknown;
16790 bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
16793 /* Command line processing. */
16796 Invocation line includes a switch not recognized by the base assembler.
16797 See if it's a processor-specific option.
16799 This routine is somewhat complicated by the need for backwards
16800 compatibility (since older releases of gcc can't be changed).
16801 The new options try to make the interface as compatible as
16804 New options (supported) are:
16806 -mcpu=<cpu name> Assemble for selected processor
16807 -march=<architecture name> Assemble for selected architecture
16808 -mfpu=<fpu architecture> Assemble for selected FPU.
16809 -EB/-mbig-endian Big-endian
16810 -EL/-mlittle-endian Little-endian
16811 -k Generate PIC code
16812 -mthumb Start in Thumb mode
16813 -mthumb-interwork Code supports ARM/Thumb interworking
16815 For now we will also provide support for:
16817 -mapcs-32 32-bit Program counter
16818 -mapcs-26 26-bit Program counter
16819 -mapcs-float Floats passed in FP registers
16820 -mapcs-reentrant Reentrant code
16822 (sometime these will probably be replaced with -mapcs=<list of options>
16823 and -matpcs=<list of options>)
16825 The remaining options are only supported for backwards compatibility.
16826 Cpu variants, the arm part is optional:
16827 -m[arm]1 Currently not supported.
16828 -m[arm]2, -m[arm]250 Arm 2 and Arm 250 processor
16829 -m[arm]3 Arm 3 processor
16830 -m[arm]6[xx], Arm 6 processors
16831 -m[arm]7[xx][t][[d]m] Arm 7 processors
16832 -m[arm]8[10] Arm 8 processors
16833 -m[arm]9[20][tdmi] Arm 9 processors
16834 -mstrongarm[110[0]] StrongARM processors
16835 -mxscale XScale processors
16836 -m[arm]v[2345[t[e]]] Arm architectures
16837 -mall All (except the ARM1)
16839 -mfpa10, -mfpa11 FPA10 and 11 co-processor instructions
16840 -mfpe-old (No float load/store multiples)
16841 -mvfpxd VFP Single precision
16843 -mno-fpu Disable all floating point instructions
16845 The following CPU names are recognized:
16846 arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
16847 arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
16848 arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
16849 arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
16850 arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
16851 arm10t arm10e, arm1020t, arm1020e, arm10200e,
16852 strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
16856 const char * md_shortopts = "m:k";
16858 #ifdef ARM_BI_ENDIAN
16859 #define OPTION_EB (OPTION_MD_BASE + 0)
16860 #define OPTION_EL (OPTION_MD_BASE + 1)
16862 #if TARGET_BYTES_BIG_ENDIAN
16863 #define OPTION_EB (OPTION_MD_BASE + 0)
16865 #define OPTION_EL (OPTION_MD_BASE + 1)
/* Long options recognized by getopt in addition to md_shortopts;
   terminated by a NULL entry.  */
16869 struct option md_longopts[] =
16872 {"EB", no_argument, NULL, OPTION_EB},
16875 {"EL", no_argument, NULL, OPTION_EL},
16877 {NULL, no_argument, NULL, 0}
16880 size_t md_longopts_size = sizeof (md_longopts);
/* Describes one simple command-line option that sets an integer
   variable to a fixed value when matched.  */
16882 struct arm_option_table
16884 char *option; /* Option name to match. */
16885 char *help; /* Help information. */
16886 int *var; /* Variable to change. */
16887 int value; /* What to change it to. */
16888 char *deprecated; /* If non-null, print this message. */
/* Simple flag-setting options handled via arm_option_table; NULL
   terminated.  */
16891 struct arm_option_table arm_opts[] =
16893 {"k", N_("generate PIC code"), &pic_code, 1, NULL},
16894 {"mthumb", N_("assemble Thumb code"), &thumb_mode, 1, NULL},
16895 {"mthumb-interwork", N_("support ARM/Thumb interworking"),
16896 &support_interwork, 1, NULL},
16897 {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
16898 {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
16899 {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
16901 {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
16902 {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
16903 {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
16904 {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
16907 /* These are recognized by the assembler, but have no effect on code. */
16908 {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
16909 {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
16910 {NULL, NULL, NULL, 0, NULL}
/* Describes one deprecated legacy option that maps onto an ARM feature
   set; the deprecated message tells the user the modern spelling.  */
16913 struct arm_legacy_option_table
16915 char *option; /* Option name to match. */
16916 const arm_feature_set **var; /* Variable to change. */
16917 const arm_feature_set value; /* What to change it to. */
16918 char *deprecated; /* If non-null, print this message. */
/* Deprecated per-CPU, per-architecture and per-FPU options, kept only
   for backward compatibility.  Each entry maps an old-style option to
   the feature set it implies and to the modern spelling suggested in
   the deprecation message.  */
16921 const struct arm_legacy_option_table arm_legacy_opts[] =
16923 /* DON'T add any new processors to this list -- we want the whole list
16924 to go away... Add them to the processors table instead. */
16925 {"marm1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
16926 {"m1", &legacy_cpu, ARM_ARCH_V1, N_("use -mcpu=arm1")},
16927 {"marm2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
16928 {"m2", &legacy_cpu, ARM_ARCH_V2, N_("use -mcpu=arm2")},
16929 {"marm250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
16930 {"m250", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
16931 {"marm3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
16932 {"m3", &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
16933 {"marm6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
16934 {"m6", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm6")},
16935 {"marm600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
16936 {"m600", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm600")},
16937 {"marm610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
16938 {"m610", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm610")},
16939 {"marm620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
16940 {"m620", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm620")},
16941 {"marm7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
16942 {"m7", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7")},
16943 {"marm70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
16944 {"m70", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm70")},
16945 {"marm700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
16946 {"m700", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700")},
16947 {"marm700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
16948 {"m700i", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm700i")},
16949 {"marm710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
16950 {"m710", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710")},
16951 {"marm710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
16952 {"m710c", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm710c")},
16953 {"marm720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
16954 {"m720", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm720")},
16955 {"marm7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
16956 {"m7d", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7d")},
16957 {"marm7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
16958 {"m7di", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7di")},
16959 {"marm7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
16960 {"m7m", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
16961 {"marm7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
16962 {"m7dm", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
16963 {"marm7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
16964 {"m7dmi", &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
16965 {"marm7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
16966 {"m7100", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7100")},
16967 {"marm7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
16968 {"m7500", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500")},
16969 {"marm7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
16970 {"m7500fe", &legacy_cpu, ARM_ARCH_V3, N_("use -mcpu=arm7500fe")},
16971 {"marm7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
16972 {"m7t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
16973 {"marm7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
16974 {"m7tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
16975 {"marm710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
16976 {"m710t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
16977 {"marm720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
16978 {"m720t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
16979 {"marm740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
16980 {"m740t", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
16981 {"marm8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
16982 {"m8", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm8")},
16983 {"marm810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
16984 {"m810", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=arm810")},
16985 {"marm9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
16986 {"m9", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
16987 {"marm9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
16988 {"m9tdmi", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
16989 {"marm920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
16990 {"m920", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
16991 {"marm940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
16992 {"m940", &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
16993 {"mstrongarm", &legacy_cpu, ARM_ARCH_V4, N_("use -mcpu=strongarm")},
16994 {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
16995 N_("use -mcpu=strongarm110")},
16996 {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
16997 N_("use -mcpu=strongarm1100")},
16998 {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
16999 N_("use -mcpu=strongarm1110")},
17000 {"mxscale", &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
17001 {"miwmmxt", &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
17002 {"mall", &legacy_cpu, ARM_ANY, N_("use -mcpu=all")},
17004 /* Architecture variants -- don't add any more to this list either. */
17005 {"mv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
17006 {"marmv2", &legacy_cpu, ARM_ARCH_V2, N_("use -march=armv2")},
17007 {"mv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
17008 {"marmv2a", &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
17009 {"mv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
17010 {"marmv3", &legacy_cpu, ARM_ARCH_V3, N_("use -march=armv3")},
17011 {"mv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
17012 {"marmv3m", &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
17013 {"mv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
17014 {"marmv4", &legacy_cpu, ARM_ARCH_V4, N_("use -march=armv4")},
17015 {"mv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
17016 {"marmv4t", &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
17017 {"mv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
17018 {"marmv5", &legacy_cpu, ARM_ARCH_V5, N_("use -march=armv5")},
17019 {"mv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
17020 {"marmv5t", &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
17021 {"mv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
17022 {"marmv5e", &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
17024 /* Floating point variants -- don't add any more to this list either. */
17025 {"mfpe-old", &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
17026 {"mfpa10", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
17027 {"mfpa11", &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
17028 {"mno-fpu", &legacy_fpu, ARM_ARCH_NONE,
17029 N_("use either -mfpu=softfpa or -mfpu=softvfp")},
17031 {NULL, NULL, ARM_ARCH_NONE, NULL}
/* Describes one -mcpu= value: the CPU's feature set, the FPU assumed
   when the user gives none, and an optional canonical spelling for the
   Tag_CPU_name EABI attribute.  */
17034 struct arm_cpu_option_table
17037 const arm_feature_set value;
17038 /* For some CPUs we assume an FPU unless the user explicitly sets
17040 const arm_feature_set default_fpu;
17041 /* The canonical name of the CPU, or NULL to use NAME converted to upper
17043 const char *canonical_name;
17046 /* This list should, at a minimum, contain all the cpu names
17047 recognized by GCC. */
/* Searched linearly by arm_parse_cpu and s_arm_cpu; the first entry
   ("all") is skipped by the .cpu directive.  */
17048 static const struct arm_cpu_option_table arm_cpus[] =
17050 {"all", ARM_ANY, FPU_ARCH_FPA, NULL},
17051 {"arm1", ARM_ARCH_V1, FPU_ARCH_FPA, NULL},
17052 {"arm2", ARM_ARCH_V2, FPU_ARCH_FPA, NULL},
17053 {"arm250", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
17054 {"arm3", ARM_ARCH_V2S, FPU_ARCH_FPA, NULL},
17055 {"arm6", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17056 {"arm60", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17057 {"arm600", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17058 {"arm610", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17059 {"arm620", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17060 {"arm7", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17061 {"arm7m", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17062 {"arm7d", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17063 {"arm7dm", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17064 {"arm7di", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17065 {"arm7dmi", ARM_ARCH_V3M, FPU_ARCH_FPA, NULL},
17066 {"arm70", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17067 {"arm700", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17068 {"arm700i", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17069 {"arm710", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17070 {"arm710t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17071 {"arm720", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17072 {"arm720t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17073 {"arm740t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17074 {"arm710c", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17075 {"arm7100", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17076 {"arm7500", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17077 {"arm7500fe", ARM_ARCH_V3, FPU_ARCH_FPA, NULL},
17078 {"arm7t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17079 {"arm7tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17080 {"arm7tdmi-s", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17081 {"arm8", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17082 {"arm810", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17083 {"strongarm", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17084 {"strongarm1", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17085 {"strongarm110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17086 {"strongarm1100", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17087 {"strongarm1110", ARM_ARCH_V4, FPU_ARCH_FPA, NULL},
17088 {"arm9", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17089 {"arm920", ARM_ARCH_V4T, FPU_ARCH_FPA, "ARM920T"},
17090 {"arm920t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17091 {"arm922t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17092 {"arm940t", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17093 {"arm9tdmi", ARM_ARCH_V4T, FPU_ARCH_FPA, NULL},
17094 /* For V5 or later processors we default to using VFP; but the user
17095 should really set the FPU type explicitly. */
17096 {"arm9e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17097 {"arm9e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17098 {"arm926ej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
17099 {"arm926ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM926EJ-S"},
17100 {"arm926ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
17101 {"arm946e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17102 {"arm946e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM946E-S"},
17103 {"arm946e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17104 {"arm966e-r0", ARM_ARCH_V5TExP, FPU_ARCH_VFP_V2, NULL},
17105 {"arm966e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM966E-S"},
17106 {"arm966e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17107 {"arm968e-s", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17108 {"arm10t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17109 {"arm10tdmi", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17110 {"arm10e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17111 {"arm1020", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, "ARM1020E"},
17112 {"arm1020t", ARM_ARCH_V5T, FPU_ARCH_VFP_V1, NULL},
17113 {"arm1020e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17114 {"arm1022e", ARM_ARCH_V5TE, FPU_ARCH_VFP_V2, NULL},
17115 {"arm1026ejs", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, "ARM1026EJ-S"},
17116 {"arm1026ej-s", ARM_ARCH_V5TEJ, FPU_ARCH_VFP_V2, NULL},
17117 {"arm1136js", ARM_ARCH_V6, FPU_NONE, "ARM1136J-S"},
17118 {"arm1136j-s", ARM_ARCH_V6, FPU_NONE, NULL},
17119 {"arm1136jfs", ARM_ARCH_V6, FPU_ARCH_VFP_V2, "ARM1136JF-S"},
17120 {"arm1136jf-s", ARM_ARCH_V6, FPU_ARCH_VFP_V2, NULL},
17121 {"mpcore", ARM_ARCH_V6K, FPU_ARCH_VFP_V2, NULL},
17122 {"mpcorenovfp", ARM_ARCH_V6K, FPU_NONE, NULL},
17123 {"arm1156t2-s", ARM_ARCH_V6T2, FPU_NONE, NULL},
17124 {"arm1156t2f-s", ARM_ARCH_V6T2, FPU_ARCH_VFP_V2, NULL},
17125 {"arm1176jz-s", ARM_ARCH_V6ZK, FPU_NONE, NULL},
17126 {"arm1176jzf-s", ARM_ARCH_V6ZK, FPU_ARCH_VFP_V2, NULL},
17127 {"cortex-a8", ARM_ARCH_V7A, ARM_FEATURE(0, FPU_VFP_V3
17128 | FPU_NEON_EXT_V1),
17130 {"cortex-r4", ARM_ARCH_V7R, FPU_NONE, NULL},
17131 {"cortex-m3", ARM_ARCH_V7M, FPU_NONE, NULL},
17132 /* ??? XSCALE is really an architecture. */
17133 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
17134 /* ??? iwmmxt is not a processor. */
17135 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP_V2, NULL},
17136 {"i80200", ARM_ARCH_XSCALE, FPU_ARCH_VFP_V2, NULL},
/* Maverick: ARMv4T core plus the Cirrus MaverickCrunch coprocessor.  */
17138 {"ep9312", ARM_FEATURE(ARM_AEXT_V4T, ARM_CEXT_MAVERICK), FPU_ARCH_MAVERICK, "ARM920T"},
17139 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL}
/* Describes one -march= value: the architecture's feature set and the
   FPU assumed when the user gives none.  */
17142 struct arm_arch_option_table
17145 const arm_feature_set value;
17146 const arm_feature_set default_fpu;
17149 /* This list should, at a minimum, contain all the architecture names
17150 recognized by GCC. */
/* Searched by arm_parse_arch (exact streq match) and by s_arm_arch,
   which skips the leading "all" entry.  */
17151 static const struct arm_arch_option_table arm_archs[] =
17153 {"all", ARM_ANY, FPU_ARCH_FPA},
17154 {"armv1", ARM_ARCH_V1, FPU_ARCH_FPA},
17155 {"armv2", ARM_ARCH_V2, FPU_ARCH_FPA},
17156 {"armv2a", ARM_ARCH_V2S, FPU_ARCH_FPA},
17157 {"armv2s", ARM_ARCH_V2S, FPU_ARCH_FPA},
17158 {"armv3", ARM_ARCH_V3, FPU_ARCH_FPA},
17159 {"armv3m", ARM_ARCH_V3M, FPU_ARCH_FPA},
17160 {"armv4", ARM_ARCH_V4, FPU_ARCH_FPA},
17161 {"armv4xm", ARM_ARCH_V4xM, FPU_ARCH_FPA},
17162 {"armv4t", ARM_ARCH_V4T, FPU_ARCH_FPA},
17163 {"armv4txm", ARM_ARCH_V4TxM, FPU_ARCH_FPA},
17164 {"armv5", ARM_ARCH_V5, FPU_ARCH_VFP},
17165 {"armv5t", ARM_ARCH_V5T, FPU_ARCH_VFP},
17166 {"armv5txm", ARM_ARCH_V5TxM, FPU_ARCH_VFP},
17167 {"armv5te", ARM_ARCH_V5TE, FPU_ARCH_VFP},
17168 {"armv5texp", ARM_ARCH_V5TExP, FPU_ARCH_VFP},
17169 {"armv5tej", ARM_ARCH_V5TEJ, FPU_ARCH_VFP},
17170 {"armv6", ARM_ARCH_V6, FPU_ARCH_VFP},
17171 {"armv6j", ARM_ARCH_V6, FPU_ARCH_VFP},
17172 {"armv6k", ARM_ARCH_V6K, FPU_ARCH_VFP},
17173 {"armv6z", ARM_ARCH_V6Z, FPU_ARCH_VFP},
17174 {"armv6zk", ARM_ARCH_V6ZK, FPU_ARCH_VFP},
17175 {"armv6t2", ARM_ARCH_V6T2, FPU_ARCH_VFP},
17176 {"armv6kt2", ARM_ARCH_V6KT2, FPU_ARCH_VFP},
17177 {"armv6zt2", ARM_ARCH_V6ZT2, FPU_ARCH_VFP},
17178 {"armv6zkt2", ARM_ARCH_V6ZKT2, FPU_ARCH_VFP},
17179 {"armv7", ARM_ARCH_V7, FPU_ARCH_VFP},
17180 {"armv7a", ARM_ARCH_V7A, FPU_ARCH_VFP},
17181 {"armv7r", ARM_ARCH_V7R, FPU_ARCH_VFP},
17182 {"armv7m", ARM_ARCH_V7M, FPU_ARCH_VFP},
17183 {"xscale", ARM_ARCH_XSCALE, FPU_ARCH_VFP},
17184 {"iwmmxt", ARM_ARCH_IWMMXT, FPU_ARCH_VFP},
17185 {NULL, ARM_ARCH_NONE, ARM_ARCH_NONE}
17188 /* ISA extensions in the co-processor space. */
/* Name -> feature-set pair, shared by the extension and FPU tables.  */
17189 struct arm_option_cpu_value_table
17192 const arm_feature_set value;
/* "+ext" suffixes accepted after -mcpu=/-march= (see
   arm_parse_extension).  */
17195 static const struct arm_option_cpu_value_table arm_extensions[] =
17197 {"maverick", ARM_FEATURE (0, ARM_CEXT_MAVERICK)},
17198 {"xscale", ARM_FEATURE (0, ARM_CEXT_XSCALE)},
17199 {"iwmmxt", ARM_FEATURE (0, ARM_CEXT_IWMMXT)},
17200 {NULL, ARM_ARCH_NONE}
17203 /* This list should, at a minimum, contain all the fpu names
17204 recognized by GCC. */
/* -mfpu= values, matched exactly by arm_parse_fpu and s_arm_fpu.
   Several entries are aliases naming the CPU the FPU shipped with.  */
17205 static const struct arm_option_cpu_value_table arm_fpus[] =
17207 {"softfpa", FPU_NONE},
17208 {"fpe", FPU_ARCH_FPE},
17209 {"fpe2", FPU_ARCH_FPE},
17210 {"fpe3", FPU_ARCH_FPA}, /* Third release supports LFM/SFM. */
17211 {"fpa", FPU_ARCH_FPA},
17212 {"fpa10", FPU_ARCH_FPA},
17213 {"fpa11", FPU_ARCH_FPA},
17214 {"arm7500fe", FPU_ARCH_FPA},
17215 {"softvfp", FPU_ARCH_VFP},
17216 {"softvfp+vfp", FPU_ARCH_VFP_V2},
17217 {"vfp", FPU_ARCH_VFP_V2},
17218 {"vfp9", FPU_ARCH_VFP_V2},
17219 {"vfp3", FPU_ARCH_VFP_V3},
17220 {"vfp10", FPU_ARCH_VFP_V2},
17221 {"vfp10-r0", FPU_ARCH_VFP_V1},
17222 {"vfpxd", FPU_ARCH_VFP_V1xD},
17223 {"arm1020t", FPU_ARCH_VFP_V1},
17224 {"arm1020e", FPU_ARCH_VFP_V2},
17225 {"arm1136jfs", FPU_ARCH_VFP_V2},
17226 {"arm1136jf-s", FPU_ARCH_VFP_V2},
17227 {"maverick", FPU_ARCH_MAVERICK},
17228 {"neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1},
17229 {NULL, ARM_ARCH_NONE}
/* Simple name -> integer-value mapping used for -mfloat-abi= and
   -meabi= parsing.  NOTE(review): the struct's member declarations are
   elided in this view.  */
17232 struct arm_option_value_table
/* Values accepted by -mfloat-abi= (see arm_parse_float_abi).  */
17238 static const struct arm_option_value_table arm_float_abis[] =
17240 {"hard", ARM_FLOAT_ABI_HARD},
17241 {"softfp", ARM_FLOAT_ABI_SOFTFP},
17242 {"soft", ARM_FLOAT_ABI_SOFT},
17247 /* We only know how to output GNU and ver 4/5 (AAELF) formats. */
/* Values accepted by -meabi= (see arm_parse_eabi).  */
17248 static const struct arm_option_value_table arm_eabis[] =
17250 {"gnu", EF_ARM_EABI_UNKNOWN},
17251 {"4", EF_ARM_EABI_VER4},
17252 {"5", EF_ARM_EABI_VER5},
/* Describes an option taking a sub-argument (e.g. -mcpu=<name>): the
   text after the matched prefix is handed to FUNC for decoding.  */
17257 struct arm_long_option_table
17259 char * option; /* Substring to match. */
17260 char * help; /* Help information. */
17261 int (* func) (char * subopt); /* Function to decode sub-option. */
17262 char * deprecated; /* If non-null, print this message. */
/* Parse STR, a "+ext[+ext...]" suffix following a -mcpu=/-march= value,
   looking each name up in arm_extensions and merging its features into
   a heap-allocated copy of **OPT_P (the original feature set is never
   modified).  Reports an error via as_bad for a malformed or unknown
   extension.  NOTE(review): interior lines (braces, locals, returns)
   are elided in this view; only the visible lines are shown.  */
17266 arm_parse_extension (char * str, const arm_feature_set **opt_p)
17268 arm_feature_set *ext_set = xmalloc (sizeof (arm_feature_set));
17270 /* Copy the feature set, so that we can modify it. */
17271 *ext_set = **opt_p;
17274 while (str != NULL && *str != 0)
17276 const struct arm_option_cpu_value_table * opt;
17282 as_bad (_("invalid architectural extension"));
17287 ext = strchr (str, '+');
17290 optlen = ext - str;
17292 optlen = strlen (str);
17296 as_bad (_("missing architectural extension"));
17300 for (opt = arm_extensions; opt->name != NULL; opt++)
17301 if (strncmp (opt->name, str, optlen) == 0)
17303 ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->value);
17307 if (opt->name == NULL)
/* Fixed typo in user-visible diagnostic: "extnsion" -> "extension".  */
17309 as_bad (_("unknown architectural extension `%s'"), str);
/* Decode the argument of -mcpu=.  Matches the name (up to any '+')
   against arm_cpus, records the CPU and default FPU feature sets, fills
   selected_cpu_name (canonical spelling, or the name upper-cased), and
   hands any "+ext" suffix to arm_parse_extension.  NOTE(review):
   interior lines are elided in this view.  */
17320 arm_parse_cpu (char * str)
17322 const struct arm_cpu_option_table * opt;
17323 char * ext = strchr (str, '+');
17327 optlen = ext - str;
17329 optlen = strlen (str);
17333 as_bad (_("missing cpu name `%s'"), str);
17337 for (opt = arm_cpus; opt->name != NULL; opt++)
17338 if (strncmp (opt->name, str, optlen) == 0)
17340 mcpu_cpu_opt = &opt->value;
17341 mcpu_fpu_opt = &opt->default_fpu;
17342 if (opt->canonical_name)
17343 strcpy(selected_cpu_name, opt->canonical_name);
/* No canonical name: upper-case the table entry's name instead.  */
17347 for (i = 0; i < optlen; i++)
17348 selected_cpu_name[i] = TOUPPER (opt->name[i]);
17349 selected_cpu_name[i] = 0;
17353 return arm_parse_extension (ext, &mcpu_cpu_opt);
17358 as_bad (_("unknown cpu `%s'"), str);
/* Decode the argument of -march=.  Like arm_parse_cpu but matches the
   full name exactly (streq) against arm_archs and records the result in
   march_cpu_opt/march_fpu_opt.  NOTE(review): interior lines are elided
   in this view.  */
17363 arm_parse_arch (char * str)
17365 const struct arm_arch_option_table *opt;
17366 char *ext = strchr (str, '+');
17370 optlen = ext - str;
17372 optlen = strlen (str);
17376 as_bad (_("missing architecture name `%s'"), str);
17380 for (opt = arm_archs; opt->name != NULL; opt++)
17381 if (streq (opt->name, str))
17383 march_cpu_opt = &opt->value;
17384 march_fpu_opt = &opt->default_fpu;
17385 strcpy(selected_cpu_name, opt->name);
17388 return arm_parse_extension (ext, &march_cpu_opt);
17393 as_bad (_("unknown architecture `%s'\n"), str);
/* Decode the argument of -mfpu= by exact match against arm_fpus,
   recording the chosen feature set in mfpu_opt.  */
17398 arm_parse_fpu (char * str)
17400 const struct arm_option_cpu_value_table * opt;
17402 for (opt = arm_fpus; opt->name != NULL; opt++)
17403 if (streq (opt->name, str))
17405 mfpu_opt = &opt->value;
17409 as_bad (_("unknown floating point format `%s'\n"), str);
/* Decode the argument of -mfloat-abi= by exact match against
   arm_float_abis, recording the value in mfloat_abi_opt.  */
17414 arm_parse_float_abi (char * str)
17416 const struct arm_option_value_table * opt;
17418 for (opt = arm_float_abis; opt->name != NULL; opt++)
17419 if (streq (opt->name, str))
17421 mfloat_abi_opt = opt->value;
17425 as_bad (_("unknown floating point abi `%s'\n"), str);
/* Decode the argument of -meabi= by exact match against arm_eabis,
   recording the ELF header flag value in meabi_flags.  */
17431 arm_parse_eabi (char * str)
17433 const struct arm_option_value_table *opt;
17435 for (opt = arm_eabis; opt->name != NULL; opt++)
17436 if (streq (opt->name, str))
17438 meabi_flags = opt->value;
17441 as_bad (_("unknown EABI `%s'\n"), str);
/* Options that take a sub-argument; the text after the matched prefix
   (note each entry ends in '=') is passed to the entry's parse
   function by md_parse_option.  */
17446 struct arm_long_option_table arm_long_opts[] =
17448 {"mcpu=", N_("<cpu name>\t assemble for CPU <cpu name>"),
17449 arm_parse_cpu, NULL},
17450 {"march=", N_("<arch name>\t assemble for architecture <arch name>"),
17451 arm_parse_arch, NULL},
17452 {"mfpu=", N_("<fpu name>\t assemble for FPU architecture <fpu name>"),
17453 arm_parse_fpu, NULL},
17454 {"mfloat-abi=", N_("<abi>\t assemble for floating point ABI <abi>"),
17455 arm_parse_float_abi, NULL},
17457 {"meabi=", N_("<ver>\t assemble for eabi version <ver>"),
17458 arm_parse_eabi, NULL},
17460 {NULL, NULL, 0, NULL}
/* Target hook called by the generic gas option parser for each
   unrecognized option character C (ARG is its argument, if any).
   Handles -EB/-EL directly, then tries the three ARM option tables in
   order: simple flags (arm_opts), deprecated legacy options
   (arm_legacy_opts), and prefix options with sub-arguments
   (arm_long_opts).  NOTE(review): interior lines (case labels, braces,
   returns) are elided in this view.  */
17464 md_parse_option (int c, char * arg)
17466 struct arm_option_table *opt;
17467 const struct arm_legacy_option_table *fopt;
17468 struct arm_long_option_table *lopt;
17474 target_big_endian = 1;
17480 target_big_endian = 0;
17485 /* Listing option. Just ignore these, we don't support additional
17490 for (opt = arm_opts; opt->option != NULL; opt++)
17492 if (c == opt->option[0]
17493 && ((arg == NULL && opt->option[1] == 0)
17494 || streq (arg, opt->option + 1)))
17496 #if WARN_DEPRECATED
17497 /* If the option is deprecated, tell the user. */
17498 if (opt->deprecated != NULL)
17499 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
17500 arg ? arg : "", _(opt->deprecated))
17503 if (opt->var != NULL)
17504 *opt->var = opt->value;
17510 for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
17512 if (c == fopt->option[0]
17513 && ((arg == NULL && fopt->option[1] == 0)
17514 || streq (arg, fopt->option + 1)))
17516 #if WARN_DEPRECATED
17517 /* If the option is deprecated, tell the user. */
17518 if (fopt->deprecated != NULL)
17519 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
17520 arg ? arg : "", _(fopt->deprecated));
17523 if (fopt->var != NULL)
17524 *fopt->var = &fopt->value;
17530 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
17532 /* These options are expected to have an argument. */
17533 if (c == lopt->option[0]
17535 && strncmp (arg, lopt->option + 1,
17536 strlen (lopt->option + 1)) == 0)
17538 #if WARN_DEPRECATED
17539 /* If the option is deprecated, tell the user. */
17540 if (lopt->deprecated != NULL)
17541 as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
17542 _(lopt->deprecated));
17545 /* Call the sub-option parser. */
17546 return lopt->func (arg + strlen (lopt->option) - 1);
/* Target hook: print ARM-specific option help to FP for --help,
   walking the short- and long-option tables, then the -EB/-EL lines.
   NOTE(review): interior lines are elided in this view.  */
17557 md_show_usage (FILE * fp)
17559 struct arm_option_table *opt;
17560 struct arm_long_option_table *lopt;
17562 fprintf (fp, _(" ARM-specific assembler options:\n"));
17564 for (opt = arm_opts; opt->option != NULL; opt++)
17565 if (opt->help != NULL)
17566 fprintf (fp, " -%-23s%s\n", opt->option, _(opt->help));
17568 for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
17569 if (lopt->help != NULL)
17570 fprintf (fp, " -%s%s\n", lopt->option, _(lopt->help));
17574 -EB assemble code for a big-endian cpu\n"));
17579 -EL assemble code for a little-endian cpu\n"));
/* One row of the feature-set -> EABI Tag_CPU_arch value mapping.
   NOTE(review): the typedef's opening lines (including the integer
   value member) are elided in this view.  */
17588 arm_feature_set flags;
17589 } cpu_arch_ver_table;
17591 /* Mapping from CPU features to EABI CPU arch values. Table must be sorted
17592 least features first. */
17593 static const cpu_arch_ver_table cpu_arch_ver[] =
17598 {4, ARM_ARCH_V5TE},
17599 {5, ARM_ARCH_V5TEJ},
17603 {9, ARM_ARCH_V6T2},
17604 {10, ARM_ARCH_V7A},
17605 {10, ARM_ARCH_V7R},
17606 {10, ARM_ARCH_V7M},
17610 /* Set the public EABI object attributes. */
/* Computes the union of the features actually used (ARM and Thumb),
   the selected FPU and the selected CPU, derives the EABI Tag_CPU_arch
   value from cpu_arch_ver, and emits the public build-attribute tags
   (CPU name/arch/profile, ARM/Thumb/VFP/WMMX/NEON ISA use) into the
   output BFD.  NOTE(review): interior lines are elided in this view.  */
17612 aeabi_set_public_attributes (void)
17615 arm_feature_set flags;
17616 arm_feature_set tmp;
17617 const cpu_arch_ver_table *p;
17619 /* Choose the architecture based on the capabilities of the requested cpu
17620 (if any) and/or the instructions actually used. */
17621 ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
17622 ARM_MERGE_FEATURE_SETS (flags, flags, *mfpu_opt);
17623 ARM_MERGE_FEATURE_SETS (flags, flags, selected_cpu);
/* Walk the sorted table, stripping each matched feature block; the
   last row whose features are present determines the arch value.  */
17627 for (p = cpu_arch_ver; p->val; p++)
17629 if (ARM_CPU_HAS_FEATURE (tmp, p->flags))
17632 ARM_CLEAR_FEATURE (tmp, tmp, p->flags);
17636 /* Tag_CPU_name. */
17637 if (selected_cpu_name[0])
17641 p = selected_cpu_name;
/* Strip a leading "armv" so architecture names report only the
   version part in Tag_CPU_name.  */
17642 if (strncmp(p, "armv", 4) == 0)
17647 for (i = 0; p[i]; i++)
17648 p[i] = TOUPPER (p[i]);
17650 elf32_arm_add_eabi_attr_string (stdoutput, 5, p);
17652 /* Tag_CPU_arch. */
17653 elf32_arm_add_eabi_attr_int (stdoutput, 6, arch);
17654 /* Tag_CPU_arch_profile. */
17655 if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7a))
17656 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'A');
17657 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7r))
17658 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'R');
17659 else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v7m))
17660 elf32_arm_add_eabi_attr_int (stdoutput, 7, 'M');
17661 /* Tag_ARM_ISA_use. */
17662 if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_full))
17663 elf32_arm_add_eabi_attr_int (stdoutput, 8, 1);
17664 /* Tag_THUMB_ISA_use. */
17665 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_full))
17666 elf32_arm_add_eabi_attr_int (stdoutput, 9,
17667 ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2) ? 2 : 1);
17668 /* Tag_VFP_arch. */
17669 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v3)
17670 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v3))
17671 elf32_arm_add_eabi_attr_int (stdoutput, 10, 3);
17672 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v2)
17673 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v2))
17674 elf32_arm_add_eabi_attr_int (stdoutput, 10, 2);
17675 else if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1)
17676 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1)
17677 || ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_vfp_ext_v1xd)
17678 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_vfp_ext_v1xd))
17679 elf32_arm_add_eabi_attr_int (stdoutput, 10, 1);
17680 /* Tag_WMMX_arch. */
17681 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_cext_iwmmxt)
17682 || ARM_CPU_HAS_FEATURE (arm_arch_used, arm_cext_iwmmxt))
17683 elf32_arm_add_eabi_attr_int (stdoutput, 11, 1);
17684 /* Tag_NEON_arch. */
17685 if (ARM_CPU_HAS_FEATURE (thumb_arch_used, fpu_neon_ext_v1)
17686 || ARM_CPU_HAS_FEATURE (arm_arch_used, fpu_neon_ext_v1))
17687 elf32_arm_add_eabi_attr_int (stdoutput, 12, 1);
17690 /* Add the .ARM.attributes section. */
/* NOTE(review): the function header for this body is elided in this
   view.  For EABI version >= 4 output it computes the public
   attributes, creates a read-only .ARM.attributes section and writes
   the serialized attribute contents into a fresh frag.  */
17699 if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
17702 aeabi_set_public_attributes ();
17703 size = elf32_arm_eabi_attr_size (stdoutput);
17704 s = subseg_new (".ARM.attributes", 0);
17705 bfd_set_section_flags (stdoutput, s, SEC_READONLY | SEC_DATA);
17706 addr = frag_now_fix ();
17707 p = frag_more (size);
17708 elf32_arm_set_eabi_attr_contents (stdoutput, (bfd_byte *)p, size);
17710 #endif /* OBJ_ELF */
17713 /* Parse a .cpu directive. */
/* Reads the CPU name from the input line (temporarily NUL-terminating
   it), matches it against arm_cpus (skipping "all"), updates
   mcpu_cpu_opt/selected_cpu/selected_cpu_name and recomputes
   cpu_variant.  On failure the input line is restored and skipped with
   a diagnostic.  NOTE(review): interior lines are elided in this
   view.  */
17716 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
17718 const struct arm_cpu_option_table *opt;
17722 name = input_line_pointer;
17723 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
17724 input_line_pointer++;
17725 saved_char = *input_line_pointer;
17726 *input_line_pointer = 0;
17728 /* Skip the first "all" entry. */
17729 for (opt = arm_cpus + 1; opt->name != NULL; opt++)
17730 if (streq (opt->name, name))
17732 mcpu_cpu_opt = &opt->value;
17733 selected_cpu = opt->value;
17734 if (opt->canonical_name)
17735 strcpy(selected_cpu_name, opt->canonical_name);
/* No canonical name: upper-case the table entry's name instead.  */
17739 for (i = 0; opt->name[i]; i++)
17740 selected_cpu_name[i] = TOUPPER (opt->name[i]);
17741 selected_cpu_name[i] = 0;
17743 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
17744 *input_line_pointer = saved_char;
17745 demand_empty_rest_of_line ();
17748 as_bad (_("unknown cpu `%s'"), name);
17749 *input_line_pointer = saved_char;
17750 ignore_rest_of_line ();
17754 /* Parse a .arch directive. */
/* Like s_arm_cpu but matches against arm_archs (again skipping the
   leading "all" entry); architecture names have no canonical-name
   alternative.  NOTE(review): interior lines are elided in this
   view.  */
17757 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
17759 const struct arm_arch_option_table *opt;
17763 name = input_line_pointer;
17764 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
17765 input_line_pointer++;
17766 saved_char = *input_line_pointer;
17767 *input_line_pointer = 0;
17769 /* Skip the first "all" entry. */
17770 for (opt = arm_archs + 1; opt->name != NULL; opt++)
17771 if (streq (opt->name, name))
17773 mcpu_cpu_opt = &opt->value;
17774 selected_cpu = opt->value;
17775 strcpy(selected_cpu_name, opt->name);
17776 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
17777 *input_line_pointer = saved_char;
17778 demand_empty_rest_of_line ();
17782 as_bad (_("unknown architecture `%s'\n"), name);
17783 *input_line_pointer = saved_char;
17784 ignore_rest_of_line ();
17788 /* Parse a .fpu directive. */
/* Reads the FPU name from the input line, matches it against arm_fpus,
   updates mfpu_opt and recomputes cpu_variant.  NOTE(review): interior
   lines are elided and the function continues past the end of this
   view.  */
17791 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
17793 const struct arm_option_cpu_value_table *opt;
17797 name = input_line_pointer;
17798 while (*input_line_pointer && !ISSPACE(*input_line_pointer))
17799 input_line_pointer++;
17800 saved_char = *input_line_pointer;
17801 *input_line_pointer = 0;
17803 for (opt = arm_fpus; opt->name != NULL; opt++)
17804 if (streq (opt->name, name))
17806 mfpu_opt = &opt->value;
17807 ARM_MERGE_FEATURE_SETS (cpu_variant, *mcpu_cpu_opt, *mfpu_opt);
17808 *input_line_pointer = saved_char;
17809 demand_empty_rest_of_line ();
17813 as_bad (_("unknown floating point format `%s'\n"), name);
17814 *input_line_pointer = saved_char;
17815 ignore_rest_of_line ();