1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
5 Free Software Foundation, Inc.
7 This file is part of GAS, the GNU Assembler.
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
32 #include "safe-ctype.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
48 #define DEFAULT_ARCH "i386"
53 #define INLINE __inline__
59 /* Prefixes will be emitted in the order defined below.
60 WAIT_PREFIX must be the first prefix since FWAIT is really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
69 #define HLE_PREFIX REP_PREFIX
70 #define BND_PREFIX REP_PREFIX
72 #define REX_PREFIX 6 /* must come last. */
73 #define MAX_PREFIXES 7 /* max prefixes per opcode */
75 /* we define the syntax here (modulo base,index,scale syntax) */
76 #define REGISTER_PREFIX '%'
77 #define IMMEDIATE_PREFIX '$'
78 #define ABSOLUTE_PREFIX '*'
80 /* these are the instruction mnemonic suffixes in AT&T syntax or
81 memory operand size in Intel syntax. */
82 #define WORD_MNEM_SUFFIX 'w'
83 #define BYTE_MNEM_SUFFIX 'b'
84 #define SHORT_MNEM_SUFFIX 's'
85 #define LONG_MNEM_SUFFIX 'l'
86 #define QWORD_MNEM_SUFFIX 'q'
87 #define XMMWORD_MNEM_SUFFIX 'x'
88 #define YMMWORD_MNEM_SUFFIX 'y'
89 #define ZMMWORD_MNEM_SUFFIX 'z'
90 /* Intel Syntax. Use a non-ascii letter since since it never appears
92 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
94 #define END_OF_INSN '\0'
97 'templates' is for grouping together 'template' structures for opcodes
98 of the same name. This is only used for storing the insns in the grand
99 ole hash table of insns.
100 The templates themselves start at START and range up to (but not including)
105 const insn_template *start;
106 const insn_template *end;
110 /* 386 operand encoding bytes: see 386 book for details of this. */
113 unsigned int regmem; /* codes register or memory operand */
114 unsigned int reg; /* codes register operand (or extended opcode) */
115 unsigned int mode; /* how to interpret regmem & reg */
119 /* x86-64 extension prefix. */
120 typedef int rex_byte;
122 /* 386 opcode byte to code indirect addressing. */
131 /* x86 arch names, types and features */
134 const char *name; /* arch name */
135 unsigned int len; /* arch string length */
136 enum processor_type type; /* arch type */
137 i386_cpu_flags flags; /* cpu feature flags */
138 unsigned int skip; /* show_arch should skip this. */
139 unsigned int negated; /* turn off indicated flags. */
143 static void update_code_flag (int, int);
144 static void set_code_flag (int);
145 static void set_16bit_gcc_code_flag (int);
146 static void set_intel_syntax (int);
147 static void set_intel_mnemonic (int);
148 static void set_allow_index_reg (int);
149 static void set_check (int);
150 static void set_cpu_arch (int);
152 static void pe_directive_secrel (int);
154 static void signed_cons (int);
155 static char *output_invalid (int c);
156 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
158 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
160 static int i386_att_operand (char *);
161 static int i386_intel_operand (char *, int);
162 static int i386_intel_simplify (expressionS *);
163 static int i386_intel_parse_name (const char *, expressionS *);
164 static const reg_entry *parse_register (char *, char **);
165 static char *parse_insn (char *, char *);
166 static char *parse_operands (char *, const char *);
167 static void swap_operands (void);
168 static void swap_2_operands (int, int);
169 static void optimize_imm (void);
170 static void optimize_disp (void);
171 static const insn_template *match_template (void);
172 static int check_string (void);
173 static int process_suffix (void);
174 static int check_byte_reg (void);
175 static int check_long_reg (void);
176 static int check_qword_reg (void);
177 static int check_word_reg (void);
178 static int finalize_imm (void);
179 static int process_operands (void);
180 static const seg_entry *build_modrm_byte (void);
181 static void output_insn (void);
182 static void output_imm (fragS *, offsetT);
183 static void output_disp (fragS *, offsetT);
185 static void s_bss (int);
187 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
188 static void handle_large_common (int small ATTRIBUTE_UNUSED);
191 static const char *default_arch = DEFAULT_ARCH;
193 /* This struct describes rounding control and SAE in the instruction. */
207 static struct RC_Operation rc_op;
209 /* The struct describes masking, applied to OPERAND in the instruction.
210 MASK is a pointer to the corresponding mask register. ZEROING tells
211 whether merging or zeroing mask is used. */
212 struct Mask_Operation
214 const reg_entry *mask;
215 unsigned int zeroing;
216 /* The operand where this operation is associated. */
220 static struct Mask_Operation mask_op;
222 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
224 struct Broadcast_Operation
226 /* Type of broadcast: no broadcast, {1to8}, or {1to16}. */
229 /* Index of broadcasted operand. */
233 static struct Broadcast_Operation broadcast_op;
238 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
239 unsigned char bytes[4];
241 /* Destination or source register specifier. */
242 const reg_entry *register_specifier;
245 /* 'md_assemble ()' gathers together information and puts it into a
252 const reg_entry *regs;
257 operand_size_mismatch,
258 operand_type_mismatch,
259 register_type_mismatch,
260 number_of_operands_mismatch,
261 invalid_instruction_suffix,
264 unsupported_with_intel_mnemonic,
267 invalid_vsib_address,
268 invalid_vector_register_set,
269 unsupported_vector_index_register,
270 unsupported_broadcast,
271 broadcast_not_on_src_operand,
274 mask_not_on_destination,
277 rc_sae_operand_not_last_imm,
278 invalid_register_operand,
284 /* TM holds the template for the insn were currently assembling. */
287 /* SUFFIX holds the instruction size suffix for byte, word, dword
288 or qword, if given. */
291 /* OPERANDS gives the number of given operands. */
292 unsigned int operands;
294 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
295 of given register, displacement, memory operands and immediate
297 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
299 /* TYPES [i] is the type (see above #defines) which tells us how to
300 use OP[i] for the corresponding operand. */
301 i386_operand_type types[MAX_OPERANDS];
303 /* Displacement expression, immediate expression, or register for each
305 union i386_op op[MAX_OPERANDS];
307 /* Flags for operands. */
308 unsigned int flags[MAX_OPERANDS];
309 #define Operand_PCrel 1
311 /* Relocation type for operand */
312 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
314 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
315 the base index byte below. */
316 const reg_entry *base_reg;
317 const reg_entry *index_reg;
318 unsigned int log2_scale_factor;
320 /* SEG gives the seg_entries of this insn. They are zero unless
321 explicit segment overrides are given. */
322 const seg_entry *seg[2];
324 /* PREFIX holds all the given prefix opcodes (usually null).
325 PREFIXES is the number of prefix opcodes. */
326 unsigned int prefixes;
327 unsigned char prefix[MAX_PREFIXES];
329 /* RM and SIB are the modrm byte and the sib byte where the
330 addressing modes of this insn are encoded. */
337 /* Masking attributes. */
338 struct Mask_Operation *mask;
340 /* Rounding control and SAE attributes. */
341 struct RC_Operation *rounding;
343 /* Broadcasting attributes. */
344 struct Broadcast_Operation *broadcast;
346 /* Compressed disp8*N attribute. */
347 unsigned int memshift;
349 /* Swap operand in encoding. */
350 unsigned int swap_operand;
352 /* Prefer 8bit or 32bit displacement in encoding. */
355 disp_encoding_default = 0,
361 const char *rep_prefix;
364 const char *hle_prefix;
366 /* Have BND prefix. */
367 const char *bnd_prefix;
369 /* Need VREX to support upper 16 registers. */
373 enum i386_error error;
376 typedef struct _i386_insn i386_insn;
378 /* Link RC type with corresponding string, that'll be looked for in
387 static const struct RC_name RC_NamesTable[] =
389 { rne, STRING_COMMA_LEN ("rn-sae") },
390 { rd, STRING_COMMA_LEN ("rd-sae") },
391 { ru, STRING_COMMA_LEN ("ru-sae") },
392 { rz, STRING_COMMA_LEN ("rz-sae") },
393 { saeonly, STRING_COMMA_LEN ("sae") },
396 /* List of chars besides those in app.c:symbol_chars that can start an
397 operand. Used to prevent the scrubber eating vital white-space. */
398 const char extra_symbol_chars[] = "*%-([{"
407 #if (defined (TE_I386AIX) \
408 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
409 && !defined (TE_GNU) \
410 && !defined (TE_LINUX) \
411 && !defined (TE_NACL) \
412 && !defined (TE_NETWARE) \
413 && !defined (TE_FreeBSD) \
414 && !defined (TE_DragonFly) \
415 && !defined (TE_NetBSD)))
416 /* This array holds the chars that always start a comment. If the
417 pre-processor is disabled, these aren't very useful. The option
418 --divide will remove '/' from this list. */
419 const char *i386_comment_chars = "#/";
420 #define SVR4_COMMENT_CHARS 1
421 #define PREFIX_SEPARATOR '\\'
424 const char *i386_comment_chars = "#";
425 #define PREFIX_SEPARATOR '/'
428 /* This array holds the chars that only start a comment at the beginning of
429 a line. If the line seems to have the form '# 123 filename'
430 .line and .file directives will appear in the pre-processed output.
431 Note that input_file.c hand checks for '#' at the beginning of the
432 first line of the input file. This is because the compiler outputs
433 #NO_APP at the beginning of its output.
434 Also note that comments started like this one will always work if
435 '/' isn't otherwise defined. */
436 const char line_comment_chars[] = "#/";
438 const char line_separator_chars[] = ";";
440 /* Chars that can be used to separate mant from exp in floating point
442 const char EXP_CHARS[] = "eE";
444 /* Chars that mean this number is a floating point constant
447 const char FLT_CHARS[] = "fFdDxX";
449 /* Tables for lexical analysis. */
450 static char mnemonic_chars[256];
451 static char register_chars[256];
452 static char operand_chars[256];
453 static char identifier_chars[256];
454 static char digit_chars[256];
456 /* Lexical macros. */
457 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
458 #define is_operand_char(x) (operand_chars[(unsigned char) x])
459 #define is_register_char(x) (register_chars[(unsigned char) x])
460 #define is_space_char(x) ((x) == ' ')
461 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
462 #define is_digit_char(x) (digit_chars[(unsigned char) x])
464 /* All non-digit non-letter characters that may occur in an operand. */
465 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
467 /* md_assemble() always leaves the strings it's passed unaltered. To
468 effect this we maintain a stack of saved characters that we've smashed
469 with '\0's (indicating end of strings for various sub-fields of the
470 assembler instruction). */
471 static char save_stack[32];
472 static char *save_stack_p;
473 #define END_STRING_AND_SAVE(s) \
474 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
475 #define RESTORE_END_STRING(s) \
476 do { *(s) = *--save_stack_p; } while (0)
478 /* The instruction we're assembling. */
481 /* Possible templates for current insn. */
482 static const templates *current_templates;
484 /* Per instruction expressionS buffers: max displacements & immediates. */
485 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
486 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
488 /* Current operand we are working on. */
489 static int this_operand = -1;
491 /* We support four different modes. FLAG_CODE variable is used to distinguish
499 static enum flag_code flag_code;
500 static unsigned int object_64bit;
501 static unsigned int disallow_64bit_reloc;
502 static int use_rela_relocations = 0;
504 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
505 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
506 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
508 /* The ELF ABI to use. */
516 static enum x86_elf_abi x86_elf_abi = I386_ABI;
519 /* 1 for intel syntax,
521 static int intel_syntax = 0;
523 /* 1 for intel mnemonic,
524 0 if att mnemonic. */
525 static int intel_mnemonic = !SYSV386_COMPAT;
527 /* 1 if support old (<= 2.8.1) versions of gcc. */
528 static int old_gcc = OLDGCC_COMPAT;
530 /* 1 if pseudo registers are permitted. */
531 static int allow_pseudo_reg = 0;
533 /* 1 if register prefix % not required. */
534 static int allow_naked_reg = 0;
536 /* 1 if the assembler should add BND prefix for all control-tranferring
537 instructions supporting it, even if this prefix wasn't specified
539 static int add_bnd_prefix = 0;
541 /* 1 if pseudo index register, eiz/riz, is allowed . */
542 static int allow_index_reg = 0;
544 static enum check_kind
550 sse_check, operand_check = check_warning;
552 /* Register prefix used for error message. */
553 static const char *register_prefix = "%";
555 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
556 leave, push, and pop instructions so that gcc has the same stack
557 frame as in 32 bit mode. */
558 static char stackop_size = '\0';
560 /* Non-zero to optimize code alignment. */
561 int optimize_align_code = 1;
563 /* Non-zero to quieten some warnings. */
564 static int quiet_warnings = 0;
567 static const char *cpu_arch_name = NULL;
568 static char *cpu_sub_arch_name = NULL;
570 /* CPU feature flags. */
571 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
573 /* If we have selected a cpu we are generating instructions for. */
574 static int cpu_arch_tune_set = 0;
576 /* Cpu we are generating instructions for. */
577 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
579 /* CPU feature flags of cpu we are generating instructions for. */
580 static i386_cpu_flags cpu_arch_tune_flags;
582 /* CPU instruction set architecture used. */
583 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
585 /* CPU feature flags of instruction set architecture used. */
586 i386_cpu_flags cpu_arch_isa_flags;
588 /* If set, conditional jumps are not automatically promoted to handle
589 larger than a byte offset. */
590 static unsigned int no_cond_jump_promotion = 0;
592 /* Encode SSE instructions with VEX prefix. */
593 static unsigned int sse2avx;
595 /* Encode scalar AVX instructions with specific vector length. */
602 /* Encode scalar EVEX LIG instructions with specific vector length. */
610 /* Encode EVEX WIG instructions with specific evex.w. */
617 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
618 static symbolS *GOT_symbol;
620 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
621 unsigned int x86_dwarf2_return_column;
623 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
624 int x86_cie_data_alignment;
626 /* Interface to relax_segment.
627 There are 3 major relax states for 386 jump insns because the
628 different types of jumps add different sizes to frags when we're
629 figuring out what sort of jump to choose to reach a given label. */
632 #define UNCOND_JUMP 0
634 #define COND_JUMP86 2
639 #define SMALL16 (SMALL | CODE16)
641 #define BIG16 (BIG | CODE16)
645 #define INLINE __inline__
651 #define ENCODE_RELAX_STATE(type, size) \
652 ((relax_substateT) (((type) << 2) | (size)))
653 #define TYPE_FROM_RELAX_STATE(s) \
655 #define DISP_SIZE_FROM_RELAX_STATE(s) \
656 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
658 /* This table is used by relax_frag to promote short jumps to long
659 ones where necessary. SMALL (short) jumps may be promoted to BIG
660 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
661 don't allow a short jump in a 32 bit code segment to be promoted to
662 a 16 bit offset jump because it's slower (requires data size
663 prefix), and doesn't work, unless the destination is in the bottom
664 64k of the code segment (The top 16 bits of eip are zeroed). */
666 const relax_typeS md_relax_table[] =
669 1) most positive reach of this state,
670 2) most negative reach of this state,
671 3) how many bytes this mode will have in the variable part of the frag
672 4) which index into the table to try if we can't fit into this one. */
674 /* UNCOND_JUMP states. */
675 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
676 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
677 /* dword jmp adds 4 bytes to frag:
678 0 extra opcode bytes, 4 displacement bytes. */
680 /* word jmp adds 2 byte2 to frag:
681 0 extra opcode bytes, 2 displacement bytes. */
684 /* COND_JUMP states. */
685 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
686 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
687 /* dword conditionals adds 5 bytes to frag:
688 1 extra opcode byte, 4 displacement bytes. */
690 /* word conditionals add 3 bytes to frag:
691 1 extra opcode byte, 2 displacement bytes. */
694 /* COND_JUMP86 states. */
695 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
696 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
697 /* dword conditionals adds 5 bytes to frag:
698 1 extra opcode byte, 4 displacement bytes. */
700 /* word conditionals add 4 bytes to frag:
701 1 displacement byte and a 3 byte long branch insn. */
705 static const arch_entry cpu_arch[] =
707 /* Do not replace the first two entries - i386_target_format()
708 relies on them being there in this order. */
709 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
710 CPU_GENERIC32_FLAGS, 0, 0 },
711 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
712 CPU_GENERIC64_FLAGS, 0, 0 },
713 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
714 CPU_NONE_FLAGS, 0, 0 },
715 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
716 CPU_I186_FLAGS, 0, 0 },
717 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
718 CPU_I286_FLAGS, 0, 0 },
719 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
720 CPU_I386_FLAGS, 0, 0 },
721 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
722 CPU_I486_FLAGS, 0, 0 },
723 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
724 CPU_I586_FLAGS, 0, 0 },
725 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
726 CPU_I686_FLAGS, 0, 0 },
727 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
728 CPU_I586_FLAGS, 0, 0 },
729 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
730 CPU_PENTIUMPRO_FLAGS, 0, 0 },
731 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
732 CPU_P2_FLAGS, 0, 0 },
733 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
734 CPU_P3_FLAGS, 0, 0 },
735 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
736 CPU_P4_FLAGS, 0, 0 },
737 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
738 CPU_CORE_FLAGS, 0, 0 },
739 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
740 CPU_NOCONA_FLAGS, 0, 0 },
741 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
742 CPU_CORE_FLAGS, 1, 0 },
743 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
744 CPU_CORE_FLAGS, 0, 0 },
745 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
746 CPU_CORE2_FLAGS, 1, 0 },
747 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
748 CPU_CORE2_FLAGS, 0, 0 },
749 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
750 CPU_COREI7_FLAGS, 0, 0 },
751 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
752 CPU_L1OM_FLAGS, 0, 0 },
753 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
754 CPU_K1OM_FLAGS, 0, 0 },
755 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
756 CPU_K6_FLAGS, 0, 0 },
757 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
758 CPU_K6_2_FLAGS, 0, 0 },
759 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
760 CPU_ATHLON_FLAGS, 0, 0 },
761 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
762 CPU_K8_FLAGS, 1, 0 },
763 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
764 CPU_K8_FLAGS, 0, 0 },
765 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
766 CPU_K8_FLAGS, 0, 0 },
767 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
768 CPU_AMDFAM10_FLAGS, 0, 0 },
769 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
770 CPU_BDVER1_FLAGS, 0, 0 },
771 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
772 CPU_BDVER2_FLAGS, 0, 0 },
773 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
774 CPU_BDVER3_FLAGS, 0, 0 },
775 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD,
776 CPU_BDVER4_FLAGS, 0, 0 },
777 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
778 CPU_BTVER1_FLAGS, 0, 0 },
779 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
780 CPU_BTVER2_FLAGS, 0, 0 },
781 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
782 CPU_8087_FLAGS, 0, 0 },
783 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
784 CPU_287_FLAGS, 0, 0 },
785 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
786 CPU_387_FLAGS, 0, 0 },
787 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
788 CPU_ANY87_FLAGS, 0, 1 },
789 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
790 CPU_MMX_FLAGS, 0, 0 },
791 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
792 CPU_3DNOWA_FLAGS, 0, 1 },
793 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
794 CPU_SSE_FLAGS, 0, 0 },
795 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
796 CPU_SSE2_FLAGS, 0, 0 },
797 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
798 CPU_SSE3_FLAGS, 0, 0 },
799 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
800 CPU_SSSE3_FLAGS, 0, 0 },
801 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
802 CPU_SSE4_1_FLAGS, 0, 0 },
803 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
804 CPU_SSE4_2_FLAGS, 0, 0 },
805 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
806 CPU_SSE4_2_FLAGS, 0, 0 },
807 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
808 CPU_ANY_SSE_FLAGS, 0, 1 },
809 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
810 CPU_AVX_FLAGS, 0, 0 },
811 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
812 CPU_AVX2_FLAGS, 0, 0 },
813 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
814 CPU_AVX512F_FLAGS, 0, 0 },
815 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
816 CPU_AVX512CD_FLAGS, 0, 0 },
817 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
818 CPU_AVX512ER_FLAGS, 0, 0 },
819 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
820 CPU_AVX512PF_FLAGS, 0, 0 },
821 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
822 CPU_ANY_AVX_FLAGS, 0, 1 },
823 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
824 CPU_VMX_FLAGS, 0, 0 },
825 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
826 CPU_VMFUNC_FLAGS, 0, 0 },
827 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
828 CPU_SMX_FLAGS, 0, 0 },
829 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
830 CPU_XSAVE_FLAGS, 0, 0 },
831 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
832 CPU_XSAVEOPT_FLAGS, 0, 0 },
833 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
834 CPU_AES_FLAGS, 0, 0 },
835 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
836 CPU_PCLMUL_FLAGS, 0, 0 },
837 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
838 CPU_PCLMUL_FLAGS, 1, 0 },
839 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
840 CPU_FSGSBASE_FLAGS, 0, 0 },
841 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
842 CPU_RDRND_FLAGS, 0, 0 },
843 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
844 CPU_F16C_FLAGS, 0, 0 },
845 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
846 CPU_BMI2_FLAGS, 0, 0 },
847 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
848 CPU_FMA_FLAGS, 0, 0 },
849 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
850 CPU_FMA4_FLAGS, 0, 0 },
851 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
852 CPU_XOP_FLAGS, 0, 0 },
853 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
854 CPU_LWP_FLAGS, 0, 0 },
855 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
856 CPU_MOVBE_FLAGS, 0, 0 },
857 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
858 CPU_CX16_FLAGS, 0, 0 },
859 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
860 CPU_EPT_FLAGS, 0, 0 },
861 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
862 CPU_LZCNT_FLAGS, 0, 0 },
863 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
864 CPU_HLE_FLAGS, 0, 0 },
865 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
866 CPU_RTM_FLAGS, 0, 0 },
867 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
868 CPU_INVPCID_FLAGS, 0, 0 },
869 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
870 CPU_CLFLUSH_FLAGS, 0, 0 },
871 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
872 CPU_NOP_FLAGS, 0, 0 },
873 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
874 CPU_SYSCALL_FLAGS, 0, 0 },
875 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
876 CPU_RDTSCP_FLAGS, 0, 0 },
877 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
878 CPU_3DNOW_FLAGS, 0, 0 },
879 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
880 CPU_3DNOWA_FLAGS, 0, 0 },
881 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
882 CPU_PADLOCK_FLAGS, 0, 0 },
883 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
884 CPU_SVME_FLAGS, 1, 0 },
885 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
886 CPU_SVME_FLAGS, 0, 0 },
887 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
888 CPU_SSE4A_FLAGS, 0, 0 },
889 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
890 CPU_ABM_FLAGS, 0, 0 },
891 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
892 CPU_BMI_FLAGS, 0, 0 },
893 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
894 CPU_TBM_FLAGS, 0, 0 },
895 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
896 CPU_ADX_FLAGS, 0, 0 },
897 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
898 CPU_RDSEED_FLAGS, 0, 0 },
899 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
900 CPU_PRFCHW_FLAGS, 0, 0 },
901 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
902 CPU_SMAP_FLAGS, 0, 0 },
903 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
904 CPU_MPX_FLAGS, 0, 0 },
905 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
906 CPU_SHA_FLAGS, 0, 0 },
907 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN,
908 CPU_CLFLUSHOPT_FLAGS, 0, 0 },
909 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN,
910 CPU_XSAVEC_FLAGS, 0, 0 },
911 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN,
912 CPU_XSAVES_FLAGS, 0, 0 },
913 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN,
914 CPU_PREFETCHWT1_FLAGS, 0, 0 },
918 /* Like s_lcomm_internal in gas/read.c but the alignment string
919 is allowed to be optional. */
922 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
929 && *input_line_pointer == ',')
931 align = parse_align (needs_align - 1);
933 if (align == (addressT) -1)
948 bss_alloc (symbolP, size, align);
953 pe_lcomm (int needs_align)
955 s_comm_internal (needs_align * 2, pe_lcomm_internal);
959 const pseudo_typeS md_pseudo_table[] =
961 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
962 {"align", s_align_bytes, 0},
964 {"align", s_align_ptwo, 0},
966 {"arch", set_cpu_arch, 0},
970 {"lcomm", pe_lcomm, 1},
972 {"ffloat", float_cons, 'f'},
973 {"dfloat", float_cons, 'd'},
974 {"tfloat", float_cons, 'x'},
976 {"slong", signed_cons, 4},
977 {"noopt", s_ignore, 0},
978 {"optim", s_ignore, 0},
979 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
980 {"code16", set_code_flag, CODE_16BIT},
981 {"code32", set_code_flag, CODE_32BIT},
982 {"code64", set_code_flag, CODE_64BIT},
983 {"intel_syntax", set_intel_syntax, 1},
984 {"att_syntax", set_intel_syntax, 0},
985 {"intel_mnemonic", set_intel_mnemonic, 1},
986 {"att_mnemonic", set_intel_mnemonic, 0},
987 {"allow_index_reg", set_allow_index_reg, 1},
988 {"disallow_index_reg", set_allow_index_reg, 0},
989 {"sse_check", set_check, 0},
990 {"operand_check", set_check, 1},
991 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
992 {"largecomm", handle_large_common, 0},
994 {"file", (void (*) (int)) dwarf2_directive_file, 0},
995 {"loc", dwarf2_directive_loc, 0},
996 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
999 {"secrel32", pe_directive_secrel, 0},
1004 /* For interface with expression (). */
1005 extern char *input_line_pointer;
1007 /* Hash table for instruction mnemonic lookup. */
1008 static struct hash_control *op_hash;
1010 /* Hash table for register lookup. */
1011 static struct hash_control *reg_hash;
1014 i386_align_code (fragS *fragP, int count)
1016 /* Various efficient no-op patterns for aligning code labels.
1017 Note: Don't try to assemble the instructions in the comments.
1018 0L and 0w are not legal. */
1019 static const char f32_1[] =
1021 static const char f32_2[] =
1022 {0x66,0x90}; /* xchg %ax,%ax */
1023 static const char f32_3[] =
1024 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
1025 static const char f32_4[] =
1026 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1027 static const char f32_5[] =
1029 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
1030 static const char f32_6[] =
1031 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
1032 static const char f32_7[] =
1033 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1034 static const char f32_8[] =
1036 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
1037 static const char f32_9[] =
1038 {0x89,0xf6, /* movl %esi,%esi */
1039 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1040 static const char f32_10[] =
1041 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
1042 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1043 static const char f32_11[] =
1044 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
1045 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1046 static const char f32_12[] =
1047 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
1048 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
1049 static const char f32_13[] =
1050 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
1051 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1052 static const char f32_14[] =
1053 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
1054 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
1055 static const char f16_3[] =
1056 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
1057 static const char f16_4[] =
1058 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
1059 static const char f16_5[] =
1061 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
1062 static const char f16_6[] =
1063 {0x89,0xf6, /* mov %si,%si */
1064 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1065 static const char f16_7[] =
1066 {0x8d,0x74,0x00, /* lea 0(%si),%si */
1067 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1068 static const char f16_8[] =
1069 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
1070 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
1071 static const char jump_31[] =
1072 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
1073 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
1074 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
1075 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
1076 static const char *const f32_patt[] = {
1077 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
1078 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
1080 static const char *const f16_patt[] = {
1081 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
1083 /* nopl (%[re]ax) */
1084 static const char alt_3[] =
1086 /* nopl 0(%[re]ax) */
1087 static const char alt_4[] =
1088 {0x0f,0x1f,0x40,0x00};
1089 /* nopl 0(%[re]ax,%[re]ax,1) */
1090 static const char alt_5[] =
1091 {0x0f,0x1f,0x44,0x00,0x00};
1092 /* nopw 0(%[re]ax,%[re]ax,1) */
1093 static const char alt_6[] =
1094 {0x66,0x0f,0x1f,0x44,0x00,0x00};
1095 /* nopl 0L(%[re]ax) */
1096 static const char alt_7[] =
1097 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1098 /* nopl 0L(%[re]ax,%[re]ax,1) */
1099 static const char alt_8[] =
1100 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1101 /* nopw 0L(%[re]ax,%[re]ax,1) */
1102 static const char alt_9[] =
1103 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1104 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1105 static const char alt_10[] =
1106 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1108 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1109 static const char alt_long_11[] =
1111 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1114 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1115 static const char alt_long_12[] =
1118 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1122 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1123 static const char alt_long_13[] =
1127 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1132 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1133 static const char alt_long_14[] =
1138 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1144 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1145 static const char alt_long_15[] =
1151 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1152 /* nopl 0(%[re]ax,%[re]ax,1)
1153 nopw 0(%[re]ax,%[re]ax,1) */
1154 static const char alt_short_11[] =
1155 {0x0f,0x1f,0x44,0x00,0x00,
1156 0x66,0x0f,0x1f,0x44,0x00,0x00};
1157 /* nopw 0(%[re]ax,%[re]ax,1)
1158 nopw 0(%[re]ax,%[re]ax,1) */
1159 static const char alt_short_12[] =
1160 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1161 0x66,0x0f,0x1f,0x44,0x00,0x00};
1162 /* nopw 0(%[re]ax,%[re]ax,1)
1164 static const char alt_short_13[] =
1165 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1166 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1169 static const char alt_short_14[] =
1170 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1171 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1173 nopl 0L(%[re]ax,%[re]ax,1) */
1174 static const char alt_short_15[] =
1175 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1176 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1177 static const char *const alt_short_patt[] = {
1178 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1179 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1180 alt_short_14, alt_short_15
1182 static const char *const alt_long_patt[] = {
1183 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1184 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1185 alt_long_14, alt_long_15
1188 /* Only align for at least a positive non-zero boundary. */
1189 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1192 /* We need to decide which NOP sequence to use for 32bit and
1193 64bit. When -mtune= is used:
1195 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1196 PROCESSOR_GENERIC32, f32_patt will be used.
1197 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1198 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1199 PROCESSOR_GENERIC64, alt_long_patt will be used.
1200 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1201 PROCESSOR_AMDFAM10, PROCESSOR_BD and PROCESSOR_BT, alt_short_patt
1204 When -mtune= isn't used, alt_long_patt will be used if
1205 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1208 When -march= or .arch is used, we can't use anything beyond
1209 cpu_arch_isa_flags. */
1211 if (flag_code == CODE_16BIT)
1215 memcpy (fragP->fr_literal + fragP->fr_fix,
1217 /* Adjust jump offset. */
1218 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1221 memcpy (fragP->fr_literal + fragP->fr_fix,
1222 f16_patt[count - 1], count);
1226 const char *const *patt = NULL;
1228 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1230 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1231 switch (cpu_arch_tune)
1233 case PROCESSOR_UNKNOWN:
1234 /* We use cpu_arch_isa_flags to check if we SHOULD
1235 optimize with nops. */
1236 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1237 patt = alt_long_patt;
1241 case PROCESSOR_PENTIUM4:
1242 case PROCESSOR_NOCONA:
1243 case PROCESSOR_CORE:
1244 case PROCESSOR_CORE2:
1245 case PROCESSOR_COREI7:
1246 case PROCESSOR_L1OM:
1247 case PROCESSOR_K1OM:
1248 case PROCESSOR_GENERIC64:
1249 patt = alt_long_patt;
1252 case PROCESSOR_ATHLON:
1254 case PROCESSOR_AMDFAM10:
1257 patt = alt_short_patt;
1259 case PROCESSOR_I386:
1260 case PROCESSOR_I486:
1261 case PROCESSOR_PENTIUM:
1262 case PROCESSOR_PENTIUMPRO:
1263 case PROCESSOR_GENERIC32:
1270 switch (fragP->tc_frag_data.tune)
1272 case PROCESSOR_UNKNOWN:
1273 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1274 PROCESSOR_UNKNOWN. */
1278 case PROCESSOR_I386:
1279 case PROCESSOR_I486:
1280 case PROCESSOR_PENTIUM:
1282 case PROCESSOR_ATHLON:
1284 case PROCESSOR_AMDFAM10:
1287 case PROCESSOR_GENERIC32:
1288 /* We use cpu_arch_isa_flags to check if we CAN optimize
1290 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1291 patt = alt_short_patt;
1295 case PROCESSOR_PENTIUMPRO:
1296 case PROCESSOR_PENTIUM4:
1297 case PROCESSOR_NOCONA:
1298 case PROCESSOR_CORE:
1299 case PROCESSOR_CORE2:
1300 case PROCESSOR_COREI7:
1301 case PROCESSOR_L1OM:
1302 case PROCESSOR_K1OM:
1303 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1304 patt = alt_long_patt;
1308 case PROCESSOR_GENERIC64:
1309 patt = alt_long_patt;
1314 if (patt == f32_patt)
1316 /* If the padding is less than 15 bytes, we use the normal
1317 ones. Otherwise, we use a jump instruction and adjust
1321 /* For 64bit, the limit is 3 bytes. */
1322 if (flag_code == CODE_64BIT
1323 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1328 memcpy (fragP->fr_literal + fragP->fr_fix,
1329 patt[count - 1], count);
1332 memcpy (fragP->fr_literal + fragP->fr_fix,
1334 /* Adjust jump offset. */
1335 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1340 /* Maximum length of an instruction is 15 byte. If the
1341 padding is greater than 15 bytes and we don't use jump,
1342 we have to break it into smaller pieces. */
1343 int padding = count;
1344 while (padding > 15)
1347 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1352 memcpy (fragP->fr_literal + fragP->fr_fix,
1353 patt [padding - 1], padding);
1356 fragP->fr_var = count;
1360 operand_type_all_zero (const union i386_operand_type *x)
1362 switch (ARRAY_SIZE(x->array))
1371 return !x->array[0];
1378 operand_type_set (union i386_operand_type *x, unsigned int v)
1380 switch (ARRAY_SIZE(x->array))
1395 operand_type_equal (const union i386_operand_type *x,
1396 const union i386_operand_type *y)
1398 switch (ARRAY_SIZE(x->array))
1401 if (x->array[2] != y->array[2])
1404 if (x->array[1] != y->array[1])
1407 return x->array[0] == y->array[0];
1415 cpu_flags_all_zero (const union i386_cpu_flags *x)
1417 switch (ARRAY_SIZE(x->array))
1426 return !x->array[0];
1433 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1435 switch (ARRAY_SIZE(x->array))
1450 cpu_flags_equal (const union i386_cpu_flags *x,
1451 const union i386_cpu_flags *y)
1453 switch (ARRAY_SIZE(x->array))
1456 if (x->array[2] != y->array[2])
1459 if (x->array[1] != y->array[1])
1462 return x->array[0] == y->array[0];
1470 cpu_flags_check_cpu64 (i386_cpu_flags f)
1472 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1473 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1476 static INLINE i386_cpu_flags
1477 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1479 switch (ARRAY_SIZE (x.array))
1482 x.array [2] &= y.array [2];
1484 x.array [1] &= y.array [1];
1486 x.array [0] &= y.array [0];
1494 static INLINE i386_cpu_flags
1495 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1497 switch (ARRAY_SIZE (x.array))
1500 x.array [2] |= y.array [2];
1502 x.array [1] |= y.array [1];
1504 x.array [0] |= y.array [0];
1512 static INLINE i386_cpu_flags
1513 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1515 switch (ARRAY_SIZE (x.array))
1518 x.array [2] &= ~y.array [2];
1520 x.array [1] &= ~y.array [1];
1522 x.array [0] &= ~y.array [0];
1530 #define CPU_FLAGS_ARCH_MATCH 0x1
1531 #define CPU_FLAGS_64BIT_MATCH 0x2
1532 #define CPU_FLAGS_AES_MATCH 0x4
1533 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1534 #define CPU_FLAGS_AVX_MATCH 0x10
1536 #define CPU_FLAGS_32BIT_MATCH \
1537 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1538 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1539 #define CPU_FLAGS_PERFECT_MATCH \
1540 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1542 /* Return CPU flags match bits. */
1545 cpu_flags_match (const insn_template *t)
1547 i386_cpu_flags x = t->cpu_flags;
1548 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1550 x.bitfield.cpu64 = 0;
1551 x.bitfield.cpuno64 = 0;
1553 if (cpu_flags_all_zero (&x))
1555 /* This instruction is available on all archs. */
1556 match |= CPU_FLAGS_32BIT_MATCH;
1560 /* This instruction is available only on some archs. */
1561 i386_cpu_flags cpu = cpu_arch_flags;
1563 cpu.bitfield.cpu64 = 0;
1564 cpu.bitfield.cpuno64 = 0;
1565 cpu = cpu_flags_and (x, cpu);
1566 if (!cpu_flags_all_zero (&cpu))
1568 if (x.bitfield.cpuavx)
1570 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1571 if (cpu.bitfield.cpuavx)
1573 /* Check SSE2AVX. */
1574 if (!t->opcode_modifier.sse2avx|| sse2avx)
1576 match |= (CPU_FLAGS_ARCH_MATCH
1577 | CPU_FLAGS_AVX_MATCH);
1579 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1580 match |= CPU_FLAGS_AES_MATCH;
1582 if (!x.bitfield.cpupclmul
1583 || cpu.bitfield.cpupclmul)
1584 match |= CPU_FLAGS_PCLMUL_MATCH;
1588 match |= CPU_FLAGS_ARCH_MATCH;
1591 match |= CPU_FLAGS_32BIT_MATCH;
1597 static INLINE i386_operand_type
1598 operand_type_and (i386_operand_type x, i386_operand_type y)
1600 switch (ARRAY_SIZE (x.array))
1603 x.array [2] &= y.array [2];
1605 x.array [1] &= y.array [1];
1607 x.array [0] &= y.array [0];
1615 static INLINE i386_operand_type
1616 operand_type_or (i386_operand_type x, i386_operand_type y)
1618 switch (ARRAY_SIZE (x.array))
1621 x.array [2] |= y.array [2];
1623 x.array [1] |= y.array [1];
1625 x.array [0] |= y.array [0];
1633 static INLINE i386_operand_type
1634 operand_type_xor (i386_operand_type x, i386_operand_type y)
1636 switch (ARRAY_SIZE (x.array))
1639 x.array [2] ^= y.array [2];
1641 x.array [1] ^= y.array [1];
1643 x.array [0] ^= y.array [0];
1651 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1652 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1653 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1654 static const i386_operand_type inoutportreg
1655 = OPERAND_TYPE_INOUTPORTREG;
1656 static const i386_operand_type reg16_inoutportreg
1657 = OPERAND_TYPE_REG16_INOUTPORTREG;
1658 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1659 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1660 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1661 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1662 static const i386_operand_type anydisp
1663 = OPERAND_TYPE_ANYDISP;
1664 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1665 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1666 static const i386_operand_type regzmm = OPERAND_TYPE_REGZMM;
1667 static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
1668 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1669 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1670 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1671 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1672 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1673 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1674 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1675 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1676 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1677 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1688 operand_type_check (i386_operand_type t, enum operand_type c)
1693 return (t.bitfield.reg8
1696 || t.bitfield.reg64);
1699 return (t.bitfield.imm8
1703 || t.bitfield.imm32s
1704 || t.bitfield.imm64);
1707 return (t.bitfield.disp8
1708 || t.bitfield.disp16
1709 || t.bitfield.disp32
1710 || t.bitfield.disp32s
1711 || t.bitfield.disp64);
1714 return (t.bitfield.disp8
1715 || t.bitfield.disp16
1716 || t.bitfield.disp32
1717 || t.bitfield.disp32s
1718 || t.bitfield.disp64
1719 || t.bitfield.baseindex);
1728 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1729 operand J for instruction template T. */
1732 match_reg_size (const insn_template *t, unsigned int j)
1734 return !((i.types[j].bitfield.byte
1735 && !t->operand_types[j].bitfield.byte)
1736 || (i.types[j].bitfield.word
1737 && !t->operand_types[j].bitfield.word)
1738 || (i.types[j].bitfield.dword
1739 && !t->operand_types[j].bitfield.dword)
1740 || (i.types[j].bitfield.qword
1741 && !t->operand_types[j].bitfield.qword));
1744 /* Return 1 if there is no conflict in any size on operand J for
1745 instruction template T. */
1748 match_mem_size (const insn_template *t, unsigned int j)
1750 return (match_reg_size (t, j)
1751 && !((i.types[j].bitfield.unspecified
1752 && !t->operand_types[j].bitfield.unspecified)
1753 || (i.types[j].bitfield.fword
1754 && !t->operand_types[j].bitfield.fword)
1755 || (i.types[j].bitfield.tbyte
1756 && !t->operand_types[j].bitfield.tbyte)
1757 || (i.types[j].bitfield.xmmword
1758 && !t->operand_types[j].bitfield.xmmword)
1759 || (i.types[j].bitfield.ymmword
1760 && !t->operand_types[j].bitfield.ymmword)
1761 || (i.types[j].bitfield.zmmword
1762 && !t->operand_types[j].bitfield.zmmword)));
1765 /* Return 1 if there is no size conflict on any operands for
1766 instruction template T. */
1769 operand_size_match (const insn_template *t)
1774 /* Don't check jump instructions. */
1775 if (t->opcode_modifier.jump
1776 || t->opcode_modifier.jumpbyte
1777 || t->opcode_modifier.jumpdword
1778 || t->opcode_modifier.jumpintersegment)
1781 /* Check memory and accumulator operand size. */
1782 for (j = 0; j < i.operands; j++)
1784 if (t->operand_types[j].bitfield.anysize)
1787 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1793 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1802 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1805 i.error = operand_size_mismatch;
1809 /* Check reverse. */
1810 gas_assert (i.operands == 2);
1813 for (j = 0; j < 2; j++)
1815 if (t->operand_types[j].bitfield.acc
1816 && !match_reg_size (t, j ? 0 : 1))
1819 if (i.types[j].bitfield.mem
1820 && !match_mem_size (t, j ? 0 : 1))
1828 operand_type_match (i386_operand_type overlap,
1829 i386_operand_type given)
1831 i386_operand_type temp = overlap;
1833 temp.bitfield.jumpabsolute = 0;
1834 temp.bitfield.unspecified = 0;
1835 temp.bitfield.byte = 0;
1836 temp.bitfield.word = 0;
1837 temp.bitfield.dword = 0;
1838 temp.bitfield.fword = 0;
1839 temp.bitfield.qword = 0;
1840 temp.bitfield.tbyte = 0;
1841 temp.bitfield.xmmword = 0;
1842 temp.bitfield.ymmword = 0;
1843 temp.bitfield.zmmword = 0;
1844 if (operand_type_all_zero (&temp))
1847 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1848 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1852 i.error = operand_type_mismatch;
1856 /* If given types g0 and g1 are registers they must be of the same type
1857 unless the expected operand type register overlap is null.
1858 Note that Acc in a template matches every size of reg. */
1861 operand_type_register_match (i386_operand_type m0,
1862 i386_operand_type g0,
1863 i386_operand_type t0,
1864 i386_operand_type m1,
1865 i386_operand_type g1,
1866 i386_operand_type t1)
1868 if (!operand_type_check (g0, reg))
1871 if (!operand_type_check (g1, reg))
1874 if (g0.bitfield.reg8 == g1.bitfield.reg8
1875 && g0.bitfield.reg16 == g1.bitfield.reg16
1876 && g0.bitfield.reg32 == g1.bitfield.reg32
1877 && g0.bitfield.reg64 == g1.bitfield.reg64)
1880 if (m0.bitfield.acc)
1882 t0.bitfield.reg8 = 1;
1883 t0.bitfield.reg16 = 1;
1884 t0.bitfield.reg32 = 1;
1885 t0.bitfield.reg64 = 1;
1888 if (m1.bitfield.acc)
1890 t1.bitfield.reg8 = 1;
1891 t1.bitfield.reg16 = 1;
1892 t1.bitfield.reg32 = 1;
1893 t1.bitfield.reg64 = 1;
1896 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1897 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1898 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1899 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1902 i.error = register_type_mismatch;
1907 static INLINE unsigned int
1908 register_number (const reg_entry *r)
1910 unsigned int nr = r->reg_num;
1912 if (r->reg_flags & RegRex)
1918 static INLINE unsigned int
1919 mode_from_disp_size (i386_operand_type t)
1921 if (t.bitfield.disp8 || t.bitfield.vec_disp8)
1923 else if (t.bitfield.disp16
1924 || t.bitfield.disp32
1925 || t.bitfield.disp32s)
1932 fits_in_signed_byte (offsetT num)
1934 return (num >= -128) && (num <= 127);
1938 fits_in_unsigned_byte (offsetT num)
1940 return (num & 0xff) == num;
1944 fits_in_unsigned_word (offsetT num)
1946 return (num & 0xffff) == num;
1950 fits_in_signed_word (offsetT num)
1952 return (-32768 <= num) && (num <= 32767);
1956 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1961 return (!(((offsetT) -1 << 31) & num)
1962 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1964 } /* fits_in_signed_long() */
1967 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1972 return (num & (((offsetT) 2 << 31) - 1)) == num;
1974 } /* fits_in_unsigned_long() */
1977 fits_in_vec_disp8 (offsetT num)
1979 int shift = i.memshift;
1985 mask = (1 << shift) - 1;
1987 /* Return 0 if NUM isn't properly aligned. */
1991 /* Check if NUM will fit in 8bit after shift. */
1992 return fits_in_signed_byte (num >> shift);
1996 fits_in_imm4 (offsetT num)
1998 return (num & 0xf) == num;
2001 static i386_operand_type
2002 smallest_imm_type (offsetT num)
2004 i386_operand_type t;
2006 operand_type_set (&t, 0);
2007 t.bitfield.imm64 = 1;
2009 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
2011 /* This code is disabled on the 486 because all the Imm1 forms
2012 in the opcode table are slower on the i486. They're the
2013 versions with the implicitly specified single-position
2014 displacement, which has another syntax if you really want to
2016 t.bitfield.imm1 = 1;
2017 t.bitfield.imm8 = 1;
2018 t.bitfield.imm8s = 1;
2019 t.bitfield.imm16 = 1;
2020 t.bitfield.imm32 = 1;
2021 t.bitfield.imm32s = 1;
2023 else if (fits_in_signed_byte (num))
2025 t.bitfield.imm8 = 1;
2026 t.bitfield.imm8s = 1;
2027 t.bitfield.imm16 = 1;
2028 t.bitfield.imm32 = 1;
2029 t.bitfield.imm32s = 1;
2031 else if (fits_in_unsigned_byte (num))
2033 t.bitfield.imm8 = 1;
2034 t.bitfield.imm16 = 1;
2035 t.bitfield.imm32 = 1;
2036 t.bitfield.imm32s = 1;
2038 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
2040 t.bitfield.imm16 = 1;
2041 t.bitfield.imm32 = 1;
2042 t.bitfield.imm32s = 1;
2044 else if (fits_in_signed_long (num))
2046 t.bitfield.imm32 = 1;
2047 t.bitfield.imm32s = 1;
2049 else if (fits_in_unsigned_long (num))
2050 t.bitfield.imm32 = 1;
2056 offset_in_range (offsetT val, int size)
2062 case 1: mask = ((addressT) 1 << 8) - 1; break;
2063 case 2: mask = ((addressT) 1 << 16) - 1; break;
2064 case 4: mask = ((addressT) 2 << 31) - 1; break;
2066 case 8: mask = ((addressT) 2 << 63) - 1; break;
2072 /* If BFD64, sign extend val for 32bit address mode. */
2073 if (flag_code != CODE_64BIT
2074 || i.prefix[ADDR_PREFIX])
2075 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
2076 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
2079 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
2081 char buf1[40], buf2[40];
2083 sprint_value (buf1, val);
2084 sprint_value (buf2, val & mask);
2085 as_warn (_("%s shortened to %s"), buf1, buf2);
2099 a. PREFIX_EXIST if attempting to add a prefix where one from the
2100 same class already exists.
2101 b. PREFIX_LOCK if lock prefix is added.
2102 c. PREFIX_REP if rep/repne prefix is added.
2103 d. PREFIX_OTHER if other prefix is added.
2106 static enum PREFIX_GROUP
2107 add_prefix (unsigned int prefix)
2109 enum PREFIX_GROUP ret = PREFIX_OTHER;
2112 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
2113 && flag_code == CODE_64BIT)
2115 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
2116 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
2117 && (prefix & (REX_R | REX_X | REX_B))))
2128 case CS_PREFIX_OPCODE:
2129 case DS_PREFIX_OPCODE:
2130 case ES_PREFIX_OPCODE:
2131 case FS_PREFIX_OPCODE:
2132 case GS_PREFIX_OPCODE:
2133 case SS_PREFIX_OPCODE:
2137 case REPNE_PREFIX_OPCODE:
2138 case REPE_PREFIX_OPCODE:
2143 case LOCK_PREFIX_OPCODE:
2152 case ADDR_PREFIX_OPCODE:
2156 case DATA_PREFIX_OPCODE:
2160 if (i.prefix[q] != 0)
2168 i.prefix[q] |= prefix;
2171 as_bad (_("same type of prefix used twice"));
2177 update_code_flag (int value, int check)
2179 PRINTF_LIKE ((*as_error));
2181 flag_code = (enum flag_code) value;
2182 if (flag_code == CODE_64BIT)
2184 cpu_arch_flags.bitfield.cpu64 = 1;
2185 cpu_arch_flags.bitfield.cpuno64 = 0;
2189 cpu_arch_flags.bitfield.cpu64 = 0;
2190 cpu_arch_flags.bitfield.cpuno64 = 1;
2192 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
2195 as_error = as_fatal;
2198 (*as_error) (_("64bit mode not supported on `%s'."),
2199 cpu_arch_name ? cpu_arch_name : default_arch);
2201 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2204 as_error = as_fatal;
2207 (*as_error) (_("32bit mode not supported on `%s'."),
2208 cpu_arch_name ? cpu_arch_name : default_arch);
2210 stackop_size = '\0';
2214 set_code_flag (int value)
2216 update_code_flag (value, 0);
2220 set_16bit_gcc_code_flag (int new_code_flag)
2222 flag_code = (enum flag_code) new_code_flag;
2223 if (flag_code != CODE_16BIT)
2225 cpu_arch_flags.bitfield.cpu64 = 0;
2226 cpu_arch_flags.bitfield.cpuno64 = 1;
2227 stackop_size = LONG_MNEM_SUFFIX;
2231 set_intel_syntax (int syntax_flag)
2233 /* Find out if register prefixing is specified. */
2234 int ask_naked_reg = 0;
2237 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2239 char *string = input_line_pointer;
2240 int e = get_symbol_end ();
2242 if (strcmp (string, "prefix") == 0)
2244 else if (strcmp (string, "noprefix") == 0)
2247 as_bad (_("bad argument to syntax directive."));
2248 *input_line_pointer = e;
2250 demand_empty_rest_of_line ();
2252 intel_syntax = syntax_flag;
2254 if (ask_naked_reg == 0)
2255 allow_naked_reg = (intel_syntax
2256 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2258 allow_naked_reg = (ask_naked_reg < 0);
2260 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2262 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2263 identifier_chars['$'] = intel_syntax ? '$' : 0;
2264 register_prefix = allow_naked_reg ? "" : "%";
2268 set_intel_mnemonic (int mnemonic_flag)
2270 intel_mnemonic = mnemonic_flag;
2274 set_allow_index_reg (int flag)
2276 allow_index_reg = flag;
2280 set_check (int what)
2282 enum check_kind *kind;
2287 kind = &operand_check;
2298 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2300 char *string = input_line_pointer;
2301 int e = get_symbol_end ();
2303 if (strcmp (string, "none") == 0)
2305 else if (strcmp (string, "warning") == 0)
2306 *kind = check_warning;
2307 else if (strcmp (string, "error") == 0)
2308 *kind = check_error;
2310 as_bad (_("bad argument to %s_check directive."), str);
2311 *input_line_pointer = e;
2314 as_bad (_("missing argument for %s_check directive"), str);
2316 demand_empty_rest_of_line ();
2320 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2321 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2323 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2324 static const char *arch;
2326 /* Intel LIOM is only supported on ELF. */
2332 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2333 use default_arch. */
2334 arch = cpu_arch_name;
2336 arch = default_arch;
2339 /* If we are targeting Intel L1OM, we must enable it. */
2340 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2341 || new_flag.bitfield.cpul1om)
2344 /* If we are targeting Intel K1OM, we must enable it. */
2345 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2346 || new_flag.bitfield.cpuk1om)
2349 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2354 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2358 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2360 char *string = input_line_pointer;
2361 int e = get_symbol_end ();
2363 i386_cpu_flags flags;
2365 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2367 if (strcmp (string, cpu_arch[j].name) == 0)
2369 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2373 cpu_arch_name = cpu_arch[j].name;
2374 cpu_sub_arch_name = NULL;
2375 cpu_arch_flags = cpu_arch[j].flags;
2376 if (flag_code == CODE_64BIT)
2378 cpu_arch_flags.bitfield.cpu64 = 1;
2379 cpu_arch_flags.bitfield.cpuno64 = 0;
2383 cpu_arch_flags.bitfield.cpu64 = 0;
2384 cpu_arch_flags.bitfield.cpuno64 = 1;
2386 cpu_arch_isa = cpu_arch[j].type;
2387 cpu_arch_isa_flags = cpu_arch[j].flags;
2388 if (!cpu_arch_tune_set)
2390 cpu_arch_tune = cpu_arch_isa;
2391 cpu_arch_tune_flags = cpu_arch_isa_flags;
2396 if (!cpu_arch[j].negated)
2397 flags = cpu_flags_or (cpu_arch_flags,
2400 flags = cpu_flags_and_not (cpu_arch_flags,
2402 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2404 if (cpu_sub_arch_name)
2406 char *name = cpu_sub_arch_name;
2407 cpu_sub_arch_name = concat (name,
2409 (const char *) NULL);
2413 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2414 cpu_arch_flags = flags;
2415 cpu_arch_isa_flags = flags;
2417 *input_line_pointer = e;
2418 demand_empty_rest_of_line ();
2422 if (j >= ARRAY_SIZE (cpu_arch))
2423 as_bad (_("no such architecture: `%s'"), string);
2425 *input_line_pointer = e;
2428 as_bad (_("missing cpu architecture"));
2430 no_cond_jump_promotion = 0;
2431 if (*input_line_pointer == ','
2432 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2434 char *string = ++input_line_pointer;
2435 int e = get_symbol_end ();
2437 if (strcmp (string, "nojumps") == 0)
2438 no_cond_jump_promotion = 1;
2439 else if (strcmp (string, "jumps") == 0)
2442 as_bad (_("no such architecture modifier: `%s'"), string);
2444 *input_line_pointer = e;
2447 demand_empty_rest_of_line ();
2450 enum bfd_architecture
2453 if (cpu_arch_isa == PROCESSOR_L1OM)
2455 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2456 || flag_code != CODE_64BIT)
2457 as_fatal (_("Intel L1OM is 64bit ELF only"));
2458 return bfd_arch_l1om;
2460 else if (cpu_arch_isa == PROCESSOR_K1OM)
2462 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2463 || flag_code != CODE_64BIT)
2464 as_fatal (_("Intel K1OM is 64bit ELF only"));
2465 return bfd_arch_k1om;
2468 return bfd_arch_i386;
2474 if (!strncmp (default_arch, "x86_64", 6))
2476 if (cpu_arch_isa == PROCESSOR_L1OM)
2478 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2479 || default_arch[6] != '\0')
2480 as_fatal (_("Intel L1OM is 64bit ELF only"));
2481 return bfd_mach_l1om;
2483 else if (cpu_arch_isa == PROCESSOR_K1OM)
2485 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2486 || default_arch[6] != '\0')
2487 as_fatal (_("Intel K1OM is 64bit ELF only"));
2488 return bfd_mach_k1om;
2490 else if (default_arch[6] == '\0')
2491 return bfd_mach_x86_64;
2493 return bfd_mach_x64_32;
2495 else if (!strcmp (default_arch, "i386"))
2496 return bfd_mach_i386_i386;
2498 as_fatal (_("unknown architecture"));
2504 const char *hash_err;
2506 /* Initialize op_hash hash table. */
2507 op_hash = hash_new ();
2510 const insn_template *optab;
2511 templates *core_optab;
2513 /* Setup for loop. */
2515 core_optab = (templates *) xmalloc (sizeof (templates));
2516 core_optab->start = optab;
2521 if (optab->name == NULL
2522 || strcmp (optab->name, (optab - 1)->name) != 0)
2524 /* different name --> ship out current template list;
2525 add to hash table; & begin anew. */
2526 core_optab->end = optab;
2527 hash_err = hash_insert (op_hash,
2529 (void *) core_optab);
2532 as_fatal (_("can't hash %s: %s"),
2536 if (optab->name == NULL)
2538 core_optab = (templates *) xmalloc (sizeof (templates));
2539 core_optab->start = optab;
2544 /* Initialize reg_hash hash table. */
2545 reg_hash = hash_new ();
2547 const reg_entry *regtab;
2548 unsigned int regtab_size = i386_regtab_size;
2550 for (regtab = i386_regtab; regtab_size--; regtab++)
2552 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2554 as_fatal (_("can't hash %s: %s"),
2560 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2565 for (c = 0; c < 256; c++)
2570 mnemonic_chars[c] = c;
2571 register_chars[c] = c;
2572 operand_chars[c] = c;
2574 else if (ISLOWER (c))
2576 mnemonic_chars[c] = c;
2577 register_chars[c] = c;
2578 operand_chars[c] = c;
2580 else if (ISUPPER (c))
2582 mnemonic_chars[c] = TOLOWER (c);
2583 register_chars[c] = mnemonic_chars[c];
2584 operand_chars[c] = c;
2586 else if (c == '{' || c == '}')
2587 operand_chars[c] = c;
2589 if (ISALPHA (c) || ISDIGIT (c))
2590 identifier_chars[c] = c;
2593 identifier_chars[c] = c;
2594 operand_chars[c] = c;
2599 identifier_chars['@'] = '@';
2602 identifier_chars['?'] = '?';
2603 operand_chars['?'] = '?';
2605 digit_chars['-'] = '-';
2606 mnemonic_chars['_'] = '_';
2607 mnemonic_chars['-'] = '-';
2608 mnemonic_chars['.'] = '.';
2609 identifier_chars['_'] = '_';
2610 identifier_chars['.'] = '.';
2612 for (p = operand_special_chars; *p != '\0'; p++)
2613 operand_chars[(unsigned char) *p] = *p;
2616 if (flag_code == CODE_64BIT)
2618 #if defined (OBJ_COFF) && defined (TE_PE)
2619 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2622 x86_dwarf2_return_column = 16;
2624 x86_cie_data_alignment = -8;
2628 x86_dwarf2_return_column = 8;
2629 x86_cie_data_alignment = -4;
2634 i386_print_statistics (FILE *file)
2636 hash_print_statistics (file, "i386 opcode", op_hash);
2637 hash_print_statistics (file, "i386 register", reg_hash);
2642 /* Debugging routines for md_assemble. */
2643 static void pte (insn_template *);
2644 static void pt (i386_operand_type);
2645 static void pe (expressionS *);
2646 static void ps (symbolS *);
2649 pi (char *line, i386_insn *x)
2653 fprintf (stdout, "%s: template ", line);
2655 fprintf (stdout, " address: base %s index %s scale %x\n",
2656 x->base_reg ? x->base_reg->reg_name : "none",
2657 x->index_reg ? x->index_reg->reg_name : "none",
2658 x->log2_scale_factor);
2659 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2660 x->rm.mode, x->rm.reg, x->rm.regmem);
2661 fprintf (stdout, " sib: base %x index %x scale %x\n",
2662 x->sib.base, x->sib.index, x->sib.scale);
2663 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2664 (x->rex & REX_W) != 0,
2665 (x->rex & REX_R) != 0,
2666 (x->rex & REX_X) != 0,
2667 (x->rex & REX_B) != 0);
2668 for (j = 0; j < x->operands; j++)
2670 fprintf (stdout, " #%d: ", j + 1);
2672 fprintf (stdout, "\n");
2673 if (x->types[j].bitfield.reg8
2674 || x->types[j].bitfield.reg16
2675 || x->types[j].bitfield.reg32
2676 || x->types[j].bitfield.reg64
2677 || x->types[j].bitfield.regmmx
2678 || x->types[j].bitfield.regxmm
2679 || x->types[j].bitfield.regymm
2680 || x->types[j].bitfield.regzmm
2681 || x->types[j].bitfield.sreg2
2682 || x->types[j].bitfield.sreg3
2683 || x->types[j].bitfield.control
2684 || x->types[j].bitfield.debug
2685 || x->types[j].bitfield.test)
2686 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2687 if (operand_type_check (x->types[j], imm))
2689 if (operand_type_check (x->types[j], disp))
2690 pe (x->op[j].disps);
2695 pte (insn_template *t)
2698 fprintf (stdout, " %d operands ", t->operands);
2699 fprintf (stdout, "opcode %x ", t->base_opcode);
2700 if (t->extension_opcode != None)
2701 fprintf (stdout, "ext %x ", t->extension_opcode);
2702 if (t->opcode_modifier.d)
2703 fprintf (stdout, "D");
2704 if (t->opcode_modifier.w)
2705 fprintf (stdout, "W");
2706 fprintf (stdout, "\n");
2707 for (j = 0; j < t->operands; j++)
2709 fprintf (stdout, " #%d type ", j + 1);
2710 pt (t->operand_types[j]);
2711 fprintf (stdout, "\n");
2718 fprintf (stdout, " operation %d\n", e->X_op);
2719 fprintf (stdout, " add_number %ld (%lx)\n",
2720 (long) e->X_add_number, (long) e->X_add_number);
2721 if (e->X_add_symbol)
2723 fprintf (stdout, " add_symbol ");
2724 ps (e->X_add_symbol);
2725 fprintf (stdout, "\n");
2729 fprintf (stdout, " op_symbol ");
2730 ps (e->X_op_symbol);
2731 fprintf (stdout, "\n");
2738 fprintf (stdout, "%s type %s%s",
2740 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2741 segment_name (S_GET_SEGMENT (s)));
2744 static struct type_name
2746 i386_operand_type mask;
2749 const type_names[] =
2751 { OPERAND_TYPE_REG8, "r8" },
2752 { OPERAND_TYPE_REG16, "r16" },
2753 { OPERAND_TYPE_REG32, "r32" },
2754 { OPERAND_TYPE_REG64, "r64" },
2755 { OPERAND_TYPE_IMM8, "i8" },
2756 { OPERAND_TYPE_IMM8, "i8s" },
2757 { OPERAND_TYPE_IMM16, "i16" },
2758 { OPERAND_TYPE_IMM32, "i32" },
2759 { OPERAND_TYPE_IMM32S, "i32s" },
2760 { OPERAND_TYPE_IMM64, "i64" },
2761 { OPERAND_TYPE_IMM1, "i1" },
2762 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2763 { OPERAND_TYPE_DISP8, "d8" },
2764 { OPERAND_TYPE_DISP16, "d16" },
2765 { OPERAND_TYPE_DISP32, "d32" },
2766 { OPERAND_TYPE_DISP32S, "d32s" },
2767 { OPERAND_TYPE_DISP64, "d64" },
2768 { OPERAND_TYPE_VEC_DISP8, "Vector d8" },
2769 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2770 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2771 { OPERAND_TYPE_CONTROL, "control reg" },
2772 { OPERAND_TYPE_TEST, "test reg" },
2773 { OPERAND_TYPE_DEBUG, "debug reg" },
2774 { OPERAND_TYPE_FLOATREG, "FReg" },
2775 { OPERAND_TYPE_FLOATACC, "FAcc" },
2776 { OPERAND_TYPE_SREG2, "SReg2" },
2777 { OPERAND_TYPE_SREG3, "SReg3" },
2778 { OPERAND_TYPE_ACC, "Acc" },
2779 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2780 { OPERAND_TYPE_REGMMX, "rMMX" },
2781 { OPERAND_TYPE_REGXMM, "rXMM" },
2782 { OPERAND_TYPE_REGYMM, "rYMM" },
2783 { OPERAND_TYPE_REGZMM, "rZMM" },
2784 { OPERAND_TYPE_REGMASK, "Mask reg" },
2785 { OPERAND_TYPE_ESSEG, "es" },
2789 pt (i386_operand_type t)
2792 i386_operand_type a;
2794 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2796 a = operand_type_and (t, type_names[j].mask);
2797 if (!operand_type_all_zero (&a))
2798 fprintf (stdout, "%s, ", type_names[j].name);
2803 #endif /* DEBUG386 */
2805 static bfd_reloc_code_real_type
2806 reloc (unsigned int size,
2810 bfd_reloc_code_real_type other)
2812 if (other != NO_RELOC)
2814 reloc_howto_type *rel;
2819 case BFD_RELOC_X86_64_GOT32:
2820 return BFD_RELOC_X86_64_GOT64;
2822 case BFD_RELOC_X86_64_PLTOFF64:
2823 return BFD_RELOC_X86_64_PLTOFF64;
2825 case BFD_RELOC_X86_64_GOTPC32:
2826 other = BFD_RELOC_X86_64_GOTPC64;
2828 case BFD_RELOC_X86_64_GOTPCREL:
2829 other = BFD_RELOC_X86_64_GOTPCREL64;
2831 case BFD_RELOC_X86_64_TPOFF32:
2832 other = BFD_RELOC_X86_64_TPOFF64;
2834 case BFD_RELOC_X86_64_DTPOFF32:
2835 other = BFD_RELOC_X86_64_DTPOFF64;
2841 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2842 if (other == BFD_RELOC_SIZE32)
2845 return BFD_RELOC_SIZE64;
2847 as_bad (_("there are no pc-relative size relocations"));
2851 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2852 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2855 rel = bfd_reloc_type_lookup (stdoutput, other);
2857 as_bad (_("unknown relocation (%u)"), other);
2858 else if (size != bfd_get_reloc_size (rel))
2859 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2860 bfd_get_reloc_size (rel),
2862 else if (pcrel && !rel->pc_relative)
2863 as_bad (_("non-pc-relative relocation for pc-relative field"));
2864 else if ((rel->complain_on_overflow == complain_overflow_signed
2866 || (rel->complain_on_overflow == complain_overflow_unsigned
2868 as_bad (_("relocated field and relocation type differ in signedness"));
2877 as_bad (_("there are no unsigned pc-relative relocations"));
2880 case 1: return BFD_RELOC_8_PCREL;
2881 case 2: return BFD_RELOC_16_PCREL;
2882 case 4: return (bnd_prefix && object_64bit
2883 ? BFD_RELOC_X86_64_PC32_BND
2884 : BFD_RELOC_32_PCREL);
2885 case 8: return BFD_RELOC_64_PCREL;
2887 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2894 case 4: return BFD_RELOC_X86_64_32S;
2899 case 1: return BFD_RELOC_8;
2900 case 2: return BFD_RELOC_16;
2901 case 4: return BFD_RELOC_32;
2902 case 8: return BFD_RELOC_64;
2904 as_bad (_("cannot do %s %u byte relocation"),
2905 sign > 0 ? "signed" : "unsigned", size);
2911 /* Here we decide which fixups can be adjusted to make them relative to
2912 the beginning of the section instead of the symbol. Basically we need
2913 to make sure that the dynamic relocations are done correctly, so in
2914 some cases we force the original symbol to be used. */
2917 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2919 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2923 /* Don't adjust pc-relative references to merge sections in 64-bit
2925 if (use_rela_relocations
2926 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2930 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2931 and changed later by validate_fix. */
2932 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2933 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2936 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
2937 for size relocations. */
2938 if (fixP->fx_r_type == BFD_RELOC_SIZE32
2939 || fixP->fx_r_type == BFD_RELOC_SIZE64
2940 || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2941 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2942 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2943 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2944 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2945 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2946 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2947 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2948 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2949 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2950 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2951 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2952 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2953 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2954 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2955 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2956 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2957 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2958 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2959 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2960 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2961 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2962 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2963 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2964 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2965 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2966 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2967 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2974 intel_float_operand (const char *mnemonic)
2976 /* Note that the value returned is meaningful only for opcodes with (memory)
2977 operands, hence the code here is free to improperly handle opcodes that
2978 have no operands (for better performance and smaller code). */
2980 if (mnemonic[0] != 'f')
2981 return 0; /* non-math */
2983 switch (mnemonic[1])
2985 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2986 the fs segment override prefix not currently handled because no
2987 call path can make opcodes without operands get here */
2989 return 2 /* integer op */;
2991 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2992 return 3; /* fldcw/fldenv */
2995 if (mnemonic[2] != 'o' /* fnop */)
2996 return 3; /* non-waiting control op */
2999 if (mnemonic[2] == 's')
3000 return 3; /* frstor/frstpm */
3003 if (mnemonic[2] == 'a')
3004 return 3; /* fsave */
3005 if (mnemonic[2] == 't')
3007 switch (mnemonic[3])
3009 case 'c': /* fstcw */
3010 case 'd': /* fstdw */
3011 case 'e': /* fstenv */
3012 case 's': /* fsts[gw] */
3018 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
3019 return 0; /* fxsave/fxrstor are not really math ops */
3026 /* Build the VEX prefix. */
3029 build_vex_prefix (const insn_template *t)
3031 unsigned int register_specifier;
3032 unsigned int implied_prefix;
3033 unsigned int vector_length;
3035 /* Check register specifier. */
3036 if (i.vex.register_specifier)
3038 register_specifier =
3039 ~register_number (i.vex.register_specifier) & 0xf;
3040 gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
3043 register_specifier = 0xf;
3045 /* Use 2-byte VEX prefix by swappping destination and source
3048 && i.operands == i.reg_operands
3049 && i.tm.opcode_modifier.vexopcode == VEX0F
3050 && i.tm.opcode_modifier.s
3053 unsigned int xchg = i.operands - 1;
3054 union i386_op temp_op;
3055 i386_operand_type temp_type;
3057 temp_type = i.types[xchg];
3058 i.types[xchg] = i.types[0];
3059 i.types[0] = temp_type;
3060 temp_op = i.op[xchg];
3061 i.op[xchg] = i.op[0];
3064 gas_assert (i.rm.mode == 3);
3068 i.rm.regmem = i.rm.reg;
3071 /* Use the next insn. */
3075 if (i.tm.opcode_modifier.vex == VEXScalar)
3076 vector_length = avxscalar;
3078 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
3080 switch ((i.tm.base_opcode >> 8) & 0xff)
3085 case DATA_PREFIX_OPCODE:
3088 case REPE_PREFIX_OPCODE:
3091 case REPNE_PREFIX_OPCODE:
3098 /* Use 2-byte VEX prefix if possible. */
3099 if (i.tm.opcode_modifier.vexopcode == VEX0F
3100 && i.tm.opcode_modifier.vexw != VEXW1
3101 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
3103 /* 2-byte VEX prefix. */
3107 i.vex.bytes[0] = 0xc5;
3109 /* Check the REX.R bit. */
3110 r = (i.rex & REX_R) ? 0 : 1;
3111 i.vex.bytes[1] = (r << 7
3112 | register_specifier << 3
3113 | vector_length << 2
3118 /* 3-byte VEX prefix. */
3123 switch (i.tm.opcode_modifier.vexopcode)
3127 i.vex.bytes[0] = 0xc4;
3131 i.vex.bytes[0] = 0xc4;
3135 i.vex.bytes[0] = 0xc4;
3139 i.vex.bytes[0] = 0x8f;
3143 i.vex.bytes[0] = 0x8f;
3147 i.vex.bytes[0] = 0x8f;
3153 /* The high 3 bits of the second VEX byte are 1's compliment
3154 of RXB bits from REX. */
3155 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3157 /* Check the REX.W bit. */
3158 w = (i.rex & REX_W) ? 1 : 0;
3159 if (i.tm.opcode_modifier.vexw)
3164 if (i.tm.opcode_modifier.vexw == VEXW1)
3168 i.vex.bytes[2] = (w << 7
3169 | register_specifier << 3
3170 | vector_length << 2
3175 /* Build the EVEX prefix. */
3178 build_evex_prefix (void)
3180 unsigned int register_specifier;
3181 unsigned int implied_prefix;
3183 rex_byte vrex_used = 0;
3185 /* Check register specifier. */
3186 if (i.vex.register_specifier)
3188 gas_assert ((i.vrex & REX_X) == 0);
3190 register_specifier = i.vex.register_specifier->reg_num;
3191 if ((i.vex.register_specifier->reg_flags & RegRex))
3192 register_specifier += 8;
3193 /* The upper 16 registers are encoded in the fourth byte of the
3195 if (!(i.vex.register_specifier->reg_flags & RegVRex))
3196 i.vex.bytes[3] = 0x8;
3197 register_specifier = ~register_specifier & 0xf;
3201 register_specifier = 0xf;
3203 /* Encode upper 16 vector index register in the fourth byte of
3205 if (!(i.vrex & REX_X))
3206 i.vex.bytes[3] = 0x8;
3211 switch ((i.tm.base_opcode >> 8) & 0xff)
3216 case DATA_PREFIX_OPCODE:
3219 case REPE_PREFIX_OPCODE:
3222 case REPNE_PREFIX_OPCODE:
3229 /* 4 byte EVEX prefix. */
3231 i.vex.bytes[0] = 0x62;
3234 switch (i.tm.opcode_modifier.vexopcode)
3250 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3252 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3254 /* The fifth bit of the second EVEX byte is 1's compliment of the
3255 REX_R bit in VREX. */
3256 if (!(i.vrex & REX_R))
3257 i.vex.bytes[1] |= 0x10;
3261 if ((i.reg_operands + i.imm_operands) == i.operands)
3263 /* When all operands are registers, the REX_X bit in REX is not
3264 used. We reuse it to encode the upper 16 registers, which is
3265 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3266 as 1's compliment. */
3267 if ((i.vrex & REX_B))
3270 i.vex.bytes[1] &= ~0x40;
3274 /* EVEX instructions shouldn't need the REX prefix. */
3275 i.vrex &= ~vrex_used;
3276 gas_assert (i.vrex == 0);
3278 /* Check the REX.W bit. */
3279 w = (i.rex & REX_W) ? 1 : 0;
3280 if (i.tm.opcode_modifier.vexw)
3282 if (i.tm.opcode_modifier.vexw == VEXW1)
3285 /* If w is not set it means we are dealing with WIG instruction. */
3288 if (evexwig == evexw1)
3292 /* Encode the U bit. */
3293 implied_prefix |= 0x4;
3295 /* The third byte of the EVEX prefix. */
3296 i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);
3298 /* The fourth byte of the EVEX prefix. */
3299 /* The zeroing-masking bit. */
3300 if (i.mask && i.mask->zeroing)
3301 i.vex.bytes[3] |= 0x80;
3303 /* Don't always set the broadcast bit if there is no RC. */
3306 /* Encode the vector length. */
3307 unsigned int vec_length;
3309 switch (i.tm.opcode_modifier.evex)
3311 case EVEXLIG: /* LL' is ignored */
3312 vec_length = evexlig << 5;
3315 vec_length = 0 << 5;
3318 vec_length = 1 << 5;
3321 vec_length = 2 << 5;
3327 i.vex.bytes[3] |= vec_length;
3328 /* Encode the broadcast bit. */
3330 i.vex.bytes[3] |= 0x10;
3334 if (i.rounding->type != saeonly)
3335 i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
3337 i.vex.bytes[3] |= 0x10;
3340 if (i.mask && i.mask->mask)
3341 i.vex.bytes[3] |= i.mask->mask->reg_num;
3345 process_immext (void)
3349 if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
3352 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3353 with an opcode suffix which is coded in the same place as an
3354 8-bit immediate field would be.
3355 Here we check those operands and remove them afterwards. */
3358 for (x = 0; x < i.operands; x++)
3359 if (register_number (i.op[x].regs) != x)
3360 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3361 register_prefix, i.op[x].regs->reg_name, x + 1,
3367 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3368 which is coded in the same place as an 8-bit immediate field
3369 would be. Here we fake an 8-bit immediate operand from the
3370 opcode suffix stored in tm.extension_opcode.
3372 AVX instructions also use this encoding, for some of
3373 3 argument instructions. */
3375 gas_assert (i.imm_operands <= 1
3377 || ((i.tm.opcode_modifier.vex
3378 || i.tm.opcode_modifier.evex)
3379 && i.operands <= 4)));
3381 exp = &im_expressions[i.imm_operands++];
3382 i.op[i.operands].imms = exp;
3383 i.types[i.operands] = imm8;
3385 exp->X_op = O_constant;
3386 exp->X_add_number = i.tm.extension_opcode;
3387 i.tm.extension_opcode = None;
3394 switch (i.tm.opcode_modifier.hleprefixok)
3399 as_bad (_("invalid instruction `%s' after `%s'"),
3400 i.tm.name, i.hle_prefix);
3403 if (i.prefix[LOCK_PREFIX])
3405 as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
3409 case HLEPrefixRelease:
3410 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3412 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3416 if (i.mem_operands == 0
3417 || !operand_type_check (i.types[i.operands - 1], anymem))
3419 as_bad (_("memory destination needed for instruction `%s'"
3420 " after `xrelease'"), i.tm.name);
3427 /* This is the guts of the machine-dependent assembler. LINE points to a
3428 machine dependent instruction. This function is supposed to emit
3429 the frags/bytes it assembles to. */
3432 md_assemble (char *line)
3435 char mnemonic[MAX_MNEM_SIZE];
3436 const insn_template *t;
3438 /* Initialize globals. */
3439 memset (&i, '\0', sizeof (i));
3440 for (j = 0; j < MAX_OPERANDS; j++)
3441 i.reloc[j] = NO_RELOC;
3442 memset (disp_expressions, '\0', sizeof (disp_expressions));
3443 memset (im_expressions, '\0', sizeof (im_expressions));
3444 save_stack_p = save_stack;
3446 /* First parse an instruction mnemonic & call i386_operand for the operands.
3447 We assume that the scrubber has arranged it so that line[0] is the valid
3448 start of a (possibly prefixed) mnemonic. */
3450 line = parse_insn (line, mnemonic);
3454 line = parse_operands (line, mnemonic);
3459 /* Now we've parsed the mnemonic into a set of templates, and have the
3460 operands at hand. */
3462 /* All intel opcodes have reversed operands except for "bound" and
3463 "enter". We also don't reverse intersegment "jmp" and "call"
3464 instructions with 2 immediate operands so that the immediate segment
3465 precedes the offset, as it does when in AT&T mode. */
3468 && (strcmp (mnemonic, "bound") != 0)
3469 && (strcmp (mnemonic, "invlpga") != 0)
3470 && !(operand_type_check (i.types[0], imm)
3471 && operand_type_check (i.types[1], imm)))
3474 /* The order of the immediates should be reversed
3475 for 2 immediates extrq and insertq instructions */
3476 if (i.imm_operands == 2
3477 && (strcmp (mnemonic, "extrq") == 0
3478 || strcmp (mnemonic, "insertq") == 0))
3479 swap_2_operands (0, 1);
3484 /* Don't optimize displacement for movabs since it only takes 64bit
3487 && i.disp_encoding != disp_encoding_32bit
3488 && (flag_code != CODE_64BIT
3489 || strcmp (mnemonic, "movabs") != 0))
3492 /* Next, we find a template that matches the given insn,
3493 making sure the overlap of the given operands types is consistent
3494 with the template operand types. */
3496 if (!(t = match_template ()))
3499 if (sse_check != check_none
3500 && !i.tm.opcode_modifier.noavx
3501 && (i.tm.cpu_flags.bitfield.cpusse
3502 || i.tm.cpu_flags.bitfield.cpusse2
3503 || i.tm.cpu_flags.bitfield.cpusse3
3504 || i.tm.cpu_flags.bitfield.cpussse3
3505 || i.tm.cpu_flags.bitfield.cpusse4_1
3506 || i.tm.cpu_flags.bitfield.cpusse4_2))
3508 (sse_check == check_warning
3510 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3513 /* Zap movzx and movsx suffix. The suffix has been set from
3514 "word ptr" or "byte ptr" on the source operand in Intel syntax
3515 or extracted from mnemonic in AT&T syntax. But we'll use
3516 the destination register to choose the suffix for encoding. */
3517 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3519 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3520 there is no suffix, the default will be byte extension. */
3521 if (i.reg_operands != 2
3524 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3529 if (i.tm.opcode_modifier.fwait)
3530 if (!add_prefix (FWAIT_OPCODE))
3533 /* Check if REP prefix is OK. */
3534 if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
3536 as_bad (_("invalid instruction `%s' after `%s'"),
3537 i.tm.name, i.rep_prefix);
3541 /* Check for lock without a lockable instruction. Destination operand
3542 must be memory unless it is xchg (0x86). */
3543 if (i.prefix[LOCK_PREFIX]
3544 && (!i.tm.opcode_modifier.islockable
3545 || i.mem_operands == 0
3546 || (i.tm.base_opcode != 0x86
3547 && !operand_type_check (i.types[i.operands - 1], anymem))))
3549 as_bad (_("expecting lockable instruction after `lock'"));
3553 /* Check if HLE prefix is OK. */
3554 if (i.hle_prefix && !check_hle ())
3557 /* Check BND prefix. */
3558 if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
3559 as_bad (_("expecting valid branch instruction after `bnd'"));
3561 if (i.tm.cpu_flags.bitfield.cpumpx
3562 && flag_code == CODE_64BIT
3563 && i.prefix[ADDR_PREFIX])
3564 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
3566 /* Insert BND prefix. */
3568 && i.tm.opcode_modifier.bndprefixok
3569 && !i.prefix[BND_PREFIX])
3570 add_prefix (BND_PREFIX_OPCODE);
3572 /* Check string instruction segment overrides. */
3573 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3575 if (!check_string ())
3577 i.disp_operands = 0;
3580 if (!process_suffix ())
3583 /* Update operand types. */
3584 for (j = 0; j < i.operands; j++)
3585 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3587 /* Make still unresolved immediate matches conform to size of immediate
3588 given in i.suffix. */
3589 if (!finalize_imm ())
3592 if (i.types[0].bitfield.imm1)
3593 i.imm_operands = 0; /* kludge for shift insns. */
3595 /* We only need to check those implicit registers for instructions
3596 with 3 operands or less. */
3597 if (i.operands <= 3)
3598 for (j = 0; j < i.operands; j++)
3599 if (i.types[j].bitfield.inoutportreg
3600 || i.types[j].bitfield.shiftcount
3601 || i.types[j].bitfield.acc
3602 || i.types[j].bitfield.floatacc)
3605 /* ImmExt should be processed after SSE2AVX. */
3606 if (!i.tm.opcode_modifier.sse2avx
3607 && i.tm.opcode_modifier.immext)
3610 /* For insns with operands there are more diddles to do to the opcode. */
3613 if (!process_operands ())
3616 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3618 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3619 as_warn (_("translating to `%sp'"), i.tm.name);
3622 if (i.tm.opcode_modifier.vex)
3623 build_vex_prefix (t);
3625 if (i.tm.opcode_modifier.evex)
3626 build_evex_prefix ();
3628 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3629 instructions may define INT_OPCODE as well, so avoid this corner
3630 case for those instructions that use MODRM. */
3631 if (i.tm.base_opcode == INT_OPCODE
3632 && !i.tm.opcode_modifier.modrm
3633 && i.op[0].imms->X_add_number == 3)
3635 i.tm.base_opcode = INT3_OPCODE;
3639 if ((i.tm.opcode_modifier.jump
3640 || i.tm.opcode_modifier.jumpbyte
3641 || i.tm.opcode_modifier.jumpdword)
3642 && i.op[0].disps->X_op == O_constant)
3644 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3645 the absolute address given by the constant. Since ix86 jumps and
3646 calls are pc relative, we need to generate a reloc. */
3647 i.op[0].disps->X_add_symbol = &abs_symbol;
3648 i.op[0].disps->X_op = O_symbol;
3651 if (i.tm.opcode_modifier.rex64)
3654 /* For 8 bit registers we need an empty rex prefix. Also if the
3655 instruction already has a prefix, we need to convert old
3656 registers to new ones. */
3658 if ((i.types[0].bitfield.reg8
3659 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3660 || (i.types[1].bitfield.reg8
3661 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3662 || ((i.types[0].bitfield.reg8
3663 || i.types[1].bitfield.reg8)
3668 i.rex |= REX_OPCODE;
3669 for (x = 0; x < 2; x++)
3671 /* Look for 8 bit operand that uses old registers. */
3672 if (i.types[x].bitfield.reg8
3673 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3675 /* In case it is "hi" register, give up. */
3676 if (i.op[x].regs->reg_num > 3)
3677 as_bad (_("can't encode register '%s%s' in an "
3678 "instruction requiring REX prefix."),
3679 register_prefix, i.op[x].regs->reg_name);
3681 /* Otherwise it is equivalent to the extended register.
3682 Since the encoding doesn't change this is merely
3683 cosmetic cleanup for debug output. */
3685 i.op[x].regs = i.op[x].regs + 8;
3691 add_prefix (REX_OPCODE | i.rex);
3693 /* We are ready to output the insn. */
3698 parse_insn (char *line, char *mnemonic)
3701 char *token_start = l;
3704 const insn_template *t;
3710 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3715 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3717 as_bad (_("no such instruction: `%s'"), token_start);
3722 if (!is_space_char (*l)
3723 && *l != END_OF_INSN
3725 || (*l != PREFIX_SEPARATOR
3728 as_bad (_("invalid character %s in mnemonic"),
3729 output_invalid (*l));
3732 if (token_start == l)
3734 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3735 as_bad (_("expecting prefix; got nothing"));
3737 as_bad (_("expecting mnemonic; got nothing"));
3741 /* Look up instruction (or prefix) via hash table. */
3742 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3744 if (*l != END_OF_INSN
3745 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3746 && current_templates
3747 && current_templates->start->opcode_modifier.isprefix)
3749 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3751 as_bad ((flag_code != CODE_64BIT
3752 ? _("`%s' is only supported in 64-bit mode")
3753 : _("`%s' is not supported in 64-bit mode")),
3754 current_templates->start->name);
3757 /* If we are in 16-bit mode, do not allow addr16 or data16.
3758 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3759 if ((current_templates->start->opcode_modifier.size16
3760 || current_templates->start->opcode_modifier.size32)
3761 && flag_code != CODE_64BIT
3762 && (current_templates->start->opcode_modifier.size32
3763 ^ (flag_code == CODE_16BIT)))
3765 as_bad (_("redundant %s prefix"),
3766 current_templates->start->name);
3769 /* Add prefix, checking for repeated prefixes. */
3770 switch (add_prefix (current_templates->start->base_opcode))
3775 if (current_templates->start->cpu_flags.bitfield.cpuhle)
3776 i.hle_prefix = current_templates->start->name;
3777 else if (current_templates->start->cpu_flags.bitfield.cpumpx)
3778 i.bnd_prefix = current_templates->start->name;
3780 i.rep_prefix = current_templates->start->name;
3785 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3792 if (!current_templates)
3794 /* Check if we should swap operand or force 32bit displacement in
3796 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3798 else if (mnem_p - 3 == dot_p
3801 i.disp_encoding = disp_encoding_8bit;
3802 else if (mnem_p - 4 == dot_p
3806 i.disp_encoding = disp_encoding_32bit;
3811 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3814 if (!current_templates)
3817 /* See if we can get a match by trimming off a suffix. */
3820 case WORD_MNEM_SUFFIX:
3821 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3822 i.suffix = SHORT_MNEM_SUFFIX;
3824 case BYTE_MNEM_SUFFIX:
3825 case QWORD_MNEM_SUFFIX:
3826 i.suffix = mnem_p[-1];
3828 current_templates = (const templates *) hash_find (op_hash,
3831 case SHORT_MNEM_SUFFIX:
3832 case LONG_MNEM_SUFFIX:
3835 i.suffix = mnem_p[-1];
3837 current_templates = (const templates *) hash_find (op_hash,
3846 if (intel_float_operand (mnemonic) == 1)
3847 i.suffix = SHORT_MNEM_SUFFIX;
3849 i.suffix = LONG_MNEM_SUFFIX;
3851 current_templates = (const templates *) hash_find (op_hash,
3856 if (!current_templates)
3858 as_bad (_("no such instruction: `%s'"), token_start);
3863 if (current_templates->start->opcode_modifier.jump
3864 || current_templates->start->opcode_modifier.jumpbyte)
3866 /* Check for a branch hint. We allow ",pt" and ",pn" for
3867 predict taken and predict not taken respectively.
3868 I'm not sure that branch hints actually do anything on loop
3869 and jcxz insns (JumpByte) for current Pentium4 chips. They
3870 may work in the future and it doesn't hurt to accept them
3872 if (l[0] == ',' && l[1] == 'p')
3876 if (!add_prefix (DS_PREFIX_OPCODE))
3880 else if (l[2] == 'n')
3882 if (!add_prefix (CS_PREFIX_OPCODE))
3888 /* Any other comma loses. */
3891 as_bad (_("invalid character %s in mnemonic"),
3892 output_invalid (*l));
3896 /* Check if instruction is supported on specified architecture. */
3898 for (t = current_templates->start; t < current_templates->end; ++t)
3900 supported |= cpu_flags_match (t);
3901 if (supported == CPU_FLAGS_PERFECT_MATCH)
3905 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3907 as_bad (flag_code == CODE_64BIT
3908 ? _("`%s' is not supported in 64-bit mode")
3909 : _("`%s' is only supported in 64-bit mode"),
3910 current_templates->start->name);
3913 if (supported != CPU_FLAGS_PERFECT_MATCH)
3915 as_bad (_("`%s' is not supported on `%s%s'"),
3916 current_templates->start->name,
3917 cpu_arch_name ? cpu_arch_name : default_arch,
3918 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3923 if (!cpu_arch_flags.bitfield.cpui386
3924 && (flag_code != CODE_16BIT))
3926 as_warn (_("use .code16 to ensure correct addressing mode"));
3933 parse_operands (char *l, const char *mnemonic)
3937 /* 1 if operand is pending after ','. */
3938 unsigned int expecting_operand = 0;
3940 /* Non-zero if operand parens not balanced. */
3941 unsigned int paren_not_balanced;
3943 while (*l != END_OF_INSN)
3945 /* Skip optional white space before operand. */
3946 if (is_space_char (*l))
3948 if (!is_operand_char (*l) && *l != END_OF_INSN)
3950 as_bad (_("invalid character %s before operand %d"),
3951 output_invalid (*l),
3955 token_start = l; /* after white space */
3956 paren_not_balanced = 0;
3957 while (paren_not_balanced || *l != ',')
3959 if (*l == END_OF_INSN)
3961 if (paren_not_balanced)
3964 as_bad (_("unbalanced parenthesis in operand %d."),
3967 as_bad (_("unbalanced brackets in operand %d."),
3972 break; /* we are done */
3974 else if (!is_operand_char (*l) && !is_space_char (*l))
3976 as_bad (_("invalid character %s in operand %d"),
3977 output_invalid (*l),
3984 ++paren_not_balanced;
3986 --paren_not_balanced;
3991 ++paren_not_balanced;
3993 --paren_not_balanced;
3997 if (l != token_start)
3998 { /* Yes, we've read in another operand. */
3999 unsigned int operand_ok;
4000 this_operand = i.operands++;
4001 i.types[this_operand].bitfield.unspecified = 1;
4002 if (i.operands > MAX_OPERANDS)
4004 as_bad (_("spurious operands; (%d operands/instruction max)"),
4008 /* Now parse operand adding info to 'i' as we go along. */
4009 END_STRING_AND_SAVE (l);
4013 i386_intel_operand (token_start,
4014 intel_float_operand (mnemonic));
4016 operand_ok = i386_att_operand (token_start);
4018 RESTORE_END_STRING (l);
4024 if (expecting_operand)
4026 expecting_operand_after_comma:
4027 as_bad (_("expecting operand after ','; got nothing"));
4032 as_bad (_("expecting operand before ','; got nothing"));
4037 /* Now *l must be either ',' or END_OF_INSN. */
4040 if (*++l == END_OF_INSN)
4042 /* Just skip it, if it's \n complain. */
4043 goto expecting_operand_after_comma;
4045 expecting_operand = 1;
4052 swap_2_operands (int xchg1, int xchg2)
4054 union i386_op temp_op;
4055 i386_operand_type temp_type;
4056 enum bfd_reloc_code_real temp_reloc;
4058 temp_type = i.types[xchg2];
4059 i.types[xchg2] = i.types[xchg1];
4060 i.types[xchg1] = temp_type;
4061 temp_op = i.op[xchg2];
4062 i.op[xchg2] = i.op[xchg1];
4063 i.op[xchg1] = temp_op;
4064 temp_reloc = i.reloc[xchg2];
4065 i.reloc[xchg2] = i.reloc[xchg1];
4066 i.reloc[xchg1] = temp_reloc;
4070 if (i.mask->operand == xchg1)
4071 i.mask->operand = xchg2;
4072 else if (i.mask->operand == xchg2)
4073 i.mask->operand = xchg1;
4077 if (i.broadcast->operand == xchg1)
4078 i.broadcast->operand = xchg2;
4079 else if (i.broadcast->operand == xchg2)
4080 i.broadcast->operand = xchg1;
4084 if (i.rounding->operand == xchg1)
4085 i.rounding->operand = xchg2;
4086 else if (i.rounding->operand == xchg2)
4087 i.rounding->operand = xchg1;
4092 swap_operands (void)
4098 swap_2_operands (1, i.operands - 2);
4101 swap_2_operands (0, i.operands - 1);
4107 if (i.mem_operands == 2)
4109 const seg_entry *temp_seg;
4110 temp_seg = i.seg[0];
4111 i.seg[0] = i.seg[1];
4112 i.seg[1] = temp_seg;
4116 /* Try to ensure constant immediates are represented in the smallest
4121 char guess_suffix = 0;
4125 guess_suffix = i.suffix;
4126 else if (i.reg_operands)
4128 /* Figure out a suffix from the last register operand specified.
4129 We can't do this properly yet, ie. excluding InOutPortReg,
4130 but the following works for instructions with immediates.
4131 In any case, we can't set i.suffix yet. */
4132 for (op = i.operands; --op >= 0;)
4133 if (i.types[op].bitfield.reg8)
4135 guess_suffix = BYTE_MNEM_SUFFIX;
4138 else if (i.types[op].bitfield.reg16)
4140 guess_suffix = WORD_MNEM_SUFFIX;
4143 else if (i.types[op].bitfield.reg32)
4145 guess_suffix = LONG_MNEM_SUFFIX;
4148 else if (i.types[op].bitfield.reg64)
4150 guess_suffix = QWORD_MNEM_SUFFIX;
4154 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4155 guess_suffix = WORD_MNEM_SUFFIX;
4157 for (op = i.operands; --op >= 0;)
4158 if (operand_type_check (i.types[op], imm))
4160 switch (i.op[op].imms->X_op)
4163 /* If a suffix is given, this operand may be shortened. */
4164 switch (guess_suffix)
4166 case LONG_MNEM_SUFFIX:
4167 i.types[op].bitfield.imm32 = 1;
4168 i.types[op].bitfield.imm64 = 1;
4170 case WORD_MNEM_SUFFIX:
4171 i.types[op].bitfield.imm16 = 1;
4172 i.types[op].bitfield.imm32 = 1;
4173 i.types[op].bitfield.imm32s = 1;
4174 i.types[op].bitfield.imm64 = 1;
4176 case BYTE_MNEM_SUFFIX:
4177 i.types[op].bitfield.imm8 = 1;
4178 i.types[op].bitfield.imm8s = 1;
4179 i.types[op].bitfield.imm16 = 1;
4180 i.types[op].bitfield.imm32 = 1;
4181 i.types[op].bitfield.imm32s = 1;
4182 i.types[op].bitfield.imm64 = 1;
4186 /* If this operand is at most 16 bits, convert it
4187 to a signed 16 bit number before trying to see
4188 whether it will fit in an even smaller size.
4189 This allows a 16-bit operand such as $0xffe0 to
4190 be recognised as within Imm8S range. */
4191 if ((i.types[op].bitfield.imm16)
4192 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
4194 i.op[op].imms->X_add_number =
4195 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
4197 if ((i.types[op].bitfield.imm32)
4198 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
4201 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
4202 ^ ((offsetT) 1 << 31))
4203 - ((offsetT) 1 << 31));
4206 = operand_type_or (i.types[op],
4207 smallest_imm_type (i.op[op].imms->X_add_number));
4209 /* We must avoid matching of Imm32 templates when 64bit
4210 only immediate is available. */
4211 if (guess_suffix == QWORD_MNEM_SUFFIX)
4212 i.types[op].bitfield.imm32 = 0;
4219 /* Symbols and expressions. */
4221 /* Convert symbolic operand to proper sizes for matching, but don't
4222 prevent matching a set of insns that only supports sizes other
4223 than those matching the insn suffix. */
4225 i386_operand_type mask, allowed;
4226 const insn_template *t;
4228 operand_type_set (&mask, 0);
4229 operand_type_set (&allowed, 0);
4231 for (t = current_templates->start;
4232 t < current_templates->end;
4234 allowed = operand_type_or (allowed,
4235 t->operand_types[op]);
4236 switch (guess_suffix)
4238 case QWORD_MNEM_SUFFIX:
4239 mask.bitfield.imm64 = 1;
4240 mask.bitfield.imm32s = 1;
4242 case LONG_MNEM_SUFFIX:
4243 mask.bitfield.imm32 = 1;
4245 case WORD_MNEM_SUFFIX:
4246 mask.bitfield.imm16 = 1;
4248 case BYTE_MNEM_SUFFIX:
4249 mask.bitfield.imm8 = 1;
4254 allowed = operand_type_and (mask, allowed);
4255 if (!operand_type_all_zero (&allowed))
4256 i.types[op] = operand_type_and (i.types[op], mask);
4263 /* Try to use the smallest displacement type too. */
4265 optimize_disp (void)
4269 for (op = i.operands; --op >= 0;)
4270 if (operand_type_check (i.types[op], disp))
4272 if (i.op[op].disps->X_op == O_constant)
4274 offsetT op_disp = i.op[op].disps->X_add_number;
4276 if (i.types[op].bitfield.disp16
4277 && (op_disp & ~(offsetT) 0xffff) == 0)
4279 /* If this operand is at most 16 bits, convert
4280 to a signed 16 bit number and don't use 64bit
4282 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
4283 i.types[op].bitfield.disp64 = 0;
4285 if (i.types[op].bitfield.disp32
4286 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
4288 /* If this operand is at most 32 bits, convert
4289 to a signed 32 bit number and don't use 64bit
4291 op_disp &= (((offsetT) 2 << 31) - 1);
4292 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
4293 i.types[op].bitfield.disp64 = 0;
4295 if (!op_disp && i.types[op].bitfield.baseindex)
4297 i.types[op].bitfield.disp8 = 0;
4298 i.types[op].bitfield.disp16 = 0;
4299 i.types[op].bitfield.disp32 = 0;
4300 i.types[op].bitfield.disp32s = 0;
4301 i.types[op].bitfield.disp64 = 0;
4305 else if (flag_code == CODE_64BIT)
4307 if (fits_in_signed_long (op_disp))
4309 i.types[op].bitfield.disp64 = 0;
4310 i.types[op].bitfield.disp32s = 1;
4312 if (i.prefix[ADDR_PREFIX]
4313 && fits_in_unsigned_long (op_disp))
4314 i.types[op].bitfield.disp32 = 1;
4316 if ((i.types[op].bitfield.disp32
4317 || i.types[op].bitfield.disp32s
4318 || i.types[op].bitfield.disp16)
4319 && fits_in_signed_byte (op_disp))
4320 i.types[op].bitfield.disp8 = 1;
4322 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
4323 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
4325 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
4326 i.op[op].disps, 0, i.reloc[op]);
4327 i.types[op].bitfield.disp8 = 0;
4328 i.types[op].bitfield.disp16 = 0;
4329 i.types[op].bitfield.disp32 = 0;
4330 i.types[op].bitfield.disp32s = 0;
4331 i.types[op].bitfield.disp64 = 0;
4334 /* We only support 64bit displacement on constants. */
4335 i.types[op].bitfield.disp64 = 0;
4339 /* Check if operands are valid for the instruction. */
4342 check_VecOperands (const insn_template *t)
4346 /* Without VSIB byte, we can't have a vector register for index. */
4347 if (!t->opcode_modifier.vecsib
4349 && (i.index_reg->reg_type.bitfield.regxmm
4350 || i.index_reg->reg_type.bitfield.regymm
4351 || i.index_reg->reg_type.bitfield.regzmm))
4353 i.error = unsupported_vector_index_register;
4357 /* Check if default mask is allowed. */
4358 if (t->opcode_modifier.nodefmask
4359 && (!i.mask || i.mask->mask->reg_num == 0))
4361 i.error = no_default_mask;
4365 /* For VSIB byte, we need a vector register for index, and all vector
4366 registers must be distinct. */
4367 if (t->opcode_modifier.vecsib)
4370 || !((t->opcode_modifier.vecsib == VecSIB128
4371 && i.index_reg->reg_type.bitfield.regxmm)
4372 || (t->opcode_modifier.vecsib == VecSIB256
4373 && i.index_reg->reg_type.bitfield.regymm)
4374 || (t->opcode_modifier.vecsib == VecSIB512
4375 && i.index_reg->reg_type.bitfield.regzmm)))
4377 i.error = invalid_vsib_address;
4381 gas_assert (i.reg_operands == 2 || i.mask);
4382 if (i.reg_operands == 2 && !i.mask)
4384 gas_assert (i.types[0].bitfield.regxmm
4385 || i.types[0].bitfield.regymm);
4386 gas_assert (i.types[2].bitfield.regxmm
4387 || i.types[2].bitfield.regymm);
4388 if (operand_check == check_none)
4390 if (register_number (i.op[0].regs)
4391 != register_number (i.index_reg)
4392 && register_number (i.op[2].regs)
4393 != register_number (i.index_reg)
4394 && register_number (i.op[0].regs)
4395 != register_number (i.op[2].regs))
4397 if (operand_check == check_error)
4399 i.error = invalid_vector_register_set;
4402 as_warn (_("mask, index, and destination registers should be distinct"));
4404 else if (i.reg_operands == 1 && i.mask)
4406 if ((i.types[1].bitfield.regymm
4407 || i.types[1].bitfield.regzmm)
4408 && (register_number (i.op[1].regs)
4409 == register_number (i.index_reg)))
4411 if (operand_check == check_error)
4413 i.error = invalid_vector_register_set;
4416 if (operand_check != check_none)
4417 as_warn (_("index and destination registers should be distinct"));
4422 /* Check if broadcast is supported by the instruction and is applied
4423 to the memory operand. */
4426 int broadcasted_opnd_size;
4428 /* Check if specified broadcast is supported in this instruction,
4429 and it's applied to memory operand of DWORD or QWORD type,
4430 depending on VecESize. */
4431 if (i.broadcast->type != t->opcode_modifier.broadcast
4432 || !i.types[i.broadcast->operand].bitfield.mem
4433 || (t->opcode_modifier.vecesize == 0
4434 && !i.types[i.broadcast->operand].bitfield.dword
4435 && !i.types[i.broadcast->operand].bitfield.unspecified)
4436 || (t->opcode_modifier.vecesize == 1
4437 && !i.types[i.broadcast->operand].bitfield.qword
4438 && !i.types[i.broadcast->operand].bitfield.unspecified))
4441 broadcasted_opnd_size = t->opcode_modifier.vecesize ? 64 : 32;
4442 if (i.broadcast->type == BROADCAST_1TO16)
4443 broadcasted_opnd_size <<= 4; /* Broadcast 1to16. */
4444 else if (i.broadcast->type == BROADCAST_1TO8)
4445 broadcasted_opnd_size <<= 3; /* Broadcast 1to8. */
4449 if ((broadcasted_opnd_size == 256
4450 && !t->operand_types[i.broadcast->operand].bitfield.ymmword)
4451 || (broadcasted_opnd_size == 512
4452 && !t->operand_types[i.broadcast->operand].bitfield.zmmword))
4455 i.error = unsupported_broadcast;
4459 /* If broadcast is supported in this instruction, we need to check if
4460 operand of one-element size isn't specified without broadcast. */
4461 else if (t->opcode_modifier.broadcast && i.mem_operands)
4463 /* Find memory operand. */
4464 for (op = 0; op < i.operands; op++)
4465 if (operand_type_check (i.types[op], anymem))
4467 gas_assert (op < i.operands);
4468 /* Check size of the memory operand. */
4469 if ((t->opcode_modifier.vecesize == 0
4470 && i.types[op].bitfield.dword)
4471 || (t->opcode_modifier.vecesize == 1
4472 && i.types[op].bitfield.qword))
4474 i.error = broadcast_needed;
4479 /* Check if requested masking is supported. */
4481 && (!t->opcode_modifier.masking
4483 && t->opcode_modifier.masking == MERGING_MASKING)))
4485 i.error = unsupported_masking;
4489 /* Check if masking is applied to dest operand. */
4490 if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
4492 i.error = mask_not_on_destination;
4499 if ((i.rounding->type != saeonly
4500 && !t->opcode_modifier.staticrounding)
4501 || (i.rounding->type == saeonly
4502 && (t->opcode_modifier.staticrounding
4503 || !t->opcode_modifier.sae)))
4505 i.error = unsupported_rc_sae;
4508 /* If the instruction has several immediate operands and one of
4509 them is rounding, the rounding operand should be the last
4510 immediate operand. */
4511 if (i.imm_operands > 1
4512 && i.rounding->operand != (int) (i.imm_operands - 1))
4514 i.error = rc_sae_operand_not_last_imm;
4519 /* Check vector Disp8 operand. */
4520 if (t->opcode_modifier.disp8memshift)
4523 i.memshift = t->opcode_modifier.vecesize ? 3 : 2;
4525 i.memshift = t->opcode_modifier.disp8memshift;
4527 for (op = 0; op < i.operands; op++)
4528 if (operand_type_check (i.types[op], disp)
4529 && i.op[op].disps->X_op == O_constant)
4531 offsetT value = i.op[op].disps->X_add_number;
4532 int vec_disp8_ok = fits_in_vec_disp8 (value);
4533 if (t->operand_types [op].bitfield.vec_disp8)
4536 i.types[op].bitfield.vec_disp8 = 1;
4539 /* Vector insn can only have Vec_Disp8/Disp32 in
4540 32/64bit modes, and Vec_Disp8/Disp16 in 16bit
4542 i.types[op].bitfield.disp8 = 0;
4543 if (flag_code != CODE_16BIT)
4544 i.types[op].bitfield.disp16 = 0;
4547 else if (flag_code != CODE_16BIT)
4549 /* One form of this instruction supports vector Disp8.
4550 Try vector Disp8 if we need to use Disp32. */
4551 if (vec_disp8_ok && !fits_in_signed_byte (value))
4553 i.error = try_vector_disp8;
4565 /* Check if operands are valid for the instruction. Update VEX
4569 VEX_check_operands (const insn_template *t)
4571 /* VREX is only valid with EVEX prefix. */
4572 if (i.need_vrex && !t->opcode_modifier.evex)
4574 i.error = invalid_register_operand;
4578 if (!t->opcode_modifier.vex)
4581 /* Only check VEX_Imm4, which must be the first operand. */
4582 if (t->operand_types[0].bitfield.vec_imm4)
4584 if (i.op[0].imms->X_op != O_constant
4585 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4591 /* Turn off Imm8 so that update_imm won't complain. */
4592 i.types[0] = vec_imm4;
4598 static const insn_template *
4599 match_template (void)
4601 /* Points to template once we've found it. */
4602 const insn_template *t;
4603 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4604 i386_operand_type overlap4;
4605 unsigned int found_reverse_match;
4606 i386_opcode_modifier suffix_check;
4607 i386_operand_type operand_types [MAX_OPERANDS];
4608 int addr_prefix_disp;
4610 unsigned int found_cpu_match;
4611 unsigned int check_register;
4612 enum i386_error specific_error = 0;
4614 #if MAX_OPERANDS != 5
4615 # error "MAX_OPERANDS must be 5."
4618 found_reverse_match = 0;
4619 addr_prefix_disp = -1;
4621 memset (&suffix_check, 0, sizeof (suffix_check));
4622 if (i.suffix == BYTE_MNEM_SUFFIX)
4623 suffix_check.no_bsuf = 1;
4624 else if (i.suffix == WORD_MNEM_SUFFIX)
4625 suffix_check.no_wsuf = 1;
4626 else if (i.suffix == SHORT_MNEM_SUFFIX)
4627 suffix_check.no_ssuf = 1;
4628 else if (i.suffix == LONG_MNEM_SUFFIX)
4629 suffix_check.no_lsuf = 1;
4630 else if (i.suffix == QWORD_MNEM_SUFFIX)
4631 suffix_check.no_qsuf = 1;
4632 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4633 suffix_check.no_ldsuf = 1;
4635 /* Must have right number of operands. */
4636 i.error = number_of_operands_mismatch;
4638 for (t = current_templates->start; t < current_templates->end; t++)
4640 addr_prefix_disp = -1;
4642 if (i.operands != t->operands)
4645 /* Check processor support. */
4646 i.error = unsupported;
4647 found_cpu_match = (cpu_flags_match (t)
4648 == CPU_FLAGS_PERFECT_MATCH);
4649 if (!found_cpu_match)
4652 /* Check old gcc support. */
4653 i.error = old_gcc_only;
4654 if (!old_gcc && t->opcode_modifier.oldgcc)
4657 /* Check AT&T mnemonic. */
4658 i.error = unsupported_with_intel_mnemonic;
4659 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4662 /* Check AT&T/Intel syntax. */
4663 i.error = unsupported_syntax;
4664 if ((intel_syntax && t->opcode_modifier.attsyntax)
4665 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4668 /* Check the suffix, except for some instructions in intel mode. */
4669 i.error = invalid_instruction_suffix;
4670 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4671 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4672 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4673 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4674 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4675 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4676 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4679 if (!operand_size_match (t))
4682 for (j = 0; j < MAX_OPERANDS; j++)
4683 operand_types[j] = t->operand_types[j];
4685 /* In general, don't allow 64-bit operands in 32-bit mode. */
4686 if (i.suffix == QWORD_MNEM_SUFFIX
4687 && flag_code != CODE_64BIT
4689 ? (!t->opcode_modifier.ignoresize
4690 && !intel_float_operand (t->name))
4691 : intel_float_operand (t->name) != 2)
4692 && ((!operand_types[0].bitfield.regmmx
4693 && !operand_types[0].bitfield.regxmm
4694 && !operand_types[0].bitfield.regymm
4695 && !operand_types[0].bitfield.regzmm)
4696 || (!operand_types[t->operands > 1].bitfield.regmmx
4697 && !!operand_types[t->operands > 1].bitfield.regxmm
4698 && !!operand_types[t->operands > 1].bitfield.regymm
4699 && !!operand_types[t->operands > 1].bitfield.regzmm))
4700 && (t->base_opcode != 0x0fc7
4701 || t->extension_opcode != 1 /* cmpxchg8b */))
4704 /* In general, don't allow 32-bit operands on pre-386. */
4705 else if (i.suffix == LONG_MNEM_SUFFIX
4706 && !cpu_arch_flags.bitfield.cpui386
4708 ? (!t->opcode_modifier.ignoresize
4709 && !intel_float_operand (t->name))
4710 : intel_float_operand (t->name) != 2)
4711 && ((!operand_types[0].bitfield.regmmx
4712 && !operand_types[0].bitfield.regxmm)
4713 || (!operand_types[t->operands > 1].bitfield.regmmx
4714 && !!operand_types[t->operands > 1].bitfield.regxmm)))
4717 /* Do not verify operands when there are none. */
4721 /* We've found a match; break out of loop. */
4725 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4726 into Disp32/Disp16/Disp32 operand. */
4727 if (i.prefix[ADDR_PREFIX] != 0)
4729 /* There should be only one Disp operand. */
4733 for (j = 0; j < MAX_OPERANDS; j++)
4735 if (operand_types[j].bitfield.disp16)
4737 addr_prefix_disp = j;
4738 operand_types[j].bitfield.disp32 = 1;
4739 operand_types[j].bitfield.disp16 = 0;
4745 for (j = 0; j < MAX_OPERANDS; j++)
4747 if (operand_types[j].bitfield.disp32)
4749 addr_prefix_disp = j;
4750 operand_types[j].bitfield.disp32 = 0;
4751 operand_types[j].bitfield.disp16 = 1;
4757 for (j = 0; j < MAX_OPERANDS; j++)
4759 if (operand_types[j].bitfield.disp64)
4761 addr_prefix_disp = j;
4762 operand_types[j].bitfield.disp64 = 0;
4763 operand_types[j].bitfield.disp32 = 1;
4771 /* We check register size if needed. */
4772 check_register = t->opcode_modifier.checkregsize;
4773 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4774 switch (t->operands)
4777 if (!operand_type_match (overlap0, i.types[0]))
4781 /* xchg %eax, %eax is a special case. It is an aliase for nop
4782 only in 32bit mode and we can use opcode 0x90. In 64bit
4783 mode, we can't use 0x90 for xchg %eax, %eax since it should
4784 zero-extend %eax to %rax. */
4785 if (flag_code == CODE_64BIT
4786 && t->base_opcode == 0x90
4787 && operand_type_equal (&i.types [0], &acc32)
4788 && operand_type_equal (&i.types [1], &acc32))
4792 /* If we swap operand in encoding, we either match
4793 the next one or reverse direction of operands. */
4794 if (t->opcode_modifier.s)
4796 else if (t->opcode_modifier.d)
4801 /* If we swap operand in encoding, we match the next one. */
4802 if (i.swap_operand && t->opcode_modifier.s)
4806 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4807 if (!operand_type_match (overlap0, i.types[0])
4808 || !operand_type_match (overlap1, i.types[1])
4810 && !operand_type_register_match (overlap0, i.types[0],
4812 overlap1, i.types[1],
4815 /* Check if other direction is valid ... */
4816 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4820 /* Try reversing direction of operands. */
4821 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4822 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4823 if (!operand_type_match (overlap0, i.types[0])
4824 || !operand_type_match (overlap1, i.types[1])
4826 && !operand_type_register_match (overlap0,
4833 /* Does not match either direction. */
4836 /* found_reverse_match holds which of D or FloatDR
4838 if (t->opcode_modifier.d)
4839 found_reverse_match = Opcode_D;
4840 else if (t->opcode_modifier.floatd)
4841 found_reverse_match = Opcode_FloatD;
4843 found_reverse_match = 0;
4844 if (t->opcode_modifier.floatr)
4845 found_reverse_match |= Opcode_FloatR;
4849 /* Found a forward 2 operand match here. */
4850 switch (t->operands)
4853 overlap4 = operand_type_and (i.types[4],
4856 overlap3 = operand_type_and (i.types[3],
4859 overlap2 = operand_type_and (i.types[2],
4864 switch (t->operands)
4867 if (!operand_type_match (overlap4, i.types[4])
4868 || !operand_type_register_match (overlap3,
4876 if (!operand_type_match (overlap3, i.types[3])
4878 && !operand_type_register_match (overlap2,
4886 /* Here we make use of the fact that there are no
4887 reverse match 3 operand instructions, and all 3
4888 operand instructions only need to be checked for
4889 register consistency between operands 2 and 3. */
4890 if (!operand_type_match (overlap2, i.types[2])
4892 && !operand_type_register_match (overlap1,
4902 /* Found either forward/reverse 2, 3 or 4 operand match here:
4903 slip through to break. */
4905 if (!found_cpu_match)
4907 found_reverse_match = 0;
4911 /* Check if vector and VEX operands are valid. */
4912 if (check_VecOperands (t) || VEX_check_operands (t))
4914 specific_error = i.error;
4918 /* We've found a match; break out of loop. */
4922 if (t == current_templates->end)
4924 /* We found no match. */
4925 const char *err_msg;
4926 switch (specific_error ? specific_error : i.error)
4930 case operand_size_mismatch:
4931 err_msg = _("operand size mismatch");
4933 case operand_type_mismatch:
4934 err_msg = _("operand type mismatch");
4936 case register_type_mismatch:
4937 err_msg = _("register type mismatch");
4939 case number_of_operands_mismatch:
4940 err_msg = _("number of operands mismatch");
4942 case invalid_instruction_suffix:
4943 err_msg = _("invalid instruction suffix");
4946 err_msg = _("constant doesn't fit in 4 bits");
4949 err_msg = _("only supported with old gcc");
4951 case unsupported_with_intel_mnemonic:
4952 err_msg = _("unsupported with Intel mnemonic");
4954 case unsupported_syntax:
4955 err_msg = _("unsupported syntax");
4958 as_bad (_("unsupported instruction `%s'"),
4959 current_templates->start->name);
4961 case invalid_vsib_address:
4962 err_msg = _("invalid VSIB address");
4964 case invalid_vector_register_set:
4965 err_msg = _("mask, index, and destination registers must be distinct");
4967 case unsupported_vector_index_register:
4968 err_msg = _("unsupported vector index register");
4970 case unsupported_broadcast:
4971 err_msg = _("unsupported broadcast");
4973 case broadcast_not_on_src_operand:
4974 err_msg = _("broadcast not on source memory operand");
4976 case broadcast_needed:
4977 err_msg = _("broadcast is needed for operand of such type");
4979 case unsupported_masking:
4980 err_msg = _("unsupported masking");
4982 case mask_not_on_destination:
4983 err_msg = _("mask not on destination operand");
4985 case no_default_mask:
4986 err_msg = _("default mask isn't allowed");
4988 case unsupported_rc_sae:
4989 err_msg = _("unsupported static rounding/sae");
4991 case rc_sae_operand_not_last_imm:
4993 err_msg = _("RC/SAE operand must precede immediate operands");
4995 err_msg = _("RC/SAE operand must follow immediate operands");
4997 case invalid_register_operand:
4998 err_msg = _("invalid register operand");
5001 as_bad (_("%s for `%s'"), err_msg,
5002 current_templates->start->name);
5006 if (!quiet_warnings)
5009 && (i.types[0].bitfield.jumpabsolute
5010 != operand_types[0].bitfield.jumpabsolute))
5012 as_warn (_("indirect %s without `*'"), t->name);
5015 if (t->opcode_modifier.isprefix
5016 && t->opcode_modifier.ignoresize)
5018 /* Warn them that a data or address size prefix doesn't
5019 affect assembly of the next line of code. */
5020 as_warn (_("stand-alone `%s' prefix"), t->name);
5024 /* Copy the template we found. */
5027 if (addr_prefix_disp != -1)
5028 i.tm.operand_types[addr_prefix_disp]
5029 = operand_types[addr_prefix_disp];
5031 if (found_reverse_match)
5033 /* If we found a reverse match we must alter the opcode
5034 direction bit. found_reverse_match holds bits to change
5035 (different for int & float insns). */
5037 i.tm.base_opcode ^= found_reverse_match;
5039 i.tm.operand_types[0] = operand_types[1];
5040 i.tm.operand_types[1] = operand_types[0];
5049 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
5050 if (i.tm.operand_types[mem_op].bitfield.esseg)
5052 if (i.seg[0] != NULL && i.seg[0] != &es)
5054 as_bad (_("`%s' operand %d must use `%ses' segment"),
5060 /* There's only ever one segment override allowed per instruction.
5061 This instruction possibly has a legal segment override on the
5062 second operand, so copy the segment to where non-string
5063 instructions store it, allowing common code. */
5064 i.seg[0] = i.seg[1];
5066 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
5068 if (i.seg[1] != NULL && i.seg[1] != &es)
5070 as_bad (_("`%s' operand %d must use `%ses' segment"),
5081 process_suffix (void)
5083 /* If matched instruction specifies an explicit instruction mnemonic
5085 if (i.tm.opcode_modifier.size16)
5086 i.suffix = WORD_MNEM_SUFFIX;
5087 else if (i.tm.opcode_modifier.size32)
5088 i.suffix = LONG_MNEM_SUFFIX;
5089 else if (i.tm.opcode_modifier.size64)
5090 i.suffix = QWORD_MNEM_SUFFIX;
5091 else if (i.reg_operands)
5093 /* If there's no instruction mnemonic suffix we try to invent one
5094 based on register operands. */
5097 /* We take i.suffix from the last register operand specified,
5098 Destination register type is more significant than source
5099 register type. crc32 in SSE4.2 prefers source register
5101 if (i.tm.base_opcode == 0xf20f38f1)
5103 if (i.types[0].bitfield.reg16)
5104 i.suffix = WORD_MNEM_SUFFIX;
5105 else if (i.types[0].bitfield.reg32)
5106 i.suffix = LONG_MNEM_SUFFIX;
5107 else if (i.types[0].bitfield.reg64)
5108 i.suffix = QWORD_MNEM_SUFFIX;
5110 else if (i.tm.base_opcode == 0xf20f38f0)
5112 if (i.types[0].bitfield.reg8)
5113 i.suffix = BYTE_MNEM_SUFFIX;
5120 if (i.tm.base_opcode == 0xf20f38f1
5121 || i.tm.base_opcode == 0xf20f38f0)
5123 /* We have to know the operand size for crc32. */
5124 as_bad (_("ambiguous memory operand size for `%s`"),
5129 for (op = i.operands; --op >= 0;)
5130 if (!i.tm.operand_types[op].bitfield.inoutportreg)
5132 if (i.types[op].bitfield.reg8)
5134 i.suffix = BYTE_MNEM_SUFFIX;
5137 else if (i.types[op].bitfield.reg16)
5139 i.suffix = WORD_MNEM_SUFFIX;
5142 else if (i.types[op].bitfield.reg32)
5144 i.suffix = LONG_MNEM_SUFFIX;
5147 else if (i.types[op].bitfield.reg64)
5149 i.suffix = QWORD_MNEM_SUFFIX;
5155 else if (i.suffix == BYTE_MNEM_SUFFIX)
5158 && i.tm.opcode_modifier.ignoresize
5159 && i.tm.opcode_modifier.no_bsuf)
5161 else if (!check_byte_reg ())
5164 else if (i.suffix == LONG_MNEM_SUFFIX)
5167 && i.tm.opcode_modifier.ignoresize
5168 && i.tm.opcode_modifier.no_lsuf)
5170 else if (!check_long_reg ())
5173 else if (i.suffix == QWORD_MNEM_SUFFIX)
5176 && i.tm.opcode_modifier.ignoresize
5177 && i.tm.opcode_modifier.no_qsuf)
5179 else if (!check_qword_reg ())
5182 else if (i.suffix == WORD_MNEM_SUFFIX)
5185 && i.tm.opcode_modifier.ignoresize
5186 && i.tm.opcode_modifier.no_wsuf)
5188 else if (!check_word_reg ())
5191 else if (i.suffix == XMMWORD_MNEM_SUFFIX
5192 || i.suffix == YMMWORD_MNEM_SUFFIX
5193 || i.suffix == ZMMWORD_MNEM_SUFFIX)
5195 /* Skip if the instruction has x/y/z suffix. match_template
5196 should check if it is a valid suffix. */
5198 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
5199 /* Do nothing if the instruction is going to ignore the prefix. */
5204 else if (i.tm.opcode_modifier.defaultsize
5206 /* exclude fldenv/frstor/fsave/fstenv */
5207 && i.tm.opcode_modifier.no_ssuf)
5209 i.suffix = stackop_size;
5211 else if (intel_syntax
5213 && (i.tm.operand_types[0].bitfield.jumpabsolute
5214 || i.tm.opcode_modifier.jumpbyte
5215 || i.tm.opcode_modifier.jumpintersegment
5216 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
5217 && i.tm.extension_opcode <= 3)))
5222 if (!i.tm.opcode_modifier.no_qsuf)
5224 i.suffix = QWORD_MNEM_SUFFIX;
5228 if (!i.tm.opcode_modifier.no_lsuf)
5229 i.suffix = LONG_MNEM_SUFFIX;
5232 if (!i.tm.opcode_modifier.no_wsuf)
5233 i.suffix = WORD_MNEM_SUFFIX;
5242 if (i.tm.opcode_modifier.w)
5244 as_bad (_("no instruction mnemonic suffix given and "
5245 "no register operands; can't size instruction"));
5251 unsigned int suffixes;
5253 suffixes = !i.tm.opcode_modifier.no_bsuf;
5254 if (!i.tm.opcode_modifier.no_wsuf)
5256 if (!i.tm.opcode_modifier.no_lsuf)
5258 if (!i.tm.opcode_modifier.no_ldsuf)
5260 if (!i.tm.opcode_modifier.no_ssuf)
5262 if (!i.tm.opcode_modifier.no_qsuf)
5265 /* There are more than suffix matches. */
5266 if (i.tm.opcode_modifier.w
5267 || ((suffixes & (suffixes - 1))
5268 && !i.tm.opcode_modifier.defaultsize
5269 && !i.tm.opcode_modifier.ignoresize))
5271 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
5277 /* Change the opcode based on the operand size given by i.suffix;
5278 We don't need to change things for byte insns. */
5281 && i.suffix != BYTE_MNEM_SUFFIX
5282 && i.suffix != XMMWORD_MNEM_SUFFIX
5283 && i.suffix != YMMWORD_MNEM_SUFFIX
5284 && i.suffix != ZMMWORD_MNEM_SUFFIX)
5286 /* It's not a byte, select word/dword operation. */
5287 if (i.tm.opcode_modifier.w)
5289 if (i.tm.opcode_modifier.shortform)
5290 i.tm.base_opcode |= 8;
5292 i.tm.base_opcode |= 1;
5295 /* Now select between word & dword operations via the operand
5296 size prefix, except for instructions that will ignore this
5298 if (i.tm.opcode_modifier.addrprefixop0)
5300 /* The address size override prefix changes the size of the
5302 if ((flag_code == CODE_32BIT
5303 && i.op->regs[0].reg_type.bitfield.reg16)
5304 || (flag_code != CODE_32BIT
5305 && i.op->regs[0].reg_type.bitfield.reg32))
5306 if (!add_prefix (ADDR_PREFIX_OPCODE))
5309 else if (i.suffix != QWORD_MNEM_SUFFIX
5310 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
5311 && !i.tm.opcode_modifier.ignoresize
5312 && !i.tm.opcode_modifier.floatmf
5313 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
5314 || (flag_code == CODE_64BIT
5315 && i.tm.opcode_modifier.jumpbyte)))
5317 unsigned int prefix = DATA_PREFIX_OPCODE;
5319 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
5320 prefix = ADDR_PREFIX_OPCODE;
5322 if (!add_prefix (prefix))
5326 /* Set mode64 for an operand. */
5327 if (i.suffix == QWORD_MNEM_SUFFIX
5328 && flag_code == CODE_64BIT
5329 && !i.tm.opcode_modifier.norex64)
5331 /* Special case for xchg %rax,%rax. It is NOP and doesn't
5332 need rex64. cmpxchg8b is also a special case. */
5333 if (! (i.operands == 2
5334 && i.tm.base_opcode == 0x90
5335 && i.tm.extension_opcode == None
5336 && operand_type_equal (&i.types [0], &acc64)
5337 && operand_type_equal (&i.types [1], &acc64))
5338 && ! (i.operands == 1
5339 && i.tm.base_opcode == 0xfc7
5340 && i.tm.extension_opcode == 1
5341 && !operand_type_check (i.types [0], reg)
5342 && operand_type_check (i.types [0], anymem)))
5346 /* Size floating point instruction. */
5347 if (i.suffix == LONG_MNEM_SUFFIX)
5348 if (i.tm.opcode_modifier.floatmf)
5349 i.tm.base_opcode ^= 4;
5356 check_byte_reg (void)
5360 for (op = i.operands; --op >= 0;)
5362 /* If this is an eight bit register, it's OK. If it's the 16 or
5363 32 bit version of an eight bit register, we will just use the
5364 low portion, and that's OK too. */
5365 if (i.types[op].bitfield.reg8)
5368 /* I/O port address operands are OK too. */
5369 if (i.tm.operand_types[op].bitfield.inoutportreg)
5372 /* crc32 doesn't generate this warning. */
5373 if (i.tm.base_opcode == 0xf20f38f0)
5376 if ((i.types[op].bitfield.reg16
5377 || i.types[op].bitfield.reg32
5378 || i.types[op].bitfield.reg64)
5379 && i.op[op].regs->reg_num < 4
5380 /* Prohibit these changes in 64bit mode, since the lowering
5381 would be more complicated. */
5382 && flag_code != CODE_64BIT)
5384 #if REGISTER_WARNINGS
5385 if (!quiet_warnings)
5386 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5388 (i.op[op].regs + (i.types[op].bitfield.reg16
5389 ? REGNAM_AL - REGNAM_AX
5390 : REGNAM_AL - REGNAM_EAX))->reg_name,
5392 i.op[op].regs->reg_name,
5397 /* Any other register is bad. */
5398 if (i.types[op].bitfield.reg16
5399 || i.types[op].bitfield.reg32
5400 || i.types[op].bitfield.reg64
5401 || i.types[op].bitfield.regmmx
5402 || i.types[op].bitfield.regxmm
5403 || i.types[op].bitfield.regymm
5404 || i.types[op].bitfield.regzmm
5405 || i.types[op].bitfield.sreg2
5406 || i.types[op].bitfield.sreg3
5407 || i.types[op].bitfield.control
5408 || i.types[op].bitfield.debug
5409 || i.types[op].bitfield.test
5410 || i.types[op].bitfield.floatreg
5411 || i.types[op].bitfield.floatacc)
5413 as_bad (_("`%s%s' not allowed with `%s%c'"),
5415 i.op[op].regs->reg_name,
5425 check_long_reg (void)
5429 for (op = i.operands; --op >= 0;)
5430 /* Reject eight bit registers, except where the template requires
5431 them. (eg. movzb) */
5432 if (i.types[op].bitfield.reg8
5433 && (i.tm.operand_types[op].bitfield.reg16
5434 || i.tm.operand_types[op].bitfield.reg32
5435 || i.tm.operand_types[op].bitfield.acc))
5437 as_bad (_("`%s%s' not allowed with `%s%c'"),
5439 i.op[op].regs->reg_name,
5444 /* Warn if the e prefix on a general reg is missing. */
5445 else if ((!quiet_warnings || flag_code == CODE_64BIT)
5446 && i.types[op].bitfield.reg16
5447 && (i.tm.operand_types[op].bitfield.reg32
5448 || i.tm.operand_types[op].bitfield.acc))
5450 /* Prohibit these changes in the 64bit mode, since the
5451 lowering is more complicated. */
5452 if (flag_code == CODE_64BIT)
5454 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5455 register_prefix, i.op[op].regs->reg_name,
5459 #if REGISTER_WARNINGS
5460 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5462 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
5463 register_prefix, i.op[op].regs->reg_name, i.suffix);
5466 /* Warn if the r prefix on a general reg is present. */
5467 else if (i.types[op].bitfield.reg64
5468 && (i.tm.operand_types[op].bitfield.reg32
5469 || i.tm.operand_types[op].bitfield.acc))
5472 && i.tm.opcode_modifier.toqword
5473 && !i.types[0].bitfield.regxmm)
5475 /* Convert to QWORD. We want REX byte. */
5476 i.suffix = QWORD_MNEM_SUFFIX;
5480 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5481 register_prefix, i.op[op].regs->reg_name,
5490 check_qword_reg (void)
5494 for (op = i.operands; --op >= 0; )
5495 /* Reject eight bit registers, except where the template requires
5496 them. (eg. movzb) */
5497 if (i.types[op].bitfield.reg8
5498 && (i.tm.operand_types[op].bitfield.reg16
5499 || i.tm.operand_types[op].bitfield.reg32
5500 || i.tm.operand_types[op].bitfield.acc))
5502 as_bad (_("`%s%s' not allowed with `%s%c'"),
5504 i.op[op].regs->reg_name,
5509 /* Warn if the r prefix on a general reg is missing. */
5510 else if ((i.types[op].bitfield.reg16
5511 || i.types[op].bitfield.reg32)
5512 && (i.tm.operand_types[op].bitfield.reg32
5513 || i.tm.operand_types[op].bitfield.acc))
5515 /* Prohibit these changes in the 64bit mode, since the
5516 lowering is more complicated. */
5518 && i.tm.opcode_modifier.todword
5519 && !i.types[0].bitfield.regxmm)
5521 /* Convert to DWORD. We don't want REX byte. */
5522 i.suffix = LONG_MNEM_SUFFIX;
5526 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5527 register_prefix, i.op[op].regs->reg_name,
5536 check_word_reg (void)
5539 for (op = i.operands; --op >= 0;)
5540 /* Reject eight bit registers, except where the template requires
5541 them. (eg. movzb) */
5542 if (i.types[op].bitfield.reg8
5543 && (i.tm.operand_types[op].bitfield.reg16
5544 || i.tm.operand_types[op].bitfield.reg32
5545 || i.tm.operand_types[op].bitfield.acc))
5547 as_bad (_("`%s%s' not allowed with `%s%c'"),
5549 i.op[op].regs->reg_name,
5554 /* Warn if the e or r prefix on a general reg is present. */
5555 else if ((!quiet_warnings || flag_code == CODE_64BIT)
5556 && (i.types[op].bitfield.reg32
5557 || i.types[op].bitfield.reg64)
5558 && (i.tm.operand_types[op].bitfield.reg16
5559 || i.tm.operand_types[op].bitfield.acc))
5561 /* Prohibit these changes in the 64bit mode, since the
5562 lowering is more complicated. */
5563 if (flag_code == CODE_64BIT)
5565 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5566 register_prefix, i.op[op].regs->reg_name,
5570 #if REGISTER_WARNINGS
5571 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5573 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
5574 register_prefix, i.op[op].regs->reg_name, i.suffix);
5581 update_imm (unsigned int j)
5583 i386_operand_type overlap = i.types[j];
5584 if ((overlap.bitfield.imm8
5585 || overlap.bitfield.imm8s
5586 || overlap.bitfield.imm16
5587 || overlap.bitfield.imm32
5588 || overlap.bitfield.imm32s
5589 || overlap.bitfield.imm64)
5590 && !operand_type_equal (&overlap, &imm8)
5591 && !operand_type_equal (&overlap, &imm8s)
5592 && !operand_type_equal (&overlap, &imm16)
5593 && !operand_type_equal (&overlap, &imm32)
5594 && !operand_type_equal (&overlap, &imm32s)
5595 && !operand_type_equal (&overlap, &imm64))
5599 i386_operand_type temp;
5601 operand_type_set (&temp, 0);
5602 if (i.suffix == BYTE_MNEM_SUFFIX)
5604 temp.bitfield.imm8 = overlap.bitfield.imm8;
5605 temp.bitfield.imm8s = overlap.bitfield.imm8s;
5607 else if (i.suffix == WORD_MNEM_SUFFIX)
5608 temp.bitfield.imm16 = overlap.bitfield.imm16;
5609 else if (i.suffix == QWORD_MNEM_SUFFIX)
5611 temp.bitfield.imm64 = overlap.bitfield.imm64;
5612 temp.bitfield.imm32s = overlap.bitfield.imm32s;
5615 temp.bitfield.imm32 = overlap.bitfield.imm32;
5618 else if (operand_type_equal (&overlap, &imm16_32_32s)
5619 || operand_type_equal (&overlap, &imm16_32)
5620 || operand_type_equal (&overlap, &imm16_32s))
5622 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5627 if (!operand_type_equal (&overlap, &imm8)
5628 && !operand_type_equal (&overlap, &imm8s)
5629 && !operand_type_equal (&overlap, &imm16)
5630 && !operand_type_equal (&overlap, &imm32)
5631 && !operand_type_equal (&overlap, &imm32s)
5632 && !operand_type_equal (&overlap, &imm64))
5634 as_bad (_("no instruction mnemonic suffix given; "
5635 "can't determine immediate size"));
5639 i.types[j] = overlap;
5649 /* Update the first 2 immediate operands. */
5650 n = i.operands > 2 ? 2 : i.operands;
5653 for (j = 0; j < n; j++)
5654 if (update_imm (j) == 0)
5657 /* The 3rd operand can't be immediate operand. */
5658 gas_assert (operand_type_check (i.types[2], imm) == 0);
5665 bad_implicit_operand (int xmm)
5667 const char *ireg = xmm ? "xmm0" : "ymm0";
5670 as_bad (_("the last operand of `%s' must be `%s%s'"),
5671 i.tm.name, register_prefix, ireg);
5673 as_bad (_("the first operand of `%s' must be `%s%s'"),
5674 i.tm.name, register_prefix, ireg);
5679 process_operands (void)
5681 /* Default segment register this instruction will use for memory
5682 accesses. 0 means unknown. This is only for optimizing out
5683 unnecessary segment overrides. */
5684 const seg_entry *default_seg = 0;
5686 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5688 unsigned int dupl = i.operands;
5689 unsigned int dest = dupl - 1;
5692 /* The destination must be an xmm register. */
5693 gas_assert (i.reg_operands
5694 && MAX_OPERANDS > dupl
5695 && operand_type_equal (&i.types[dest], ®xmm));
5697 if (i.tm.opcode_modifier.firstxmm0)
5699 /* The first operand is implicit and must be xmm0. */
5700 gas_assert (operand_type_equal (&i.types[0], ®xmm));
5701 if (register_number (i.op[0].regs) != 0)
5702 return bad_implicit_operand (1);
5704 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5706 /* Keep xmm0 for instructions with VEX prefix and 3
5712 /* We remove the first xmm0 and keep the number of
5713 operands unchanged, which in fact duplicates the
5715 for (j = 1; j < i.operands; j++)
5717 i.op[j - 1] = i.op[j];
5718 i.types[j - 1] = i.types[j];
5719 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5723 else if (i.tm.opcode_modifier.implicit1stxmm0)
5725 gas_assert ((MAX_OPERANDS - 1) > dupl
5726 && (i.tm.opcode_modifier.vexsources
5729 /* Add the implicit xmm0 for instructions with VEX prefix
5731 for (j = i.operands; j > 0; j--)
5733 i.op[j] = i.op[j - 1];
5734 i.types[j] = i.types[j - 1];
5735 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5738 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5739 i.types[0] = regxmm;
5740 i.tm.operand_types[0] = regxmm;
5743 i.reg_operands += 2;
5748 i.op[dupl] = i.op[dest];
5749 i.types[dupl] = i.types[dest];
5750 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5759 i.op[dupl] = i.op[dest];
5760 i.types[dupl] = i.types[dest];
5761 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5764 if (i.tm.opcode_modifier.immext)
5767 else if (i.tm.opcode_modifier.firstxmm0)
5771 /* The first operand is implicit and must be xmm0/ymm0/zmm0. */
5772 gas_assert (i.reg_operands
5773 && (operand_type_equal (&i.types[0], ®xmm)
5774 || operand_type_equal (&i.types[0], ®ymm)
5775 || operand_type_equal (&i.types[0], ®zmm)));
5776 if (register_number (i.op[0].regs) != 0)
5777 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5779 for (j = 1; j < i.operands; j++)
5781 i.op[j - 1] = i.op[j];
5782 i.types[j - 1] = i.types[j];
5784 /* We need to adjust fields in i.tm since they are used by
5785 build_modrm_byte. */
5786 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5793 else if (i.tm.opcode_modifier.regkludge)
5795 /* The imul $imm, %reg instruction is converted into
5796 imul $imm, %reg, %reg, and the clr %reg instruction
5797 is converted into xor %reg, %reg. */
5799 unsigned int first_reg_op;
5801 if (operand_type_check (i.types[0], reg))
5805 /* Pretend we saw the extra register operand. */
5806 gas_assert (i.reg_operands == 1
5807 && i.op[first_reg_op + 1].regs == 0);
5808 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5809 i.types[first_reg_op + 1] = i.types[first_reg_op];
5814 if (i.tm.opcode_modifier.shortform)
5816 if (i.types[0].bitfield.sreg2
5817 || i.types[0].bitfield.sreg3)
5819 if (i.tm.base_opcode == POP_SEG_SHORT
5820 && i.op[0].regs->reg_num == 1)
5822 as_bad (_("you can't `pop %scs'"), register_prefix);
5825 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5826 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5831 /* The register or float register operand is in operand
5835 if (i.types[0].bitfield.floatreg
5836 || operand_type_check (i.types[0], reg))
5840 /* Register goes in low 3 bits of opcode. */
5841 i.tm.base_opcode |= i.op[op].regs->reg_num;
5842 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5844 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5846 /* Warn about some common errors, but press on regardless.
5847 The first case can be generated by gcc (<= 2.8.1). */
5848 if (i.operands == 2)
5850 /* Reversed arguments on faddp, fsubp, etc. */
5851 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5852 register_prefix, i.op[!intel_syntax].regs->reg_name,
5853 register_prefix, i.op[intel_syntax].regs->reg_name);
5857 /* Extraneous `l' suffix on fp insn. */
5858 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5859 register_prefix, i.op[0].regs->reg_name);
5864 else if (i.tm.opcode_modifier.modrm)
5866 /* The opcode is completed (modulo i.tm.extension_opcode which
5867 must be put into the modrm byte). Now, we make the modrm and
5868 index base bytes based on all the info we've collected. */
5870 default_seg = build_modrm_byte ();
5872 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5876 else if (i.tm.opcode_modifier.isstring)
5878 /* For the string instructions that allow a segment override
5879 on one of their operands, the default segment is ds. */
5883 if (i.tm.base_opcode == 0x8d /* lea */
5886 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5888 /* If a segment was explicitly specified, and the specified segment
5889 is not the default, use an opcode prefix to select it. If we
5890 never figured out what the default segment is, then default_seg
5891 will be zero at this point, and the specified segment prefix will
5893 if ((i.seg[0]) && (i.seg[0] != default_seg))
5895 if (!add_prefix (i.seg[0]->seg_prefix))
5901 static const seg_entry *
5902 build_modrm_byte (void)
5904 const seg_entry *default_seg = 0;
5905 unsigned int source, dest;
5908 /* The first operand of instructions with VEX prefix and 3 sources
5909 must be VEX_Imm4. */
5910 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5913 unsigned int nds, reg_slot;
5916 if (i.tm.opcode_modifier.veximmext
5917 && i.tm.opcode_modifier.immext)
5919 dest = i.operands - 2;
5920 gas_assert (dest == 3);
5923 dest = i.operands - 1;
5926 /* There are 2 kinds of instructions:
5927 1. 5 operands: 4 register operands or 3 register operands
5928 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5929 VexW0 or VexW1. The destination must be either XMM, YMM or
5931 2. 4 operands: 4 register operands or 3 register operands
5932 plus 1 memory operand, VexXDS, and VexImmExt */
5933 gas_assert ((i.reg_operands == 4
5934 || (i.reg_operands == 3 && i.mem_operands == 1))
5935 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5936 && (i.tm.opcode_modifier.veximmext
5937 || (i.imm_operands == 1
5938 && i.types[0].bitfield.vec_imm4
5939 && (i.tm.opcode_modifier.vexw == VEXW0
5940 || i.tm.opcode_modifier.vexw == VEXW1)
5941 && (operand_type_equal (&i.tm.operand_types[dest], ®xmm)
5942 || operand_type_equal (&i.tm.operand_types[dest], ®ymm)
5943 || operand_type_equal (&i.tm.operand_types[dest], ®zmm)))));
5945 if (i.imm_operands == 0)
5947 /* When there is no immediate operand, generate an 8bit
5948 immediate operand to encode the first operand. */
5949 exp = &im_expressions[i.imm_operands++];
5950 i.op[i.operands].imms = exp;
5951 i.types[i.operands] = imm8;
5953 /* If VexW1 is set, the first operand is the source and
5954 the second operand is encoded in the immediate operand. */
5955 if (i.tm.opcode_modifier.vexw == VEXW1)
5966 /* FMA swaps REG and NDS. */
5967 if (i.tm.cpu_flags.bitfield.cpufma)
5975 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5977 || operand_type_equal (&i.tm.operand_types[reg_slot],
5979 || operand_type_equal (&i.tm.operand_types[reg_slot],
5981 exp->X_op = O_constant;
5982 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
5983 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
5987 unsigned int imm_slot;
5989 if (i.tm.opcode_modifier.vexw == VEXW0)
5991 /* If VexW0 is set, the third operand is the source and
5992 the second operand is encoded in the immediate
5999 /* VexW1 is set, the second operand is the source and
6000 the third operand is encoded in the immediate
6006 if (i.tm.opcode_modifier.immext)
6008 /* When ImmExt is set, the immdiate byte is the last
6010 imm_slot = i.operands - 1;
6018 /* Turn on Imm8 so that output_imm will generate it. */
6019 i.types[imm_slot].bitfield.imm8 = 1;
6022 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
6024 || operand_type_equal (&i.tm.operand_types[reg_slot],
6026 || operand_type_equal (&i.tm.operand_types[reg_slot],
6028 i.op[imm_slot].imms->X_add_number
6029 |= register_number (i.op[reg_slot].regs) << 4;
6030 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
6033 gas_assert (operand_type_equal (&i.tm.operand_types[nds], ®xmm)
6034 || operand_type_equal (&i.tm.operand_types[nds],
6036 || operand_type_equal (&i.tm.operand_types[nds],
6038 i.vex.register_specifier = i.op[nds].regs;
6043 /* i.reg_operands MUST be the number of real register operands;
6044 implicit registers do not count. If there are 3 register
6045 operands, it must be a instruction with VexNDS. For a
6046 instruction with VexNDD, the destination register is encoded
6047 in VEX prefix. If there are 4 register operands, it must be
6048 a instruction with VEX prefix and 3 sources. */
6049 if (i.mem_operands == 0
6050 && ((i.reg_operands == 2
6051 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
6052 || (i.reg_operands == 3
6053 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
6054 || (i.reg_operands == 4 && vex_3_sources)))
6062 /* When there are 3 operands, one of them may be immediate,
6063 which may be the first or the last operand. Otherwise,
6064 the first operand must be shift count register (cl) or it
6065 is an instruction with VexNDS. */
6066 gas_assert (i.imm_operands == 1
6067 || (i.imm_operands == 0
6068 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
6069 || i.types[0].bitfield.shiftcount)));
6070 if (operand_type_check (i.types[0], imm)
6071 || i.types[0].bitfield.shiftcount)
6077 /* When there are 4 operands, the first two must be 8bit
6078 immediate operands. The source operand will be the 3rd
6081 For instructions with VexNDS, if the first operand
6082 an imm8, the source operand is the 2nd one. If the last
6083 operand is imm8, the source operand is the first one. */
6084 gas_assert ((i.imm_operands == 2
6085 && i.types[0].bitfield.imm8
6086 && i.types[1].bitfield.imm8)
6087 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
6088 && i.imm_operands == 1
6089 && (i.types[0].bitfield.imm8
6090 || i.types[i.operands - 1].bitfield.imm8
6092 if (i.imm_operands == 2)
6096 if (i.types[0].bitfield.imm8)
6103 if (i.tm.opcode_modifier.evex)
6105 /* For EVEX instructions, when there are 5 operands, the
6106 first one must be immediate operand. If the second one
6107 is immediate operand, the source operand is the 3th
6108 one. If the last one is immediate operand, the source
6109 operand is the 2nd one. */
6110 gas_assert (i.imm_operands == 2
6111 && i.tm.opcode_modifier.sae
6112 && operand_type_check (i.types[0], imm));
6113 if (operand_type_check (i.types[1], imm))
6115 else if (operand_type_check (i.types[4], imm))
6129 /* RC/SAE operand could be between DEST and SRC. That happens
6130 when one operand is GPR and the other one is XMM/YMM/ZMM
6132 if (i.rounding && i.rounding->operand == (int) dest)
6135 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6137 /* For instructions with VexNDS, the register-only source
6138 operand must be 32/64bit integer, XMM, YMM or ZMM
6139 register. It is encoded in VEX prefix. We need to
6140 clear RegMem bit before calling operand_type_equal. */
6142 i386_operand_type op;
6145 /* Check register-only source operand when two source
6146 operands are swapped. */
6147 if (!i.tm.operand_types[source].bitfield.baseindex
6148 && i.tm.operand_types[dest].bitfield.baseindex)
6156 op = i.tm.operand_types[vvvv];
6157 op.bitfield.regmem = 0;
6158 if ((dest + 1) >= i.operands
6159 || (op.bitfield.reg32 != 1
6160 && !op.bitfield.reg64 != 1
6161 && !operand_type_equal (&op, ®xmm)
6162 && !operand_type_equal (&op, ®ymm)
6163 && !operand_type_equal (&op, ®zmm)
6164 && !operand_type_equal (&op, ®mask)))
6166 i.vex.register_specifier = i.op[vvvv].regs;
6172 /* One of the register operands will be encoded in the i.tm.reg
6173 field, the other in the combined i.tm.mode and i.tm.regmem
6174 fields. If no form of this instruction supports a memory
6175 destination operand, then we assume the source operand may
6176 sometimes be a memory operand and so we need to store the
6177 destination in the i.rm.reg field. */
6178 if (!i.tm.operand_types[dest].bitfield.regmem
6179 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
6181 i.rm.reg = i.op[dest].regs->reg_num;
6182 i.rm.regmem = i.op[source].regs->reg_num;
6183 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6185 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6187 if ((i.op[source].regs->reg_flags & RegRex) != 0)
6189 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6194 i.rm.reg = i.op[source].regs->reg_num;
6195 i.rm.regmem = i.op[dest].regs->reg_num;
6196 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6198 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6200 if ((i.op[source].regs->reg_flags & RegRex) != 0)
6202 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6205 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
6207 if (!i.types[0].bitfield.control
6208 && !i.types[1].bitfield.control)
6210 i.rex &= ~(REX_R | REX_B);
6211 add_prefix (LOCK_PREFIX_OPCODE);
6215 { /* If it's not 2 reg operands... */
6220 unsigned int fake_zero_displacement = 0;
6223 for (op = 0; op < i.operands; op++)
6224 if (operand_type_check (i.types[op], anymem))
6226 gas_assert (op < i.operands);
6228 if (i.tm.opcode_modifier.vecsib)
6230 if (i.index_reg->reg_num == RegEiz
6231 || i.index_reg->reg_num == RegRiz)
6234 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6237 i.sib.base = NO_BASE_REGISTER;
6238 i.sib.scale = i.log2_scale_factor;
6239 /* No Vec_Disp8 if there is no base. */
6240 i.types[op].bitfield.vec_disp8 = 0;
6241 i.types[op].bitfield.disp8 = 0;
6242 i.types[op].bitfield.disp16 = 0;
6243 i.types[op].bitfield.disp64 = 0;
6244 if (flag_code != CODE_64BIT)
6246 /* Must be 32 bit */
6247 i.types[op].bitfield.disp32 = 1;
6248 i.types[op].bitfield.disp32s = 0;
6252 i.types[op].bitfield.disp32 = 0;
6253 i.types[op].bitfield.disp32s = 1;
6256 i.sib.index = i.index_reg->reg_num;
6257 if ((i.index_reg->reg_flags & RegRex) != 0)
6259 if ((i.index_reg->reg_flags & RegVRex) != 0)
6265 if (i.base_reg == 0)
6268 if (!i.disp_operands)
6270 fake_zero_displacement = 1;
6271 /* Instructions with VSIB byte need 32bit displacement
6272 if there is no base register. */
6273 if (i.tm.opcode_modifier.vecsib)
6274 i.types[op].bitfield.disp32 = 1;
6276 if (i.index_reg == 0)
6278 gas_assert (!i.tm.opcode_modifier.vecsib);
6279 /* Operand is just <disp> */
6280 if (flag_code == CODE_64BIT)
6282 /* 64bit mode overwrites the 32bit absolute
6283 addressing by RIP relative addressing and
6284 absolute addressing is encoded by one of the
6285 redundant SIB forms. */
6286 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6287 i.sib.base = NO_BASE_REGISTER;
6288 i.sib.index = NO_INDEX_REGISTER;
6289 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
6290 ? disp32s : disp32);
6292 else if ((flag_code == CODE_16BIT)
6293 ^ (i.prefix[ADDR_PREFIX] != 0))
6295 i.rm.regmem = NO_BASE_REGISTER_16;
6296 i.types[op] = disp16;
6300 i.rm.regmem = NO_BASE_REGISTER;
6301 i.types[op] = disp32;
6304 else if (!i.tm.opcode_modifier.vecsib)
6306 /* !i.base_reg && i.index_reg */
6307 if (i.index_reg->reg_num == RegEiz
6308 || i.index_reg->reg_num == RegRiz)
6309 i.sib.index = NO_INDEX_REGISTER;
6311 i.sib.index = i.index_reg->reg_num;
6312 i.sib.base = NO_BASE_REGISTER;
6313 i.sib.scale = i.log2_scale_factor;
6314 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6315 /* No Vec_Disp8 if there is no base. */
6316 i.types[op].bitfield.vec_disp8 = 0;
6317 i.types[op].bitfield.disp8 = 0;
6318 i.types[op].bitfield.disp16 = 0;
6319 i.types[op].bitfield.disp64 = 0;
6320 if (flag_code != CODE_64BIT)
6322 /* Must be 32 bit */
6323 i.types[op].bitfield.disp32 = 1;
6324 i.types[op].bitfield.disp32s = 0;
6328 i.types[op].bitfield.disp32 = 0;
6329 i.types[op].bitfield.disp32s = 1;
6331 if ((i.index_reg->reg_flags & RegRex) != 0)
6335 /* RIP addressing for 64bit mode. */
6336 else if (i.base_reg->reg_num == RegRip ||
6337 i.base_reg->reg_num == RegEip)
6339 gas_assert (!i.tm.opcode_modifier.vecsib);
6340 i.rm.regmem = NO_BASE_REGISTER;
6341 i.types[op].bitfield.disp8 = 0;
6342 i.types[op].bitfield.disp16 = 0;
6343 i.types[op].bitfield.disp32 = 0;
6344 i.types[op].bitfield.disp32s = 1;
6345 i.types[op].bitfield.disp64 = 0;
6346 i.types[op].bitfield.vec_disp8 = 0;
6347 i.flags[op] |= Operand_PCrel;
6348 if (! i.disp_operands)
6349 fake_zero_displacement = 1;
6351 else if (i.base_reg->reg_type.bitfield.reg16)
6353 gas_assert (!i.tm.opcode_modifier.vecsib);
6354 switch (i.base_reg->reg_num)
6357 if (i.index_reg == 0)
6359 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
6360 i.rm.regmem = i.index_reg->reg_num - 6;
6364 if (i.index_reg == 0)
6367 if (operand_type_check (i.types[op], disp) == 0)
6369 /* fake (%bp) into 0(%bp) */
6370 if (i.tm.operand_types[op].bitfield.vec_disp8)
6371 i.types[op].bitfield.vec_disp8 = 1;
6373 i.types[op].bitfield.disp8 = 1;
6374 fake_zero_displacement = 1;
6377 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
6378 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
6380 default: /* (%si) -> 4 or (%di) -> 5 */
6381 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
6383 i.rm.mode = mode_from_disp_size (i.types[op]);
6385 else /* i.base_reg and 32/64 bit mode */
6387 if (flag_code == CODE_64BIT
6388 && operand_type_check (i.types[op], disp))
6390 i386_operand_type temp;
6391 operand_type_set (&temp, 0);
6392 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
6393 temp.bitfield.vec_disp8
6394 = i.types[op].bitfield.vec_disp8;
6396 if (i.prefix[ADDR_PREFIX] == 0)
6397 i.types[op].bitfield.disp32s = 1;
6399 i.types[op].bitfield.disp32 = 1;
6402 if (!i.tm.opcode_modifier.vecsib)
6403 i.rm.regmem = i.base_reg->reg_num;
6404 if ((i.base_reg->reg_flags & RegRex) != 0)
6406 i.sib.base = i.base_reg->reg_num;
6407 /* x86-64 ignores REX prefix bit here to avoid decoder
6409 if (!(i.base_reg->reg_flags & RegRex)
6410 && (i.base_reg->reg_num == EBP_REG_NUM
6411 || i.base_reg->reg_num == ESP_REG_NUM))
6413 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
6415 fake_zero_displacement = 1;
6416 if (i.tm.operand_types [op].bitfield.vec_disp8)
6417 i.types[op].bitfield.vec_disp8 = 1;
6419 i.types[op].bitfield.disp8 = 1;
6421 i.sib.scale = i.log2_scale_factor;
6422 if (i.index_reg == 0)
6424 gas_assert (!i.tm.opcode_modifier.vecsib);
6425 /* <disp>(%esp) becomes two byte modrm with no index
6426 register. We've already stored the code for esp
6427 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
6428 Any base register besides %esp will not use the
6429 extra modrm byte. */
6430 i.sib.index = NO_INDEX_REGISTER;
6432 else if (!i.tm.opcode_modifier.vecsib)
6434 if (i.index_reg->reg_num == RegEiz
6435 || i.index_reg->reg_num == RegRiz)
6436 i.sib.index = NO_INDEX_REGISTER;
6438 i.sib.index = i.index_reg->reg_num;
6439 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6440 if ((i.index_reg->reg_flags & RegRex) != 0)
6445 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
6446 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
6450 if (!fake_zero_displacement
6454 fake_zero_displacement = 1;
6455 if (i.disp_encoding == disp_encoding_8bit)
6456 i.types[op].bitfield.disp8 = 1;
6458 i.types[op].bitfield.disp32 = 1;
6460 i.rm.mode = mode_from_disp_size (i.types[op]);
6464 if (fake_zero_displacement)
6466 /* Fakes a zero displacement assuming that i.types[op]
6467 holds the correct displacement size. */
6470 gas_assert (i.op[op].disps == 0);
6471 exp = &disp_expressions[i.disp_operands++];
6472 i.op[op].disps = exp;
6473 exp->X_op = O_constant;
6474 exp->X_add_number = 0;
6475 exp->X_add_symbol = (symbolS *) 0;
6476 exp->X_op_symbol = (symbolS *) 0;
6484 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
6486 if (operand_type_check (i.types[0], imm))
6487 i.vex.register_specifier = NULL;
6490 /* VEX.vvvv encodes one of the sources when the first
6491 operand is not an immediate. */
6492 if (i.tm.opcode_modifier.vexw == VEXW0)
6493 i.vex.register_specifier = i.op[0].regs;
6495 i.vex.register_specifier = i.op[1].regs;
6498 /* Destination is a XMM register encoded in the ModRM.reg
6500 i.rm.reg = i.op[2].regs->reg_num;
6501 if ((i.op[2].regs->reg_flags & RegRex) != 0)
6504 /* ModRM.rm and VEX.B encodes the other source. */
6505 if (!i.mem_operands)
6509 if (i.tm.opcode_modifier.vexw == VEXW0)
6510 i.rm.regmem = i.op[1].regs->reg_num;
6512 i.rm.regmem = i.op[0].regs->reg_num;
6514 if ((i.op[1].regs->reg_flags & RegRex) != 0)
6518 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
6520 i.vex.register_specifier = i.op[2].regs;
6521 if (!i.mem_operands)
6524 i.rm.regmem = i.op[1].regs->reg_num;
6525 if ((i.op[1].regs->reg_flags & RegRex) != 0)
6529 /* Fill in i.rm.reg or i.rm.regmem field with register operand
6530 (if any) based on i.tm.extension_opcode. Again, we must be
6531 careful to make sure that segment/control/debug/test/MMX
6532 registers are coded into the i.rm.reg field. */
6533 else if (i.reg_operands)
6536 unsigned int vex_reg = ~0;
6538 for (op = 0; op < i.operands; op++)
6539 if (i.types[op].bitfield.reg8
6540 || i.types[op].bitfield.reg16
6541 || i.types[op].bitfield.reg32
6542 || i.types[op].bitfield.reg64
6543 || i.types[op].bitfield.regmmx
6544 || i.types[op].bitfield.regxmm
6545 || i.types[op].bitfield.regymm
6546 || i.types[op].bitfield.regbnd
6547 || i.types[op].bitfield.regzmm
6548 || i.types[op].bitfield.regmask
6549 || i.types[op].bitfield.sreg2
6550 || i.types[op].bitfield.sreg3
6551 || i.types[op].bitfield.control
6552 || i.types[op].bitfield.debug
6553 || i.types[op].bitfield.test)
6558 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6560 /* For instructions with VexNDS, the register-only
6561 source operand is encoded in VEX prefix. */
6562 gas_assert (mem != (unsigned int) ~0);
6567 gas_assert (op < i.operands);
6571 /* Check register-only source operand when two source
6572 operands are swapped. */
6573 if (!i.tm.operand_types[op].bitfield.baseindex
6574 && i.tm.operand_types[op + 1].bitfield.baseindex)
6578 gas_assert (mem == (vex_reg + 1)
6579 && op < i.operands);
6584 gas_assert (vex_reg < i.operands);
6588 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
6590 /* For instructions with VexNDD, the register destination
6591 is encoded in VEX prefix. */
6592 if (i.mem_operands == 0)
6594 /* There is no memory operand. */
6595 gas_assert ((op + 2) == i.operands);
6600 /* There are only 2 operands. */
6601 gas_assert (op < 2 && i.operands == 2);
6606 gas_assert (op < i.operands);
6608 if (vex_reg != (unsigned int) ~0)
6610 i386_operand_type *type = &i.tm.operand_types[vex_reg];
6612 if (type->bitfield.reg32 != 1
6613 && type->bitfield.reg64 != 1
6614 && !operand_type_equal (type, ®xmm)
6615 && !operand_type_equal (type, ®ymm)
6616 && !operand_type_equal (type, ®zmm)
6617 && !operand_type_equal (type, ®mask))
6620 i.vex.register_specifier = i.op[vex_reg].regs;
6623 /* Don't set OP operand twice. */
6626 /* If there is an extension opcode to put here, the
6627 register number must be put into the regmem field. */
6628 if (i.tm.extension_opcode != None)
6630 i.rm.regmem = i.op[op].regs->reg_num;
6631 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6633 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
6638 i.rm.reg = i.op[op].regs->reg_num;
6639 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6641 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
6646 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6647 must set it to 3 to indicate this is a register operand
6648 in the regmem field. */
6649 if (!i.mem_operands)
6653 /* Fill in i.rm.reg field with extension opcode (if any). */
6654 if (i.tm.extension_opcode != None)
6655 i.rm.reg = i.tm.extension_opcode;
6661 output_branch (void)
6667 relax_substateT subtype;
6671 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
6672 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
6675 if (i.prefix[DATA_PREFIX] != 0)
6681 /* Pentium4 branch hints. */
6682 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6683 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6688 if (i.prefix[REX_PREFIX] != 0)
6694 /* BND prefixed jump. */
6695 if (i.prefix[BND_PREFIX] != 0)
6697 FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
6701 if (i.prefixes != 0 && !intel_syntax)
6702 as_warn (_("skipping prefixes on this instruction"));
6704 /* It's always a symbol; End frag & setup for relax.
6705 Make sure there is enough room in this frag for the largest
6706 instruction we may generate in md_convert_frag. This is 2
6707 bytes for the opcode and room for the prefix and largest
6709 frag_grow (prefix + 2 + 4);
6710 /* Prefix and 1 opcode byte go in fr_fix. */
6711 p = frag_more (prefix + 1);
6712 if (i.prefix[DATA_PREFIX] != 0)
6713 *p++ = DATA_PREFIX_OPCODE;
6714 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6715 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6716 *p++ = i.prefix[SEG_PREFIX];
6717 if (i.prefix[REX_PREFIX] != 0)
6718 *p++ = i.prefix[REX_PREFIX];
6719 *p = i.tm.base_opcode;
6721 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6722 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6723 else if (cpu_arch_flags.bitfield.cpui386)
6724 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6726 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6729 sym = i.op[0].disps->X_add_symbol;
6730 off = i.op[0].disps->X_add_number;
6732 if (i.op[0].disps->X_op != O_constant
6733 && i.op[0].disps->X_op != O_symbol)
6735 /* Handle complex expressions. */
6736 sym = make_expr_symbol (i.op[0].disps);
6740 /* 1 possible extra opcode + 4 byte displacement go in var part.
6741 Pass reloc in fr_var. */
6742 frag_var (rs_machine_dependent, 5,
6744 || i.reloc[0] != NO_RELOC
6745 || (i.bnd_prefix == NULL && !add_bnd_prefix))
6747 : BFD_RELOC_X86_64_PC32_BND),
6748 subtype, sym, off, p);
6758 if (i.tm.opcode_modifier.jumpbyte)
6760 /* This is a loop or jecxz type instruction. */
6762 if (i.prefix[ADDR_PREFIX] != 0)
6764 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6767 /* Pentium4 branch hints. */
6768 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6769 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6771 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6780 if (flag_code == CODE_16BIT)
6783 if (i.prefix[DATA_PREFIX] != 0)
6785 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6795 if (i.prefix[REX_PREFIX] != 0)
6797 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6801 /* BND prefixed jump. */
6802 if (i.prefix[BND_PREFIX] != 0)
6804 FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
6808 if (i.prefixes != 0 && !intel_syntax)
6809 as_warn (_("skipping prefixes on this instruction"));
6811 p = frag_more (i.tm.opcode_length + size);
6812 switch (i.tm.opcode_length)
6815 *p++ = i.tm.base_opcode >> 8;
6817 *p++ = i.tm.base_opcode;
6823 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6824 i.op[0].disps, 1, reloc (size, 1, 1,
6825 (i.bnd_prefix != NULL
6829 /* All jumps handled here are signed, but don't use a signed limit
6830 check for 32 and 16 bit jumps as we want to allow wrap around at
6831 4G and 64k respectively. */
6833 fixP->fx_signed = 1;
6837 output_interseg_jump (void)
6845 if (flag_code == CODE_16BIT)
6849 if (i.prefix[DATA_PREFIX] != 0)
6855 if (i.prefix[REX_PREFIX] != 0)
6865 if (i.prefixes != 0 && !intel_syntax)
6866 as_warn (_("skipping prefixes on this instruction"));
6868 /* 1 opcode; 2 segment; offset */
6869 p = frag_more (prefix + 1 + 2 + size);
6871 if (i.prefix[DATA_PREFIX] != 0)
6872 *p++ = DATA_PREFIX_OPCODE;
6874 if (i.prefix[REX_PREFIX] != 0)
6875 *p++ = i.prefix[REX_PREFIX];
6877 *p++ = i.tm.base_opcode;
6878 if (i.op[1].imms->X_op == O_constant)
6880 offsetT n = i.op[1].imms->X_add_number;
6883 && !fits_in_unsigned_word (n)
6884 && !fits_in_signed_word (n))
6886 as_bad (_("16-bit jump out of range"));
6889 md_number_to_chars (p, n, size);
6892 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6893 i.op[1].imms, 0, reloc (size, 0, 0, 0, i.reloc[1]));
6894 if (i.op[0].imms->X_op != O_constant)
6895 as_bad (_("can't handle non absolute segment in `%s'"),
6897 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6903 fragS *insn_start_frag;
6904 offsetT insn_start_off;
6906 /* Tie dwarf2 debug info to the address at the start of the insn.
6907 We can't do this after the insn has been output as the current
6908 frag may have been closed off. eg. by frag_var. */
6909 dwarf2_emit_insn (0);
6911 insn_start_frag = frag_now;
6912 insn_start_off = frag_now_fix ();
6915 if (i.tm.opcode_modifier.jump)
6917 else if (i.tm.opcode_modifier.jumpbyte
6918 || i.tm.opcode_modifier.jumpdword)
6920 else if (i.tm.opcode_modifier.jumpintersegment)
6921 output_interseg_jump ();
6924 /* Output normal instructions here. */
6928 unsigned int prefix;
6930 /* Since the VEX/EVEX prefix contains the implicit prefix, we
6931 don't need the explicit prefix. */
6932 if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
6934 switch (i.tm.opcode_length)
6937 if (i.tm.base_opcode & 0xff000000)
6939 prefix = (i.tm.base_opcode >> 24) & 0xff;
6944 if ((i.tm.base_opcode & 0xff0000) != 0)
6946 prefix = (i.tm.base_opcode >> 16) & 0xff;
6947 if (i.tm.cpu_flags.bitfield.cpupadlock)
6950 if (prefix != REPE_PREFIX_OPCODE
6951 || (i.prefix[REP_PREFIX]
6952 != REPE_PREFIX_OPCODE))
6953 add_prefix (prefix);
6956 add_prefix (prefix);
6965 /* The prefix bytes. */
6966 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6968 FRAG_APPEND_1_CHAR (*q);
6972 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6977 /* REX byte is encoded in VEX prefix. */
6981 FRAG_APPEND_1_CHAR (*q);
6984 /* There should be no other prefixes for instructions
6989 /* For EVEX instructions i.vrex should become 0 after
6990 build_evex_prefix. For VEX instructions upper 16 registers
6991 aren't available, so VREX should be 0. */
6994 /* Now the VEX prefix. */
6995 p = frag_more (i.vex.length);
6996 for (j = 0; j < i.vex.length; j++)
6997 p[j] = i.vex.bytes[j];
7000 /* Now the opcode; be careful about word order here! */
7001 if (i.tm.opcode_length == 1)
7003 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
7007 switch (i.tm.opcode_length)
7011 *p++ = (i.tm.base_opcode >> 24) & 0xff;
7012 *p++ = (i.tm.base_opcode >> 16) & 0xff;
7016 *p++ = (i.tm.base_opcode >> 16) & 0xff;
7026 /* Put out high byte first: can't use md_number_to_chars! */
7027 *p++ = (i.tm.base_opcode >> 8) & 0xff;
7028 *p = i.tm.base_opcode & 0xff;
7031 /* Now the modrm byte and sib byte (if present). */
7032 if (i.tm.opcode_modifier.modrm)
7034 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
7037 /* If i.rm.regmem == ESP (4)
7038 && i.rm.mode != (Register mode)
7040 ==> need second modrm byte. */
7041 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
7043 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
7044 FRAG_APPEND_1_CHAR ((i.sib.base << 0
7046 | i.sib.scale << 6));
7049 if (i.disp_operands)
7050 output_disp (insn_start_frag, insn_start_off);
7053 output_imm (insn_start_frag, insn_start_off);
7059 pi ("" /*line*/, &i);
7061 #endif /* DEBUG386 */
7064 /* Return the size of the displacement operand N. */
7067 disp_size (unsigned int n)
7071 /* Vec_Disp8 has to be 8bit. */
7072 if (i.types[n].bitfield.vec_disp8)
7074 else if (i.types[n].bitfield.disp64)
7076 else if (i.types[n].bitfield.disp8)
7078 else if (i.types[n].bitfield.disp16)
7083 /* Return the size of the immediate operand N. */
7086 imm_size (unsigned int n)
7089 if (i.types[n].bitfield.imm64)
7091 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
7093 else if (i.types[n].bitfield.imm16)
7099 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
7104 for (n = 0; n < i.operands; n++)
7106 if (i.types[n].bitfield.vec_disp8
7107 || operand_type_check (i.types[n], disp))
7109 if (i.op[n].disps->X_op == O_constant)
7111 int size = disp_size (n);
7112 offsetT val = i.op[n].disps->X_add_number;
7114 if (i.types[n].bitfield.vec_disp8)
7116 val = offset_in_range (val, size);
7117 p = frag_more (size);
7118 md_number_to_chars (p, val, size);
7122 enum bfd_reloc_code_real reloc_type;
7123 int size = disp_size (n);
7124 int sign = i.types[n].bitfield.disp32s;
7125 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
7127 /* We can't have 8 bit displacement here. */
7128 gas_assert (!i.types[n].bitfield.disp8);
7130 /* The PC relative address is computed relative
7131 to the instruction boundary, so in case immediate
7132 fields follows, we need to adjust the value. */
7133 if (pcrel && i.imm_operands)
7138 for (n1 = 0; n1 < i.operands; n1++)
7139 if (operand_type_check (i.types[n1], imm))
7141 /* Only one immediate is allowed for PC
7142 relative address. */
7143 gas_assert (sz == 0);
7145 i.op[n].disps->X_add_number -= sz;
7147 /* We should find the immediate. */
7148 gas_assert (sz != 0);
7151 p = frag_more (size);
7152 reloc_type = reloc (size, pcrel, sign,
7153 (i.bnd_prefix != NULL
7157 && GOT_symbol == i.op[n].disps->X_add_symbol
7158 && (((reloc_type == BFD_RELOC_32
7159 || reloc_type == BFD_RELOC_X86_64_32S
7160 || (reloc_type == BFD_RELOC_64
7162 && (i.op[n].disps->X_op == O_symbol
7163 || (i.op[n].disps->X_op == O_add
7164 && ((symbol_get_value_expression
7165 (i.op[n].disps->X_op_symbol)->X_op)
7167 || reloc_type == BFD_RELOC_32_PCREL))
7171 if (insn_start_frag == frag_now)
7172 add = (p - frag_now->fr_literal) - insn_start_off;
7177 add = insn_start_frag->fr_fix - insn_start_off;
7178 for (fr = insn_start_frag->fr_next;
7179 fr && fr != frag_now; fr = fr->fr_next)
7181 add += p - frag_now->fr_literal;
7186 reloc_type = BFD_RELOC_386_GOTPC;
7187 i.op[n].imms->X_add_number += add;
7189 else if (reloc_type == BFD_RELOC_64)
7190 reloc_type = BFD_RELOC_X86_64_GOTPC64;
7192 /* Don't do the adjustment for x86-64, as there
7193 the pcrel addressing is relative to the _next_
7194 insn, and that is taken care of in other code. */
7195 reloc_type = BFD_RELOC_X86_64_GOTPC32;
7197 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
7198 i.op[n].disps, pcrel, reloc_type);
7205 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
7210 for (n = 0; n < i.operands; n++)
7212 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
7213 if (i.rounding && (int) n == i.rounding->operand)
7216 if (operand_type_check (i.types[n], imm))
7218 if (i.op[n].imms->X_op == O_constant)
7220 int size = imm_size (n);
7223 val = offset_in_range (i.op[n].imms->X_add_number,
7225 p = frag_more (size);
7226 md_number_to_chars (p, val, size);
7230 /* Not absolute_section.
7231 Need a 32-bit fixup (don't support 8bit
7232 non-absolute imms). Try to support other
7234 enum bfd_reloc_code_real reloc_type;
7235 int size = imm_size (n);
7238 if (i.types[n].bitfield.imm32s
7239 && (i.suffix == QWORD_MNEM_SUFFIX
7240 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
7245 p = frag_more (size);
7246 reloc_type = reloc (size, 0, sign, 0, i.reloc[n]);
7248 /* This is tough to explain. We end up with this one if we
7249 * have operands that look like
7250 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
7251 * obtain the absolute address of the GOT, and it is strongly
7252 * preferable from a performance point of view to avoid using
7253 * a runtime relocation for this. The actual sequence of
7254 * instructions often look something like:
7259 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
7261 * The call and pop essentially return the absolute address
7262 * of the label .L66 and store it in %ebx. The linker itself
7263 * will ultimately change the first operand of the addl so
7264 * that %ebx points to the GOT, but to keep things simple, the
7265 * .o file must have this operand set so that it generates not
7266 * the absolute address of .L66, but the absolute address of
7267 * itself. This allows the linker itself simply treat a GOTPC
7268 * relocation as asking for a pcrel offset to the GOT to be
7269 * added in, and the addend of the relocation is stored in the
7270 * operand field for the instruction itself.
7272 * Our job here is to fix the operand so that it would add
7273 * the correct offset so that %ebx would point to itself. The
7274 * thing that is tricky is that .-.L66 will point to the
7275 * beginning of the instruction, so we need to further modify
7276 * the operand so that it will point to itself. There are
7277 * other cases where you have something like:
7279 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
7281 * and here no correction would be required. Internally in
7282 * the assembler we treat operands of this form as not being
7283 * pcrel since the '.' is explicitly mentioned, and I wonder
7284 * whether it would simplify matters to do it this way. Who
7285 * knows. In earlier versions of the PIC patches, the
7286 * pcrel_adjust field was used to store the correction, but
7287 * since the expression is not pcrel, I felt it would be
7288 * confusing to do it this way. */
7290 if ((reloc_type == BFD_RELOC_32
7291 || reloc_type == BFD_RELOC_X86_64_32S
7292 || reloc_type == BFD_RELOC_64)
7294 && GOT_symbol == i.op[n].imms->X_add_symbol
7295 && (i.op[n].imms->X_op == O_symbol
7296 || (i.op[n].imms->X_op == O_add
7297 && ((symbol_get_value_expression
7298 (i.op[n].imms->X_op_symbol)->X_op)
7303 if (insn_start_frag == frag_now)
7304 add = (p - frag_now->fr_literal) - insn_start_off;
7309 add = insn_start_frag->fr_fix - insn_start_off;
7310 for (fr = insn_start_frag->fr_next;
7311 fr && fr != frag_now; fr = fr->fr_next)
7313 add += p - frag_now->fr_literal;
7317 reloc_type = BFD_RELOC_386_GOTPC;
7319 reloc_type = BFD_RELOC_X86_64_GOTPC32;
7321 reloc_type = BFD_RELOC_X86_64_GOTPC64;
7322 i.op[n].imms->X_add_number += add;
7324 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
7325 i.op[n].imms, 0, reloc_type);
7331 /* x86_cons_fix_new is called via the expression parsing code when a
7332 reloc is needed. We use this hook to get the correct .got reloc. */
7333 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
7334 static int cons_sign = -1;
7337 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
7340 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, 0, got_reloc);
7342 got_reloc = NO_RELOC;
7345 if (exp->X_op == O_secrel)
7347 exp->X_op = O_symbol;
7348 r = BFD_RELOC_32_SECREL;
7352 fix_new_exp (frag, off, len, exp, 0, r);
7355 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
7356 purpose of the `.dc.a' internal pseudo-op. */
7359 x86_address_bytes (void)
7361 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
7363 return stdoutput->arch_info->bits_per_address / 8;
7366 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
7368 # define lex_got(reloc, adjust, types, bnd_prefix) NULL
7370 /* Parse operands of the form
7371 <symbol>@GOTOFF+<nnn>
7372 and similar .plt or .got references.
7374 If we find one, set up the correct relocation in RELOC and copy the
7375 input string, minus the `@GOTOFF' into a malloc'd buffer for
7376 parsing by the calling routine. Return this buffer, and if ADJUST
7377 is non-null set it to the length of the string we removed from the
7378 input line. Otherwise return NULL. */
7380 lex_got (enum bfd_reloc_code_real *rel,
7382 i386_operand_type *types,
7385 /* Some of the relocations depend on the size of what field is to
7386 be relocated. But in our callers i386_immediate and i386_displacement
7387 we don't yet know the operand size (this will be set by insn
7388 matching). Hence we record the word32 relocation here,
7389 and adjust the reloc according to the real size in reloc(). */
7390 static const struct {
7393 const enum bfd_reloc_code_real rel[2];
7394 const i386_operand_type types64;
7396 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7397 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32,
7399 OPERAND_TYPE_IMM32_64 },
7401 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
7402 BFD_RELOC_X86_64_PLTOFF64 },
7403 OPERAND_TYPE_IMM64 },
7404 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
7405 BFD_RELOC_X86_64_PLT32 },
7406 OPERAND_TYPE_IMM32_32S_DISP32 },
7407 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
7408 BFD_RELOC_X86_64_GOTPLT64 },
7409 OPERAND_TYPE_IMM64_DISP64 },
7410 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
7411 BFD_RELOC_X86_64_GOTOFF64 },
7412 OPERAND_TYPE_IMM64_DISP64 },
7413 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
7414 BFD_RELOC_X86_64_GOTPCREL },
7415 OPERAND_TYPE_IMM32_32S_DISP32 },
7416 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
7417 BFD_RELOC_X86_64_TLSGD },
7418 OPERAND_TYPE_IMM32_32S_DISP32 },
7419 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
7420 _dummy_first_bfd_reloc_code_real },
7421 OPERAND_TYPE_NONE },
7422 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
7423 BFD_RELOC_X86_64_TLSLD },
7424 OPERAND_TYPE_IMM32_32S_DISP32 },
7425 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
7426 BFD_RELOC_X86_64_GOTTPOFF },
7427 OPERAND_TYPE_IMM32_32S_DISP32 },
7428 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
7429 BFD_RELOC_X86_64_TPOFF32 },
7430 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
7431 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
7432 _dummy_first_bfd_reloc_code_real },
7433 OPERAND_TYPE_NONE },
7434 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
7435 BFD_RELOC_X86_64_DTPOFF32 },
7436 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
7437 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
7438 _dummy_first_bfd_reloc_code_real },
7439 OPERAND_TYPE_NONE },
7440 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
7441 _dummy_first_bfd_reloc_code_real },
7442 OPERAND_TYPE_NONE },
7443 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
7444 BFD_RELOC_X86_64_GOT32 },
7445 OPERAND_TYPE_IMM32_32S_64_DISP32 },
7446 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
7447 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
7448 OPERAND_TYPE_IMM32_32S_DISP32 },
7449 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
7450 BFD_RELOC_X86_64_TLSDESC_CALL },
7451 OPERAND_TYPE_IMM32_32S_DISP32 },
7456 #if defined (OBJ_MAYBE_ELF)
7461 for (cp = input_line_pointer; *cp != '@'; cp++)
7462 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
7465 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
7467 int len = gotrel[j].len;
7468 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
7470 if (gotrel[j].rel[object_64bit] != 0)
7473 char *tmpbuf, *past_reloc;
7475 *rel = gotrel[j].rel[object_64bit];
7479 if (flag_code != CODE_64BIT)
7481 types->bitfield.imm32 = 1;
7482 types->bitfield.disp32 = 1;
7485 *types = gotrel[j].types64;
7488 if (j != 0 && GOT_symbol == NULL)
7489 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
7491 /* The length of the first part of our input line. */
7492 first = cp - input_line_pointer;
7494 /* The second part goes from after the reloc token until
7495 (and including) an end_of_line char or comma. */
7496 past_reloc = cp + 1 + len;
7498 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
7500 second = cp + 1 - past_reloc;
7502 /* Allocate and copy string. The trailing NUL shouldn't
7503 be necessary, but be safe. */
7504 tmpbuf = (char *) xmalloc (first + second + 2);
7505 memcpy (tmpbuf, input_line_pointer, first);
7506 if (second != 0 && *past_reloc != ' ')
7507 /* Replace the relocation token with ' ', so that
7508 errors like foo@GOTOFF1 will be detected. */
7509 tmpbuf[first++] = ' ';
7511 /* Increment length by 1 if the relocation token is
7516 memcpy (tmpbuf + first, past_reloc, second);
7517 tmpbuf[first + second] = '\0';
7518 if (bnd_prefix && *rel == BFD_RELOC_X86_64_PLT32)
7519 *rel = BFD_RELOC_X86_64_PLT32_BND;
7523 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7524 gotrel[j].str, 1 << (5 + object_64bit));
7529 /* Might be a symbol version string. Don't as_bad here. */
7538 /* Parse operands of the form
7539 <symbol>@SECREL32+<nnn>
7541 If we find one, set up the correct relocation in RELOC and copy the
7542 input string, minus the `@SECREL32' into a malloc'd buffer for
7543 parsing by the calling routine. Return this buffer, and if ADJUST
7544 is non-null set it to the length of the string we removed from the
7545 input line. Otherwise return NULL.
7547 This function is copied from the ELF version above adjusted for PE targets. */
7550 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
7551 int *adjust ATTRIBUTE_UNUSED,
7552 i386_operand_type *types,
7553 int bnd_prefix ATTRIBUTE_UNUSED)
7559 const enum bfd_reloc_code_real rel[2];
7560 const i386_operand_type types64;
7564 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
7565 BFD_RELOC_32_SECREL },
7566 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
7572 for (cp = input_line_pointer; *cp != '@'; cp++)
7573 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
7576 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
7578 int len = gotrel[j].len;
7580 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
7582 if (gotrel[j].rel[object_64bit] != 0)
7585 char *tmpbuf, *past_reloc;
7587 *rel = gotrel[j].rel[object_64bit];
7593 if (flag_code != CODE_64BIT)
7595 types->bitfield.imm32 = 1;
7596 types->bitfield.disp32 = 1;
7599 *types = gotrel[j].types64;
7602 /* The length of the first part of our input line. */
7603 first = cp - input_line_pointer;
7605 /* The second part goes from after the reloc token until
7606 (and including) an end_of_line char or comma. */
7607 past_reloc = cp + 1 + len;
7609 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
7611 second = cp + 1 - past_reloc;
7613 /* Allocate and copy string. The trailing NUL shouldn't
7614 be necessary, but be safe. */
7615 tmpbuf = (char *) xmalloc (first + second + 2);
7616 memcpy (tmpbuf, input_line_pointer, first);
7617 if (second != 0 && *past_reloc != ' ')
7618 /* Replace the relocation token with ' ', so that
7619 errors like foo@SECLREL321 will be detected. */
7620 tmpbuf[first++] = ' ';
7621 memcpy (tmpbuf + first, past_reloc, second);
7622 tmpbuf[first + second] = '\0';
7626 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7627 gotrel[j].str, 1 << (5 + object_64bit));
7632 /* Might be a symbol version string. Don't as_bad here. */
7639 x86_cons (expressionS *exp, int size)
7641 intel_syntax = -intel_syntax;
7644 if (size == 4 || (object_64bit && size == 8))
7646 /* Handle @GOTOFF and the like in an expression. */
7648 char *gotfree_input_line;
7651 save = input_line_pointer;
7652 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL, 0);
7653 if (gotfree_input_line)
7654 input_line_pointer = gotfree_input_line;
7658 if (gotfree_input_line)
7660 /* expression () has merrily parsed up to the end of line,
7661 or a comma - in the wrong buffer. Transfer how far
7662 input_line_pointer has moved to the right buffer. */
7663 input_line_pointer = (save
7664 + (input_line_pointer - gotfree_input_line)
7666 free (gotfree_input_line);
7667 if (exp->X_op == O_constant
7668 || exp->X_op == O_absent
7669 || exp->X_op == O_illegal
7670 || exp->X_op == O_register
7671 || exp->X_op == O_big)
7673 char c = *input_line_pointer;
7674 *input_line_pointer = 0;
7675 as_bad (_("missing or invalid expression `%s'"), save);
7676 *input_line_pointer = c;
7683 intel_syntax = -intel_syntax;
7686 i386_intel_simplify (exp);
7690 signed_cons (int size)
7692 if (flag_code == CODE_64BIT)
7700 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
7707 if (exp.X_op == O_symbol)
7708 exp.X_op = O_secrel;
7710 emit_expr (&exp, 4);
7712 while (*input_line_pointer++ == ',');
7714 input_line_pointer--;
7715 demand_empty_rest_of_line ();
7719 /* Handle Vector operations. */
7722 check_VecOperations (char *op_string, char *op_end)
7724 const reg_entry *mask;
7729 && (op_end == NULL || op_string < op_end))
7732 if (*op_string == '{')
7736 /* Check broadcasts. */
7737 if (strncmp (op_string, "1to", 3) == 0)
7742 goto duplicated_vec_op;
7745 if (*op_string == '8')
7746 bcst_type = BROADCAST_1TO8;
7747 else if (*op_string == '1'
7748 && *(op_string+1) == '6')
7750 bcst_type = BROADCAST_1TO16;
7755 as_bad (_("Unsupported broadcast: `%s'"), saved);
7760 broadcast_op.type = bcst_type;
7761 broadcast_op.operand = this_operand;
7762 i.broadcast = &broadcast_op;
7764 /* Check masking operation. */
7765 else if ((mask = parse_register (op_string, &end_op)) != NULL)
7767 /* k0 can't be used for write mask. */
7768 if (mask->reg_num == 0)
7770 as_bad (_("`%s' can't be used for write mask"),
7777 mask_op.mask = mask;
7778 mask_op.zeroing = 0;
7779 mask_op.operand = this_operand;
7785 goto duplicated_vec_op;
7787 i.mask->mask = mask;
7789 /* Only "{z}" is allowed here. No need to check
7790 zeroing mask explicitly. */
7791 if (i.mask->operand != this_operand)
7793 as_bad (_("invalid write mask `%s'"), saved);
7800 /* Check zeroing-flag for masking operation. */
7801 else if (*op_string == 'z')
7805 mask_op.mask = NULL;
7806 mask_op.zeroing = 1;
7807 mask_op.operand = this_operand;
7812 if (i.mask->zeroing)
7815 as_bad (_("duplicated `%s'"), saved);
7819 i.mask->zeroing = 1;
7821 /* Only "{%k}" is allowed here. No need to check mask
7822 register explicitly. */
7823 if (i.mask->operand != this_operand)
7825 as_bad (_("invalid zeroing-masking `%s'"),
7834 goto unknown_vec_op;
7836 if (*op_string != '}')
7838 as_bad (_("missing `}' in `%s'"), saved);
7845 /* We don't know this one. */
7846 as_bad (_("unknown vector operation: `%s'"), saved);
7854 i386_immediate (char *imm_start)
7856 char *save_input_line_pointer;
7857 char *gotfree_input_line;
7860 i386_operand_type types;
7862 operand_type_set (&types, ~0);
7864 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
7866 as_bad (_("at most %d immediate operands are allowed"),
7867 MAX_IMMEDIATE_OPERANDS);
7871 exp = &im_expressions[i.imm_operands++];
7872 i.op[this_operand].imms = exp;
7874 if (is_space_char (*imm_start))
7877 save_input_line_pointer = input_line_pointer;
7878 input_line_pointer = imm_start;
7880 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types,
7881 (i.bnd_prefix != NULL
7882 || add_bnd_prefix));
7883 if (gotfree_input_line)
7884 input_line_pointer = gotfree_input_line;
7886 exp_seg = expression (exp);
7890 /* Handle vector operations. */
7891 if (*input_line_pointer == '{')
7893 input_line_pointer = check_VecOperations (input_line_pointer,
7895 if (input_line_pointer == NULL)
7899 if (*input_line_pointer)
7900 as_bad (_("junk `%s' after expression"), input_line_pointer);
7902 input_line_pointer = save_input_line_pointer;
7903 if (gotfree_input_line)
7905 free (gotfree_input_line);
7907 if (exp->X_op == O_constant || exp->X_op == O_register)
7908 exp->X_op = O_illegal;
7911 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
7915 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7916 i386_operand_type types, const char *imm_start)
7918 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
7921 as_bad (_("missing or invalid immediate expression `%s'"),
7925 else if (exp->X_op == O_constant)
7927 /* Size it properly later. */
7928 i.types[this_operand].bitfield.imm64 = 1;
7929 /* If not 64bit, sign extend val. */
7930 if (flag_code != CODE_64BIT
7931 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
7933 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
7935 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7936 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
7937 && exp_seg != absolute_section
7938 && exp_seg != text_section
7939 && exp_seg != data_section
7940 && exp_seg != bss_section
7941 && exp_seg != undefined_section
7942 && !bfd_is_com_section (exp_seg))
7944 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7948 else if (!intel_syntax && exp->X_op == O_register)
7951 as_bad (_("illegal immediate register operand %s"), imm_start);
7956 /* This is an address. The size of the address will be
7957 determined later, depending on destination register,
7958 suffix, or the default for the section. */
7959 i.types[this_operand].bitfield.imm8 = 1;
7960 i.types[this_operand].bitfield.imm16 = 1;
7961 i.types[this_operand].bitfield.imm32 = 1;
7962 i.types[this_operand].bitfield.imm32s = 1;
7963 i.types[this_operand].bitfield.imm64 = 1;
7964 i.types[this_operand] = operand_type_and (i.types[this_operand],
7972 i386_scale (char *scale)
7975 char *save = input_line_pointer;
7977 input_line_pointer = scale;
7978 val = get_absolute_expression ();
7983 i.log2_scale_factor = 0;
7986 i.log2_scale_factor = 1;
7989 i.log2_scale_factor = 2;
7992 i.log2_scale_factor = 3;
7996 char sep = *input_line_pointer;
7998 *input_line_pointer = '\0';
7999 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
8001 *input_line_pointer = sep;
8002 input_line_pointer = save;
8006 if (i.log2_scale_factor != 0 && i.index_reg == 0)
8008 as_warn (_("scale factor of %d without an index register"),
8009 1 << i.log2_scale_factor);
8010 i.log2_scale_factor = 0;
8012 scale = input_line_pointer;
8013 input_line_pointer = save;
8018 i386_displacement (char *disp_start, char *disp_end)
8022 char *save_input_line_pointer;
8023 char *gotfree_input_line;
8025 i386_operand_type bigdisp, types = anydisp;
8028 if (i.disp_operands == MAX_MEMORY_OPERANDS)
8030 as_bad (_("at most %d displacement operands are allowed"),
8031 MAX_MEMORY_OPERANDS);
8035 operand_type_set (&bigdisp, 0);
8036 if ((i.types[this_operand].bitfield.jumpabsolute)
8037 || (!current_templates->start->opcode_modifier.jump
8038 && !current_templates->start->opcode_modifier.jumpdword))
8040 bigdisp.bitfield.disp32 = 1;
8041 override = (i.prefix[ADDR_PREFIX] != 0);
8042 if (flag_code == CODE_64BIT)
8046 bigdisp.bitfield.disp32s = 1;
8047 bigdisp.bitfield.disp64 = 1;
8050 else if ((flag_code == CODE_16BIT) ^ override)
8052 bigdisp.bitfield.disp32 = 0;
8053 bigdisp.bitfield.disp16 = 1;
8058 /* For PC-relative branches, the width of the displacement
8059 is dependent upon data size, not address size. */
8060 override = (i.prefix[DATA_PREFIX] != 0);
8061 if (flag_code == CODE_64BIT)
8063 if (override || i.suffix == WORD_MNEM_SUFFIX)
8064 bigdisp.bitfield.disp16 = 1;
8067 bigdisp.bitfield.disp32 = 1;
8068 bigdisp.bitfield.disp32s = 1;
8074 override = (i.suffix == (flag_code != CODE_16BIT
8076 : LONG_MNEM_SUFFIX));
8077 bigdisp.bitfield.disp32 = 1;
8078 if ((flag_code == CODE_16BIT) ^ override)
8080 bigdisp.bitfield.disp32 = 0;
8081 bigdisp.bitfield.disp16 = 1;
8085 i.types[this_operand] = operand_type_or (i.types[this_operand],
8088 exp = &disp_expressions[i.disp_operands];
8089 i.op[this_operand].disps = exp;
8091 save_input_line_pointer = input_line_pointer;
8092 input_line_pointer = disp_start;
8093 END_STRING_AND_SAVE (disp_end);
8095 #ifndef GCC_ASM_O_HACK
8096 #define GCC_ASM_O_HACK 0
8099 END_STRING_AND_SAVE (disp_end + 1);
8100 if (i.types[this_operand].bitfield.baseIndex
8101 && displacement_string_end[-1] == '+')
8103 /* This hack is to avoid a warning when using the "o"
8104 constraint within gcc asm statements.
8107 #define _set_tssldt_desc(n,addr,limit,type) \
8108 __asm__ __volatile__ ( \
8110 "movw %w1,2+%0\n\t" \
8112 "movb %b1,4+%0\n\t" \
8113 "movb %4,5+%0\n\t" \
8114 "movb $0,6+%0\n\t" \
8115 "movb %h1,7+%0\n\t" \
8117 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
8119 This works great except that the output assembler ends
8120 up looking a bit weird if it turns out that there is
8121 no offset. You end up producing code that looks like:
8134 So here we provide the missing zero. */
8136 *displacement_string_end = '0';
8139 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types,
8140 (i.bnd_prefix != NULL
8141 || add_bnd_prefix));
8142 if (gotfree_input_line)
8143 input_line_pointer = gotfree_input_line;
8145 exp_seg = expression (exp);
8148 if (*input_line_pointer)
8149 as_bad (_("junk `%s' after expression"), input_line_pointer);
8151 RESTORE_END_STRING (disp_end + 1);
8153 input_line_pointer = save_input_line_pointer;
8154 if (gotfree_input_line)
8156 free (gotfree_input_line);
8158 if (exp->X_op == O_constant || exp->X_op == O_register)
8159 exp->X_op = O_illegal;
8162 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
8164 RESTORE_END_STRING (disp_end);
8170 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
8171 i386_operand_type types, const char *disp_start)
8173 i386_operand_type bigdisp;
8176 /* We do this to make sure that the section symbol is in
8177 the symbol table. We will ultimately change the relocation
8178 to be relative to the beginning of the section. */
8179 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
8180 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
8181 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8183 if (exp->X_op != O_symbol)
8186 if (S_IS_LOCAL (exp->X_add_symbol)
8187 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
8188 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
8189 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
8190 exp->X_op = O_subtract;
8191 exp->X_op_symbol = GOT_symbol;
8192 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
8193 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
8194 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8195 i.reloc[this_operand] = BFD_RELOC_64;
8197 i.reloc[this_operand] = BFD_RELOC_32;
8200 else if (exp->X_op == O_absent
8201 || exp->X_op == O_illegal
8202 || exp->X_op == O_big)
8205 as_bad (_("missing or invalid displacement expression `%s'"),
8210 else if (flag_code == CODE_64BIT
8211 && !i.prefix[ADDR_PREFIX]
8212 && exp->X_op == O_constant)
8214 /* Since displacement is signed extended to 64bit, don't allow
8215 disp32 and turn off disp32s if they are out of range. */
8216 i.types[this_operand].bitfield.disp32 = 0;
8217 if (!fits_in_signed_long (exp->X_add_number))
8219 i.types[this_operand].bitfield.disp32s = 0;
8220 if (i.types[this_operand].bitfield.baseindex)
8222 as_bad (_("0x%lx out range of signed 32bit displacement"),
8223 (long) exp->X_add_number);
8229 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8230 else if (exp->X_op != O_constant
8231 && OUTPUT_FLAVOR == bfd_target_aout_flavour
8232 && exp_seg != absolute_section
8233 && exp_seg != text_section
8234 && exp_seg != data_section
8235 && exp_seg != bss_section
8236 && exp_seg != undefined_section
8237 && !bfd_is_com_section (exp_seg))
8239 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
8244 /* Check if this is a displacement only operand. */
8245 bigdisp = i.types[this_operand];
8246 bigdisp.bitfield.disp8 = 0;
8247 bigdisp.bitfield.disp16 = 0;
8248 bigdisp.bitfield.disp32 = 0;
8249 bigdisp.bitfield.disp32s = 0;
8250 bigdisp.bitfield.disp64 = 0;
8251 if (operand_type_all_zero (&bigdisp))
8252 i.types[this_operand] = operand_type_and (i.types[this_operand],
8258 /* Make sure the memory operand we've been dealt is valid.
8259 Return 1 on success, 0 on a failure. */
8262 i386_index_check (const char *operand_string)
8264 const char *kind = "base/index";
8265 enum flag_code addr_mode;
8267 if (i.prefix[ADDR_PREFIX])
8268 addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
8271 addr_mode = flag_code;
8273 #if INFER_ADDR_PREFIX
8274 if (i.mem_operands == 0)
8276 /* Infer address prefix from the first memory operand. */
8277 const reg_entry *addr_reg = i.base_reg;
8279 if (addr_reg == NULL)
8280 addr_reg = i.index_reg;
8284 if (addr_reg->reg_num == RegEip
8285 || addr_reg->reg_num == RegEiz
8286 || addr_reg->reg_type.bitfield.reg32)
8287 addr_mode = CODE_32BIT;
8288 else if (flag_code != CODE_64BIT
8289 && addr_reg->reg_type.bitfield.reg16)
8290 addr_mode = CODE_16BIT;
8292 if (addr_mode != flag_code)
8294 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
8296 /* Change the size of any displacement too. At most one
8297 of Disp16 or Disp32 is set.
8298 FIXME. There doesn't seem to be any real need for
8299 separate Disp16 and Disp32 flags. The same goes for
8300 Imm16 and Imm32. Removing them would probably clean
8301 up the code quite a lot. */
8302 if (flag_code != CODE_64BIT
8303 && (i.types[this_operand].bitfield.disp16
8304 || i.types[this_operand].bitfield.disp32))
8305 i.types[this_operand]
8306 = operand_type_xor (i.types[this_operand], disp16_32);
8313 if (current_templates->start->opcode_modifier.isstring
8314 && !current_templates->start->opcode_modifier.immext
8315 && (current_templates->end[-1].opcode_modifier.isstring
8318 /* Memory operands of string insns are special in that they only allow
8319 a single register (rDI, rSI, or rBX) as their memory address. */
8320 const reg_entry *expected_reg;
8321 static const char *di_si[][2] =
8327 static const char *bx[] = { "ebx", "bx", "rbx" };
8329 kind = "string address";
8331 if (current_templates->start->opcode_modifier.w)
8333 i386_operand_type type = current_templates->end[-1].operand_types[0];
8335 if (!type.bitfield.baseindex
8336 || ((!i.mem_operands != !intel_syntax)
8337 && current_templates->end[-1].operand_types[1]
8338 .bitfield.baseindex))
8339 type = current_templates->end[-1].operand_types[1];
8340 expected_reg = hash_find (reg_hash,
8341 di_si[addr_mode][type.bitfield.esseg]);
8345 expected_reg = hash_find (reg_hash, bx[addr_mode]);
8347 if (i.base_reg != expected_reg
8349 || operand_type_check (i.types[this_operand], disp))
8351 /* The second memory operand must have the same size as
8355 && !((addr_mode == CODE_64BIT
8356 && i.base_reg->reg_type.bitfield.reg64)
8357 || (addr_mode == CODE_32BIT
8358 ? i.base_reg->reg_type.bitfield.reg32
8359 : i.base_reg->reg_type.bitfield.reg16)))
8362 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
8364 intel_syntax ? '[' : '(',
8366 expected_reg->reg_name,
8367 intel_syntax ? ']' : ')');
8374 as_bad (_("`%s' is not a valid %s expression"),
8375 operand_string, kind);
8380 if (addr_mode != CODE_16BIT)
8382 /* 32-bit/64-bit checks. */
8384 && (addr_mode == CODE_64BIT
8385 ? !i.base_reg->reg_type.bitfield.reg64
8386 : !i.base_reg->reg_type.bitfield.reg32)
8388 || (i.base_reg->reg_num
8389 != (addr_mode == CODE_64BIT ? RegRip : RegEip))))
8391 && !i.index_reg->reg_type.bitfield.regxmm
8392 && !i.index_reg->reg_type.bitfield.regymm
8393 && !i.index_reg->reg_type.bitfield.regzmm
8394 && ((addr_mode == CODE_64BIT
8395 ? !(i.index_reg->reg_type.bitfield.reg64
8396 || i.index_reg->reg_num == RegRiz)
8397 : !(i.index_reg->reg_type.bitfield.reg32
8398 || i.index_reg->reg_num == RegEiz))
8399 || !i.index_reg->reg_type.bitfield.baseindex)))
8404 /* 16-bit checks. */
8406 && (!i.base_reg->reg_type.bitfield.reg16
8407 || !i.base_reg->reg_type.bitfield.baseindex))
8409 && (!i.index_reg->reg_type.bitfield.reg16
8410 || !i.index_reg->reg_type.bitfield.baseindex
8412 && i.base_reg->reg_num < 6
8413 && i.index_reg->reg_num >= 6
8414 && i.log2_scale_factor == 0))))
8421 /* Handle vector immediates. */
8424 RC_SAE_immediate (const char *imm_start)
8426 unsigned int match_found, j;
8427 const char *pstr = imm_start;
8435 for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
8437 if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
8441 rc_op.type = RC_NamesTable[j].type;
8442 rc_op.operand = this_operand;
8443 i.rounding = &rc_op;
8447 as_bad (_("duplicated `%s'"), imm_start);
8450 pstr += RC_NamesTable[j].len;
8460 as_bad (_("Missing '}': '%s'"), imm_start);
8463 /* RC/SAE immediate string should contain nothing more. */;
8466 as_bad (_("Junk after '}': '%s'"), imm_start);
8470 exp = &im_expressions[i.imm_operands++];
8471 i.op[this_operand].imms = exp;
8473 exp->X_op = O_constant;
8474 exp->X_add_number = 0;
8475 exp->X_add_symbol = (symbolS *) 0;
8476 exp->X_op_symbol = (symbolS *) 0;
8478 i.types[this_operand].bitfield.imm8 = 1;
8482 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
8486 i386_att_operand (char *operand_string)
8490 char *op_string = operand_string;
8492 if (is_space_char (*op_string))
8495 /* We check for an absolute prefix (differentiating,
8496 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
8497 if (*op_string == ABSOLUTE_PREFIX)
8500 if (is_space_char (*op_string))
8502 i.types[this_operand].bitfield.jumpabsolute = 1;
8505 /* Check if operand is a register. */
8506 if ((r = parse_register (op_string, &end_op)) != NULL)
8508 i386_operand_type temp;
8510 /* Check for a segment override by searching for ':' after a
8511 segment register. */
8513 if (is_space_char (*op_string))
8515 if (*op_string == ':'
8516 && (r->reg_type.bitfield.sreg2
8517 || r->reg_type.bitfield.sreg3))
8522 i.seg[i.mem_operands] = &es;
8525 i.seg[i.mem_operands] = &cs;
8528 i.seg[i.mem_operands] = &ss;
8531 i.seg[i.mem_operands] = &ds;
8534 i.seg[i.mem_operands] = &fs;
8537 i.seg[i.mem_operands] = &gs;
8541 /* Skip the ':' and whitespace. */
8543 if (is_space_char (*op_string))
8546 if (!is_digit_char (*op_string)
8547 && !is_identifier_char (*op_string)
8548 && *op_string != '('
8549 && *op_string != ABSOLUTE_PREFIX)
8551 as_bad (_("bad memory operand `%s'"), op_string);
8554 /* Handle case of %es:*foo. */
8555 if (*op_string == ABSOLUTE_PREFIX)
8558 if (is_space_char (*op_string))
8560 i.types[this_operand].bitfield.jumpabsolute = 1;
8562 goto do_memory_reference;
8565 /* Handle vector operations. */
8566 if (*op_string == '{')
8568 op_string = check_VecOperations (op_string, NULL);
8569 if (op_string == NULL)
8575 as_bad (_("junk `%s' after register"), op_string);
8579 temp.bitfield.baseindex = 0;
8580 i.types[this_operand] = operand_type_or (i.types[this_operand],
8582 i.types[this_operand].bitfield.unspecified = 0;
8583 i.op[this_operand].regs = r;
8586 else if (*op_string == REGISTER_PREFIX)
8588 as_bad (_("bad register name `%s'"), op_string);
8591 else if (*op_string == IMMEDIATE_PREFIX)
8594 if (i.types[this_operand].bitfield.jumpabsolute)
8596 as_bad (_("immediate operand illegal with absolute jump"));
8599 if (!i386_immediate (op_string))
8602 else if (RC_SAE_immediate (operand_string))
8604 /* If it is a RC or SAE immediate, do nothing. */
8607 else if (is_digit_char (*op_string)
8608 || is_identifier_char (*op_string)
8609 || *op_string == '(')
8611 /* This is a memory reference of some sort. */
8614 /* Start and end of displacement string expression (if found). */
8615 char *displacement_string_start;
8616 char *displacement_string_end;
8619 do_memory_reference:
8620 if ((i.mem_operands == 1
8621 && !current_templates->start->opcode_modifier.isstring)
8622 || i.mem_operands == 2)
8624 as_bad (_("too many memory references for `%s'"),
8625 current_templates->start->name);
8629 /* Check for base index form. We detect the base index form by
8630 looking for an ')' at the end of the operand, searching
8631 for the '(' matching it, and finding a REGISTER_PREFIX or ','
8633 base_string = op_string + strlen (op_string);
8635 /* Handle vector operations. */
8636 vop_start = strchr (op_string, '{');
8637 if (vop_start && vop_start < base_string)
8639 if (check_VecOperations (vop_start, base_string) == NULL)
8641 base_string = vop_start;
8645 if (is_space_char (*base_string))
8648 /* If we only have a displacement, set-up for it to be parsed later. */
8649 displacement_string_start = op_string;
8650 displacement_string_end = base_string + 1;
8652 if (*base_string == ')')
8655 unsigned int parens_balanced = 1;
8656 /* We've already checked that the number of left & right ()'s are
8657 equal, so this loop will not be infinite. */
8661 if (*base_string == ')')
8663 if (*base_string == '(')
8666 while (parens_balanced);
8668 temp_string = base_string;
8670 /* Skip past '(' and whitespace. */
8672 if (is_space_char (*base_string))
8675 if (*base_string == ','
8676 || ((i.base_reg = parse_register (base_string, &end_op))
8679 displacement_string_end = temp_string;
8681 i.types[this_operand].bitfield.baseindex = 1;
8685 base_string = end_op;
8686 if (is_space_char (*base_string))
8690 /* There may be an index reg or scale factor here. */
8691 if (*base_string == ',')
8694 if (is_space_char (*base_string))
8697 if ((i.index_reg = parse_register (base_string, &end_op))
8700 base_string = end_op;
8701 if (is_space_char (*base_string))
8703 if (*base_string == ',')
8706 if (is_space_char (*base_string))
8709 else if (*base_string != ')')
8711 as_bad (_("expecting `,' or `)' "
8712 "after index register in `%s'"),
8717 else if (*base_string == REGISTER_PREFIX)
8719 end_op = strchr (base_string, ',');
8722 as_bad (_("bad register name `%s'"), base_string);
8726 /* Check for scale factor. */
8727 if (*base_string != ')')
8729 char *end_scale = i386_scale (base_string);
8734 base_string = end_scale;
8735 if (is_space_char (*base_string))
8737 if (*base_string != ')')
8739 as_bad (_("expecting `)' "
8740 "after scale factor in `%s'"),
8745 else if (!i.index_reg)
8747 as_bad (_("expecting index register or scale factor "
8748 "after `,'; got '%c'"),
8753 else if (*base_string != ')')
8755 as_bad (_("expecting `,' or `)' "
8756 "after base register in `%s'"),
8761 else if (*base_string == REGISTER_PREFIX)
8763 end_op = strchr (base_string, ',');
8766 as_bad (_("bad register name `%s'"), base_string);
8771 /* If there's an expression beginning the operand, parse it,
8772 assuming displacement_string_start and
8773 displacement_string_end are meaningful. */
8774 if (displacement_string_start != displacement_string_end)
8776 if (!i386_displacement (displacement_string_start,
8777 displacement_string_end))
8781 /* Special case for (%dx) while doing input/output op. */
8783 && operand_type_equal (&i.base_reg->reg_type,
8784 ®16_inoutportreg)
8786 && i.log2_scale_factor == 0
8787 && i.seg[i.mem_operands] == 0
8788 && !operand_type_check (i.types[this_operand], disp))
8790 i.types[this_operand] = inoutportreg;
8794 if (i386_index_check (operand_string) == 0)
8796 i.types[this_operand].bitfield.mem = 1;
8801 /* It's not a memory operand; argh! */
8802 as_bad (_("invalid char %s beginning operand %d `%s'"),
8803 output_invalid (*op_string),
8808 return 1; /* Normal return. */
8811 /* Calculate the maximum variable size (i.e., excluding fr_fix)
8812 that an rs_machine_dependent frag may reach. */
8815 i386_frag_max_var (fragS *frag)
8817 /* The only relaxable frags are for jumps.
8818 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
8819 gas_assert (frag->fr_type == rs_machine_dependent);
8820 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
8823 /* md_estimate_size_before_relax()
8825 Called just before relax() for rs_machine_dependent frags. The x86
8826 assembler uses these frags to handle variable size jump
8829 Any symbol that is now undefined will not become defined.
8830 Return the correct fr_subtype in the frag.
8831 Return the initial "guess for variable size of frag" to caller.
8832 The guess is actually the growth beyond the fixed part. Whatever
8833 we do to grow the fixed or variable part contributes to our
8837 md_estimate_size_before_relax (fragS *fragP, segT segment)
8839 /* We've already got fragP->fr_subtype right; all we have to do is
8840 check for un-relaxable symbols. On an ELF system, we can't relax
8841 an externally visible symbol, because it may be overridden by a
8843 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
8844 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8846 && (S_IS_EXTERNAL (fragP->fr_symbol)
8847 || S_IS_WEAK (fragP->fr_symbol)
8848 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
8849 & BSF_GNU_INDIRECT_FUNCTION))))
8851 #if defined (OBJ_COFF) && defined (TE_PE)
8852 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
8853 && S_IS_WEAK (fragP->fr_symbol))
8857 /* Symbol is undefined in this segment, or we need to keep a
8858 reloc so that weak symbols can be overridden. */
8859 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
8860 enum bfd_reloc_code_real reloc_type;
8861 unsigned char *opcode;
8864 if (fragP->fr_var != NO_RELOC)
8865 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
8867 reloc_type = BFD_RELOC_16_PCREL;
8869 reloc_type = BFD_RELOC_32_PCREL;
8871 old_fr_fix = fragP->fr_fix;
8872 opcode = (unsigned char *) fragP->fr_opcode;
8874 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
8877 /* Make jmp (0xeb) a (d)word displacement jump. */
8879 fragP->fr_fix += size;
8880 fix_new (fragP, old_fr_fix, size,
8882 fragP->fr_offset, 1,
8888 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
8890 /* Negate the condition, and branch past an
8891 unconditional jump. */
8894 /* Insert an unconditional jump. */
8896 /* We added two extra opcode bytes, and have a two byte
8898 fragP->fr_fix += 2 + 2;
8899 fix_new (fragP, old_fr_fix + 2, 2,
8901 fragP->fr_offset, 1,
8908 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
8913 fixP = fix_new (fragP, old_fr_fix, 1,
8915 fragP->fr_offset, 1,
8917 fixP->fx_signed = 1;
8921 /* This changes the byte-displacement jump 0x7N
8922 to the (d)word-displacement jump 0x0f,0x8N. */
8923 opcode[1] = opcode[0] + 0x10;
8924 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8925 /* We've added an opcode byte. */
8926 fragP->fr_fix += 1 + size;
8927 fix_new (fragP, old_fr_fix + 1, size,
8929 fragP->fr_offset, 1,
8934 BAD_CASE (fragP->fr_subtype);
8938 return fragP->fr_fix - old_fr_fix;
8941 /* Guess size depending on current relax state. Initially the relax
8942 state will correspond to a short jump and we return 1, because
8943 the variable part of the frag (the branch offset) is one byte
8944 long. However, we can relax a section more than once and in that
8945 case we must either set fr_subtype back to the unrelaxed state,
8946 or return the value for the appropriate branch. */
8947 return md_relax_table[fragP->fr_subtype].rlx_length;
8950 /* Called after relax() is finished.
8952 In: Address of frag.
8953 fr_type == rs_machine_dependent.
8954 fr_subtype is what the address relaxed to.
8956 Out: Any fixSs and constants are set up.
8957 Caller will turn frag into a ".space 0". */
8960 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
8963 unsigned char *opcode;
8964 unsigned char *where_to_put_displacement = NULL;
8965 offsetT target_address;
8966 offsetT opcode_address;
8967 unsigned int extension = 0;
8968 offsetT displacement_from_opcode_start;
8970 opcode = (unsigned char *) fragP->fr_opcode;
8972 /* Address we want to reach in file space. */
8973 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
8975 /* Address opcode resides at in file space. */
8976 opcode_address = fragP->fr_address + fragP->fr_fix;
8978 /* Displacement from opcode start to fill into instruction. */
8979 displacement_from_opcode_start = target_address - opcode_address;
8981 if ((fragP->fr_subtype & BIG) == 0)
8983 /* Don't have to change opcode. */
8984 extension = 1; /* 1 opcode + 1 displacement */
8985 where_to_put_displacement = &opcode[1];
8989 if (no_cond_jump_promotion
8990 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
8991 as_warn_where (fragP->fr_file, fragP->fr_line,
8992 _("long jump required"));
8994 switch (fragP->fr_subtype)
8996 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
8997 extension = 4; /* 1 opcode + 4 displacement */
8999 where_to_put_displacement = &opcode[1];
9002 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
9003 extension = 2; /* 1 opcode + 2 displacement */
9005 where_to_put_displacement = &opcode[1];
9008 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
9009 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
9010 extension = 5; /* 2 opcode + 4 displacement */
9011 opcode[1] = opcode[0] + 0x10;
9012 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
9013 where_to_put_displacement = &opcode[2];
9016 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
9017 extension = 3; /* 2 opcode + 2 displacement */
9018 opcode[1] = opcode[0] + 0x10;
9019 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
9020 where_to_put_displacement = &opcode[2];
9023 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
9028 where_to_put_displacement = &opcode[3];
9032 BAD_CASE (fragP->fr_subtype);
9037 /* If size if less then four we are sure that the operand fits,
9038 but if it's 4, then it could be that the displacement is larger
9040 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
9042 && ((addressT) (displacement_from_opcode_start - extension
9043 + ((addressT) 1 << 31))
9044 > (((addressT) 2 << 31) - 1)))
9046 as_bad_where (fragP->fr_file, fragP->fr_line,
9047 _("jump target out of range"));
9048 /* Make us emit 0. */
9049 displacement_from_opcode_start = extension;
9051 /* Now put displacement after opcode. */
9052 md_number_to_chars ((char *) where_to_put_displacement,
9053 (valueT) (displacement_from_opcode_start - extension),
9054 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
9055 fragP->fr_fix += extension;
9058 /* Apply a fixup (fixP) to segment data, once it has been determined
9059 by our caller that we have all the info we need to fix it up.
9061 Parameter valP is the pointer to the value of the bits.
9063 On the 386, immediates, displacements, and data pointers are all in
9064 the same (little-endian) format, so we don't need to care about which
9068 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
9070 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
9071 valueT value = *valP;
9073 #if !defined (TE_Mach)
9076 switch (fixP->fx_r_type)
9082 fixP->fx_r_type = BFD_RELOC_64_PCREL;
9085 case BFD_RELOC_X86_64_32S:
9086 fixP->fx_r_type = BFD_RELOC_32_PCREL;
9089 fixP->fx_r_type = BFD_RELOC_16_PCREL;
9092 fixP->fx_r_type = BFD_RELOC_8_PCREL;
9097 if (fixP->fx_addsy != NULL
9098 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
9099 || fixP->fx_r_type == BFD_RELOC_64_PCREL
9100 || fixP->fx_r_type == BFD_RELOC_16_PCREL
9101 || fixP->fx_r_type == BFD_RELOC_8_PCREL
9102 || fixP->fx_r_type == BFD_RELOC_X86_64_PC32_BND)
9103 && !use_rela_relocations)
9105 /* This is a hack. There should be a better way to handle this.
9106 This covers for the fact that bfd_install_relocation will
9107 subtract the current location (for partial_inplace, PC relative
9108 relocations); see more below. */
9112 || OUTPUT_FLAVOR == bfd_target_coff_flavour
9115 value += fixP->fx_where + fixP->fx_frag->fr_address;
9117 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9120 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
9123 || (symbol_section_p (fixP->fx_addsy)
9124 && sym_seg != absolute_section))
9125 && !generic_force_reloc (fixP))
9127 /* Yes, we add the values in twice. This is because
9128 bfd_install_relocation subtracts them out again. I think
9129 bfd_install_relocation is broken, but I don't dare change
9131 value += fixP->fx_where + fixP->fx_frag->fr_address;
9135 #if defined (OBJ_COFF) && defined (TE_PE)
9136 /* For some reason, the PE format does not store a
9137 section address offset for a PC relative symbol. */
9138 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
9139 || S_IS_WEAK (fixP->fx_addsy))
9140 value += md_pcrel_from (fixP);
9143 #if defined (OBJ_COFF) && defined (TE_PE)
9144 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
9146 value -= S_GET_VALUE (fixP->fx_addsy);
9150 /* Fix a few things - the dynamic linker expects certain values here,
9151 and we must not disappoint it. */
9152 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9153 if (IS_ELF && fixP->fx_addsy)
9154 switch (fixP->fx_r_type)
9156 case BFD_RELOC_386_PLT32:
9157 case BFD_RELOC_X86_64_PLT32:
9158 case BFD_RELOC_X86_64_PLT32_BND:
9159 /* Make the jump instruction point to the address of the operand. At
9160 runtime we merely add the offset to the actual PLT entry. */
9164 case BFD_RELOC_386_TLS_GD:
9165 case BFD_RELOC_386_TLS_LDM:
9166 case BFD_RELOC_386_TLS_IE_32:
9167 case BFD_RELOC_386_TLS_IE:
9168 case BFD_RELOC_386_TLS_GOTIE:
9169 case BFD_RELOC_386_TLS_GOTDESC:
9170 case BFD_RELOC_X86_64_TLSGD:
9171 case BFD_RELOC_X86_64_TLSLD:
9172 case BFD_RELOC_X86_64_GOTTPOFF:
9173 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9174 value = 0; /* Fully resolved at runtime. No addend. */
9176 case BFD_RELOC_386_TLS_LE:
9177 case BFD_RELOC_386_TLS_LDO_32:
9178 case BFD_RELOC_386_TLS_LE_32:
9179 case BFD_RELOC_X86_64_DTPOFF32:
9180 case BFD_RELOC_X86_64_DTPOFF64:
9181 case BFD_RELOC_X86_64_TPOFF32:
9182 case BFD_RELOC_X86_64_TPOFF64:
9183 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9186 case BFD_RELOC_386_TLS_DESC_CALL:
9187 case BFD_RELOC_X86_64_TLSDESC_CALL:
9188 value = 0; /* Fully resolved at runtime. No addend. */
9189 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9193 case BFD_RELOC_386_GOT32:
9194 case BFD_RELOC_X86_64_GOT32:
9195 value = 0; /* Fully resolved at runtime. No addend. */
9198 case BFD_RELOC_VTABLE_INHERIT:
9199 case BFD_RELOC_VTABLE_ENTRY:
9206 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
9208 #endif /* !defined (TE_Mach) */
9210 /* Are we finished with this relocation now? */
9211 if (fixP->fx_addsy == NULL)
9213 #if defined (OBJ_COFF) && defined (TE_PE)
9214 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
9217 /* Remember value for tc_gen_reloc. */
9218 fixP->fx_addnumber = value;
9219 /* Clear out the frag for now. */
9223 else if (use_rela_relocations)
9225 fixP->fx_no_overflow = 1;
9226 /* Remember value for tc_gen_reloc. */
9227 fixP->fx_addnumber = value;
9231 md_number_to_chars (p, value, fixP->fx_size);
9235 md_atof (int type, char *litP, int *sizeP)
9237 /* This outputs the LITTLENUMs in REVERSE order;
9238 in accord with the bigendian 386. */
9239 return ieee_md_atof (type, litP, sizeP, FALSE);
9242 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
9245 output_invalid (int c)
9248 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
9251 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
9252 "(0x%x)", (unsigned char) c);
9253 return output_invalid_buf;
9256 /* REG_STRING starts *before* REGISTER_PREFIX. */
9258 static const reg_entry *
9259 parse_real_register (char *reg_string, char **end_op)
9261 char *s = reg_string;
9263 char reg_name_given[MAX_REG_NAME_SIZE + 1];
9266 /* Skip possible REGISTER_PREFIX and possible whitespace. */
9267 if (*s == REGISTER_PREFIX)
9270 if (is_space_char (*s))
9274 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
9276 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
9277 return (const reg_entry *) NULL;
9281 /* For naked regs, make sure that we are not dealing with an identifier.
9282 This prevents confusing an identifier like `eax_var' with register
9284 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
9285 return (const reg_entry *) NULL;
9289 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
9291 /* Handle floating point regs, allowing spaces in the (i) part. */
9292 if (r == i386_regtab /* %st is first entry of table */)
9294 if (is_space_char (*s))
9299 if (is_space_char (*s))
9301 if (*s >= '0' && *s <= '7')
9305 if (is_space_char (*s))
9310 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
9315 /* We have "%st(" then garbage. */
9316 return (const reg_entry *) NULL;
9320 if (r == NULL || allow_pseudo_reg)
9323 if (operand_type_all_zero (&r->reg_type))
9324 return (const reg_entry *) NULL;
9326 if ((r->reg_type.bitfield.reg32
9327 || r->reg_type.bitfield.sreg3
9328 || r->reg_type.bitfield.control
9329 || r->reg_type.bitfield.debug
9330 || r->reg_type.bitfield.test)
9331 && !cpu_arch_flags.bitfield.cpui386)
9332 return (const reg_entry *) NULL;
9334 if (r->reg_type.bitfield.floatreg
9335 && !cpu_arch_flags.bitfield.cpu8087
9336 && !cpu_arch_flags.bitfield.cpu287
9337 && !cpu_arch_flags.bitfield.cpu387)
9338 return (const reg_entry *) NULL;
9340 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
9341 return (const reg_entry *) NULL;
9343 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
9344 return (const reg_entry *) NULL;
9346 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
9347 return (const reg_entry *) NULL;
9349 if ((r->reg_type.bitfield.regzmm || r->reg_type.bitfield.regmask)
9350 && !cpu_arch_flags.bitfield.cpuavx512f)
9351 return (const reg_entry *) NULL;
9353 /* Don't allow fake index register unless allow_index_reg isn't 0. */
9354 if (!allow_index_reg
9355 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
9356 return (const reg_entry *) NULL;
9358 /* Upper 16 vector register is only available with VREX in 64bit
9360 if ((r->reg_flags & RegVRex))
9362 if (!cpu_arch_flags.bitfield.cpuvrex
9363 || flag_code != CODE_64BIT)
9364 return (const reg_entry *) NULL;
9369 if (((r->reg_flags & (RegRex64 | RegRex))
9370 || r->reg_type.bitfield.reg64)
9371 && (!cpu_arch_flags.bitfield.cpulm
9372 || !operand_type_equal (&r->reg_type, &control))
9373 && flag_code != CODE_64BIT)
9374 return (const reg_entry *) NULL;
9376 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
9377 return (const reg_entry *) NULL;
9382 /* REG_STRING starts *before* REGISTER_PREFIX. */
9384 static const reg_entry *
9385 parse_register (char *reg_string, char **end_op)
9389 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
9390 r = parse_real_register (reg_string, end_op);
9395 char *save = input_line_pointer;
9399 input_line_pointer = reg_string;
9400 c = get_symbol_end ();
9401 symbolP = symbol_find (reg_string);
9402 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
9404 const expressionS *e = symbol_get_value_expression (symbolP);
9406 know (e->X_op == O_register);
9407 know (e->X_add_number >= 0
9408 && (valueT) e->X_add_number < i386_regtab_size);
9409 r = i386_regtab + e->X_add_number;
9410 *end_op = input_line_pointer;
9412 *input_line_pointer = c;
9413 input_line_pointer = save;
9419 i386_parse_name (char *name, expressionS *e, char *nextcharP)
9422 char *end = input_line_pointer;
9425 r = parse_register (name, &input_line_pointer);
9426 if (r && end <= input_line_pointer)
9428 *nextcharP = *input_line_pointer;
9429 *input_line_pointer = 0;
9430 e->X_op = O_register;
9431 e->X_add_number = r - i386_regtab;
9434 input_line_pointer = end;
9436 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
9440 md_operand (expressionS *e)
9445 switch (*input_line_pointer)
9447 case REGISTER_PREFIX:
9448 r = parse_real_register (input_line_pointer, &end);
9451 e->X_op = O_register;
9452 e->X_add_number = r - i386_regtab;
9453 input_line_pointer = end;
9458 gas_assert (intel_syntax);
9459 end = input_line_pointer++;
9461 if (*input_line_pointer == ']')
9463 ++input_line_pointer;
9464 e->X_op_symbol = make_expr_symbol (e);
9465 e->X_add_symbol = NULL;
9466 e->X_add_number = 0;
9472 input_line_pointer = end;
9479 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9480 const char *md_shortopts = "kVQ:sqn";
9482 const char *md_shortopts = "qn";
9485 #define OPTION_32 (OPTION_MD_BASE + 0)
9486 #define OPTION_64 (OPTION_MD_BASE + 1)
9487 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
9488 #define OPTION_MARCH (OPTION_MD_BASE + 3)
9489 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
9490 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
9491 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
9492 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
9493 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
9494 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
9495 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
9496 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
9497 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
9498 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
9499 #define OPTION_X32 (OPTION_MD_BASE + 14)
9500 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
9501 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
9502 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
9504 struct option md_longopts[] =
9506 {"32", no_argument, NULL, OPTION_32},
9507 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9508 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9509 {"64", no_argument, NULL, OPTION_64},
9511 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9512 {"x32", no_argument, NULL, OPTION_X32},
9514 {"divide", no_argument, NULL, OPTION_DIVIDE},
9515 {"march", required_argument, NULL, OPTION_MARCH},
9516 {"mtune", required_argument, NULL, OPTION_MTUNE},
9517 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
9518 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
9519 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
9520 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
9521 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
9522 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
9523 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
9524 {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
9525 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
9526 {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
9527 {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
9528 {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
9529 {NULL, no_argument, NULL, 0}
9531 size_t md_longopts_size = sizeof (md_longopts);
9534 md_parse_option (int c, char *arg)
9542 optimize_align_code = 0;
9549 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9550 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
9551 should be emitted or not. FIXME: Not implemented. */
9555 /* -V: SVR4 argument to print version ID. */
9557 print_version_id ();
9560 /* -k: Ignore for FreeBSD compatibility. */
9565 /* -s: On i386 Solaris, this tells the native assembler to use
9566 .stab instead of .stab.excl. We always use .stab anyhow. */
9569 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9570 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9573 const char **list, **l;
9575 list = bfd_target_list ();
9576 for (l = list; *l != NULL; l++)
9577 if (CONST_STRNEQ (*l, "elf64-x86-64")
9578 || strcmp (*l, "coff-x86-64") == 0
9579 || strcmp (*l, "pe-x86-64") == 0
9580 || strcmp (*l, "pei-x86-64") == 0
9581 || strcmp (*l, "mach-o-x86-64") == 0)
9583 default_arch = "x86_64";
9587 as_fatal (_("no compiled in support for x86_64"));
9593 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9597 const char **list, **l;
9599 list = bfd_target_list ();
9600 for (l = list; *l != NULL; l++)
9601 if (CONST_STRNEQ (*l, "elf32-x86-64"))
9603 default_arch = "x86_64:32";
9607 as_fatal (_("no compiled in support for 32bit x86_64"));
9611 as_fatal (_("32bit x86_64 is only supported for ELF"));
9616 default_arch = "i386";
9620 #ifdef SVR4_COMMENT_CHARS
9625 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
9627 for (s = i386_comment_chars; *s != '\0'; s++)
9631 i386_comment_chars = n;
9637 arch = xstrdup (arg);
9641 as_fatal (_("invalid -march= option: `%s'"), arg);
9642 next = strchr (arch, '+');
9645 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
9647 if (strcmp (arch, cpu_arch [j].name) == 0)
9650 if (! cpu_arch[j].flags.bitfield.cpui386)
9653 cpu_arch_name = cpu_arch[j].name;
9654 cpu_sub_arch_name = NULL;
9655 cpu_arch_flags = cpu_arch[j].flags;
9656 cpu_arch_isa = cpu_arch[j].type;
9657 cpu_arch_isa_flags = cpu_arch[j].flags;
9658 if (!cpu_arch_tune_set)
9660 cpu_arch_tune = cpu_arch_isa;
9661 cpu_arch_tune_flags = cpu_arch_isa_flags;
9665 else if (*cpu_arch [j].name == '.'
9666 && strcmp (arch, cpu_arch [j].name + 1) == 0)
9668 /* ISA entension. */
9669 i386_cpu_flags flags;
9671 if (!cpu_arch[j].negated)
9672 flags = cpu_flags_or (cpu_arch_flags,
9675 flags = cpu_flags_and_not (cpu_arch_flags,
9677 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
9679 if (cpu_sub_arch_name)
9681 char *name = cpu_sub_arch_name;
9682 cpu_sub_arch_name = concat (name,
9684 (const char *) NULL);
9688 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
9689 cpu_arch_flags = flags;
9690 cpu_arch_isa_flags = flags;
9696 if (j >= ARRAY_SIZE (cpu_arch))
9697 as_fatal (_("invalid -march= option: `%s'"), arg);
9701 while (next != NULL );
9706 as_fatal (_("invalid -mtune= option: `%s'"), arg);
9707 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
9709 if (strcmp (arg, cpu_arch [j].name) == 0)
9711 cpu_arch_tune_set = 1;
9712 cpu_arch_tune = cpu_arch [j].type;
9713 cpu_arch_tune_flags = cpu_arch[j].flags;
9717 if (j >= ARRAY_SIZE (cpu_arch))
9718 as_fatal (_("invalid -mtune= option: `%s'"), arg);
9721 case OPTION_MMNEMONIC:
9722 if (strcasecmp (arg, "att") == 0)
9724 else if (strcasecmp (arg, "intel") == 0)
9727 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
9730 case OPTION_MSYNTAX:
9731 if (strcasecmp (arg, "att") == 0)
9733 else if (strcasecmp (arg, "intel") == 0)
9736 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
9739 case OPTION_MINDEX_REG:
9740 allow_index_reg = 1;
9743 case OPTION_MNAKED_REG:
9744 allow_naked_reg = 1;
9747 case OPTION_MOLD_GCC:
9751 case OPTION_MSSE2AVX:
9755 case OPTION_MSSE_CHECK:
9756 if (strcasecmp (arg, "error") == 0)
9757 sse_check = check_error;
9758 else if (strcasecmp (arg, "warning") == 0)
9759 sse_check = check_warning;
9760 else if (strcasecmp (arg, "none") == 0)
9761 sse_check = check_none;
9763 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
9766 case OPTION_MOPERAND_CHECK:
9767 if (strcasecmp (arg, "error") == 0)
9768 operand_check = check_error;
9769 else if (strcasecmp (arg, "warning") == 0)
9770 operand_check = check_warning;
9771 else if (strcasecmp (arg, "none") == 0)
9772 operand_check = check_none;
9774 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
9777 case OPTION_MAVXSCALAR:
9778 if (strcasecmp (arg, "128") == 0)
9780 else if (strcasecmp (arg, "256") == 0)
9783 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
9786 case OPTION_MADD_BND_PREFIX:
9790 case OPTION_MEVEXLIG:
9791 if (strcmp (arg, "128") == 0)
9793 else if (strcmp (arg, "256") == 0)
9795 else if (strcmp (arg, "512") == 0)
9798 as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
9801 case OPTION_MEVEXWIG:
9802 if (strcmp (arg, "0") == 0)
9804 else if (strcmp (arg, "1") == 0)
9807 as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
9816 #define MESSAGE_TEMPLATE \
9820 show_arch (FILE *stream, int ext, int check)
9822 static char message[] = MESSAGE_TEMPLATE;
9823 char *start = message + 27;
9825 int size = sizeof (MESSAGE_TEMPLATE);
9832 left = size - (start - message);
9833 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
9835 /* Should it be skipped? */
9836 if (cpu_arch [j].skip)
9839 name = cpu_arch [j].name;
9840 len = cpu_arch [j].len;
9843 /* It is an extension. Skip if we aren't asked to show it. */
9854 /* It is an processor. Skip if we show only extension. */
9857 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
9859 /* It is an impossible processor - skip. */
9863 /* Reserve 2 spaces for ", " or ",\0" */
9866 /* Check if there is any room. */
9874 p = mempcpy (p, name, len);
9878 /* Output the current message now and start a new one. */
9881 fprintf (stream, "%s\n", message);
9883 left = size - (start - message) - len - 2;
9885 gas_assert (left >= 0);
9887 p = mempcpy (p, name, len);
9892 fprintf (stream, "%s\n", message);
9896 md_show_usage (FILE *stream)
9898 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9899 fprintf (stream, _("\
9901 -V print assembler version number\n\
9904 fprintf (stream, _("\
9905 -n Do not optimize code alignment\n\
9906 -q quieten some warnings\n"));
9907 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9908 fprintf (stream, _("\
9911 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9912 || defined (TE_PE) || defined (TE_PEP))
9913 fprintf (stream, _("\
9914 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
9916 #ifdef SVR4_COMMENT_CHARS
9917 fprintf (stream, _("\
9918 --divide do not treat `/' as a comment character\n"));
9920 fprintf (stream, _("\
9921 --divide ignored\n"));
9923 fprintf (stream, _("\
9924 -march=CPU[,+EXTENSION...]\n\
9925 generate code for CPU and EXTENSION, CPU is one of:\n"));
9926 show_arch (stream, 0, 1);
9927 fprintf (stream, _("\
9928 EXTENSION is combination of:\n"));
9929 show_arch (stream, 1, 0);
9930 fprintf (stream, _("\
9931 -mtune=CPU optimize for CPU, CPU is one of:\n"));
9932 show_arch (stream, 0, 0);
9933 fprintf (stream, _("\
9934 -msse2avx encode SSE instructions with VEX prefix\n"));
9935 fprintf (stream, _("\
9936 -msse-check=[none|error|warning]\n\
9937 check SSE instructions\n"));
9938 fprintf (stream, _("\
9939 -moperand-check=[none|error|warning]\n\
9940 check operand combinations for validity\n"));
9941 fprintf (stream, _("\
9942 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
9944 fprintf (stream, _("\
9945 -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
9947 fprintf (stream, _("\
9948 -mevexwig=[0|1] encode EVEX instructions with specific EVEX.W value\n\
9949 for EVEX.W bit ignored instructions\n"));
9950 fprintf (stream, _("\
9951 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
9952 fprintf (stream, _("\
9953 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
9954 fprintf (stream, _("\
9955 -mindex-reg support pseudo index registers\n"));
9956 fprintf (stream, _("\
9957 -mnaked-reg don't require `%%' prefix for registers\n"));
9958 fprintf (stream, _("\
9959 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
9960 fprintf (stream, _("\
9961 -madd-bnd-prefix add BND prefix for all valid branches\n"));
9964 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
9965 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9966 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9968 /* Pick the target format to use. */
9971 i386_target_format (void)
9973 if (!strncmp (default_arch, "x86_64", 6))
9975 update_code_flag (CODE_64BIT, 1);
9976 if (default_arch[6] == '\0')
9977 x86_elf_abi = X86_64_ABI;
9979 x86_elf_abi = X86_64_X32_ABI;
9981 else if (!strcmp (default_arch, "i386"))
9982 update_code_flag (CODE_32BIT, 1);
9984 as_fatal (_("unknown architecture"));
9986 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
9987 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9988 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
9989 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9991 switch (OUTPUT_FLAVOR)
9993 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
9994 case bfd_target_aout_flavour:
9995 return AOUT_TARGET_FORMAT;
9997 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
9998 # if defined (TE_PE) || defined (TE_PEP)
9999 case bfd_target_coff_flavour:
10000 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
10001 # elif defined (TE_GO32)
10002 case bfd_target_coff_flavour:
10003 return "coff-go32";
10005 case bfd_target_coff_flavour:
10006 return "coff-i386";
10009 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
10010 case bfd_target_elf_flavour:
10012 const char *format;
10014 switch (x86_elf_abi)
10017 format = ELF_TARGET_FORMAT;
10020 use_rela_relocations = 1;
10022 format = ELF_TARGET_FORMAT64;
10024 case X86_64_X32_ABI:
10025 use_rela_relocations = 1;
10027 disallow_64bit_reloc = 1;
10028 format = ELF_TARGET_FORMAT32;
10031 if (cpu_arch_isa == PROCESSOR_L1OM)
10033 if (x86_elf_abi != X86_64_ABI)
10034 as_fatal (_("Intel L1OM is 64bit only"));
10035 return ELF_TARGET_L1OM_FORMAT;
10037 if (cpu_arch_isa == PROCESSOR_K1OM)
10039 if (x86_elf_abi != X86_64_ABI)
10040 as_fatal (_("Intel K1OM is 64bit only"));
10041 return ELF_TARGET_K1OM_FORMAT;
10047 #if defined (OBJ_MACH_O)
10048 case bfd_target_mach_o_flavour:
10049 if (flag_code == CODE_64BIT)
10051 use_rela_relocations = 1;
10053 return "mach-o-x86-64";
10056 return "mach-o-i386";
10064 #endif /* OBJ_MAYBE_ more than one */
10066 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
10068 i386_elf_emit_arch_note (void)
10070 if (IS_ELF && cpu_arch_name != NULL)
10073 asection *seg = now_seg;
10074 subsegT subseg = now_subseg;
10075 Elf_Internal_Note i_note;
10076 Elf_External_Note e_note;
10077 asection *note_secp;
10080 /* Create the .note section. */
10081 note_secp = subseg_new (".note", 0);
10082 bfd_set_section_flags (stdoutput,
10084 SEC_HAS_CONTENTS | SEC_READONLY);
10086 /* Process the arch string. */
10087 len = strlen (cpu_arch_name);
10089 i_note.namesz = len + 1;
10091 i_note.type = NT_ARCH;
10092 p = frag_more (sizeof (e_note.namesz));
10093 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
10094 p = frag_more (sizeof (e_note.descsz));
10095 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
10096 p = frag_more (sizeof (e_note.type));
10097 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
10098 p = frag_more (len + 1);
10099 strcpy (p, cpu_arch_name);
10101 frag_align (2, 0, 0);
10103 subseg_set (seg, subseg);
10109 md_undefined_symbol (char *name)
10111 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
10112 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
10113 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
10114 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
10118 if (symbol_find (name))
10119 as_bad (_("GOT already in symbol table"));
10120 GOT_symbol = symbol_new (name, undefined_section,
10121 (valueT) 0, &zero_address_frag);
10128 /* Round up a section size to the appropriate boundary. */
10131 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
10133 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10134 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
10136 /* For a.out, force the section size to be aligned. If we don't do
10137 this, BFD will align it for us, but it will not write out the
10138 final bytes of the section. This may be a bug in BFD, but it is
10139 easier to fix it here since that is how the other a.out targets
10143 align = bfd_get_section_alignment (stdoutput, segment);
10144 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
10151 /* On the i386, PC-relative offsets are relative to the start of the
10152 next instruction. That is, the address of the offset, plus its
10153 size, since the offset is always the last part of the insn. */
10156 md_pcrel_from (fixS *fixP)
10158 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
10164 s_bss (int ignore ATTRIBUTE_UNUSED)
10168 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10170 obj_elf_section_change_hook ();
10172 temp = get_absolute_expression ();
10173 subseg_set (bss_section, (subsegT) temp);
10174 demand_empty_rest_of_line ();
10180 i386_validate_fix (fixS *fixp)
10182 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
10184 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
10188 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
10193 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
10195 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
10197 fixp->fx_subsy = 0;
10202 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
10205 bfd_reloc_code_real_type code;
10207 switch (fixp->fx_r_type)
10209 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10210 case BFD_RELOC_SIZE32:
10211 case BFD_RELOC_SIZE64:
10212 if (S_IS_DEFINED (fixp->fx_addsy)
10213 && !S_IS_EXTERNAL (fixp->fx_addsy))
10215 /* Resolve size relocation against local symbol to size of
10216 the symbol plus addend. */
10217 valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
10218 if (fixp->fx_r_type == BFD_RELOC_SIZE32
10219 && !fits_in_unsigned_long (value))
10220 as_bad_where (fixp->fx_file, fixp->fx_line,
10221 _("symbol size computation overflow"));
10222 fixp->fx_addsy = NULL;
10223 fixp->fx_subsy = NULL;
10224 md_apply_fix (fixp, (valueT *) &value, NULL);
10229 case BFD_RELOC_X86_64_PLT32:
10230 case BFD_RELOC_X86_64_PLT32_BND:
10231 case BFD_RELOC_X86_64_GOT32:
10232 case BFD_RELOC_X86_64_GOTPCREL:
10233 case BFD_RELOC_386_PLT32:
10234 case BFD_RELOC_386_GOT32:
10235 case BFD_RELOC_386_GOTOFF:
10236 case BFD_RELOC_386_GOTPC:
10237 case BFD_RELOC_386_TLS_GD:
10238 case BFD_RELOC_386_TLS_LDM:
10239 case BFD_RELOC_386_TLS_LDO_32:
10240 case BFD_RELOC_386_TLS_IE_32:
10241 case BFD_RELOC_386_TLS_IE:
10242 case BFD_RELOC_386_TLS_GOTIE:
10243 case BFD_RELOC_386_TLS_LE_32:
10244 case BFD_RELOC_386_TLS_LE:
10245 case BFD_RELOC_386_TLS_GOTDESC:
10246 case BFD_RELOC_386_TLS_DESC_CALL:
10247 case BFD_RELOC_X86_64_TLSGD:
10248 case BFD_RELOC_X86_64_TLSLD:
10249 case BFD_RELOC_X86_64_DTPOFF32:
10250 case BFD_RELOC_X86_64_DTPOFF64:
10251 case BFD_RELOC_X86_64_GOTTPOFF:
10252 case BFD_RELOC_X86_64_TPOFF32:
10253 case BFD_RELOC_X86_64_TPOFF64:
10254 case BFD_RELOC_X86_64_GOTOFF64:
10255 case BFD_RELOC_X86_64_GOTPC32:
10256 case BFD_RELOC_X86_64_GOT64:
10257 case BFD_RELOC_X86_64_GOTPCREL64:
10258 case BFD_RELOC_X86_64_GOTPC64:
10259 case BFD_RELOC_X86_64_GOTPLT64:
10260 case BFD_RELOC_X86_64_PLTOFF64:
10261 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
10262 case BFD_RELOC_X86_64_TLSDESC_CALL:
10263 case BFD_RELOC_RVA:
10264 case BFD_RELOC_VTABLE_ENTRY:
10265 case BFD_RELOC_VTABLE_INHERIT:
10267 case BFD_RELOC_32_SECREL:
10269 code = fixp->fx_r_type;
10271 case BFD_RELOC_X86_64_32S:
10272 if (!fixp->fx_pcrel)
10274 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
10275 code = fixp->fx_r_type;
10279 if (fixp->fx_pcrel)
10281 switch (fixp->fx_size)
10284 as_bad_where (fixp->fx_file, fixp->fx_line,
10285 _("can not do %d byte pc-relative relocation"),
10287 code = BFD_RELOC_32_PCREL;
10289 case 1: code = BFD_RELOC_8_PCREL; break;
10290 case 2: code = BFD_RELOC_16_PCREL; break;
10292 code = (fixp->fx_r_type == BFD_RELOC_X86_64_PC32_BND
10293 ? fixp-> fx_r_type : BFD_RELOC_32_PCREL);
10296 case 8: code = BFD_RELOC_64_PCREL; break;
10302 switch (fixp->fx_size)
10305 as_bad_where (fixp->fx_file, fixp->fx_line,
10306 _("can not do %d byte relocation"),
10308 code = BFD_RELOC_32;
10310 case 1: code = BFD_RELOC_8; break;
10311 case 2: code = BFD_RELOC_16; break;
10312 case 4: code = BFD_RELOC_32; break;
10314 case 8: code = BFD_RELOC_64; break;
10321 if ((code == BFD_RELOC_32
10322 || code == BFD_RELOC_32_PCREL
10323 || code == BFD_RELOC_X86_64_32S)
10325 && fixp->fx_addsy == GOT_symbol)
10328 code = BFD_RELOC_386_GOTPC;
10330 code = BFD_RELOC_X86_64_GOTPC32;
10332 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
10334 && fixp->fx_addsy == GOT_symbol)
10336 code = BFD_RELOC_X86_64_GOTPC64;
10339 rel = (arelent *) xmalloc (sizeof (arelent));
10340 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
10341 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
10343 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
10345 if (!use_rela_relocations)
10347 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
10348 vtable entry to be used in the relocation's section offset. */
10349 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
10350 rel->address = fixp->fx_offset;
10351 #if defined (OBJ_COFF) && defined (TE_PE)
10352 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
10353 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
10358 /* Use the rela in 64bit mode. */
10361 if (disallow_64bit_reloc)
10364 case BFD_RELOC_X86_64_DTPOFF64:
10365 case BFD_RELOC_X86_64_TPOFF64:
10366 case BFD_RELOC_64_PCREL:
10367 case BFD_RELOC_X86_64_GOTOFF64:
10368 case BFD_RELOC_X86_64_GOT64:
10369 case BFD_RELOC_X86_64_GOTPCREL64:
10370 case BFD_RELOC_X86_64_GOTPC64:
10371 case BFD_RELOC_X86_64_GOTPLT64:
10372 case BFD_RELOC_X86_64_PLTOFF64:
10373 as_bad_where (fixp->fx_file, fixp->fx_line,
10374 _("cannot represent relocation type %s in x32 mode"),
10375 bfd_get_reloc_code_name (code));
10381 if (!fixp->fx_pcrel)
10382 rel->addend = fixp->fx_offset;
10386 case BFD_RELOC_X86_64_PLT32:
10387 case BFD_RELOC_X86_64_PLT32_BND:
10388 case BFD_RELOC_X86_64_GOT32:
10389 case BFD_RELOC_X86_64_GOTPCREL:
10390 case BFD_RELOC_X86_64_TLSGD:
10391 case BFD_RELOC_X86_64_TLSLD:
10392 case BFD_RELOC_X86_64_GOTTPOFF:
10393 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
10394 case BFD_RELOC_X86_64_TLSDESC_CALL:
10395 rel->addend = fixp->fx_offset - fixp->fx_size;
10398 rel->addend = (section->vma
10400 + fixp->fx_addnumber
10401 + md_pcrel_from (fixp));
10406 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
10407 if (rel->howto == NULL)
10409 as_bad_where (fixp->fx_file, fixp->fx_line,
10410 _("cannot represent relocation type %s"),
10411 bfd_get_reloc_code_name (code));
10412 /* Set howto to a garbage value so that we can keep going. */
10413 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
10414 gas_assert (rel->howto != NULL);
10420 #include "tc-i386-intel.c"
10423 tc_x86_parse_to_dw2regnum (expressionS *exp)
10425 int saved_naked_reg;
10426 char saved_register_dot;
10428 saved_naked_reg = allow_naked_reg;
10429 allow_naked_reg = 1;
10430 saved_register_dot = register_chars['.'];
10431 register_chars['.'] = '.';
10432 allow_pseudo_reg = 1;
10433 expression_and_evaluate (exp);
10434 allow_pseudo_reg = 0;
10435 register_chars['.'] = saved_register_dot;
10436 allow_naked_reg = saved_naked_reg;
10438 if (exp->X_op == O_register && exp->X_add_number >= 0)
10440 if ((addressT) exp->X_add_number < i386_regtab_size)
10442 exp->X_op = O_constant;
10443 exp->X_add_number = i386_regtab[exp->X_add_number]
10444 .dw2_regnum[flag_code >> 1];
10447 exp->X_op = O_illegal;
10452 tc_x86_frame_initial_instructions (void)
10454 static unsigned int sp_regno[2];
10456 if (!sp_regno[flag_code >> 1])
10458 char *saved_input = input_line_pointer;
10459 char sp[][4] = {"esp", "rsp"};
10462 input_line_pointer = sp[flag_code >> 1];
10463 tc_x86_parse_to_dw2regnum (&exp);
10464 gas_assert (exp.X_op == O_constant);
10465 sp_regno[flag_code >> 1] = exp.X_add_number;
10466 input_line_pointer = saved_input;
10469 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
10470 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
10474 x86_dwarf2_addr_size (void)
10476 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
10477 if (x86_elf_abi == X86_64_X32_ABI)
10480 return bfd_arch_bits_per_address (stdoutput) / 8;
10484 i386_elf_section_type (const char *str, size_t len)
10486 if (flag_code == CODE_64BIT
10487 && len == sizeof ("unwind") - 1
10488 && strncmp (str, "unwind", 6) == 0)
10489 return SHT_X86_64_UNWIND;
10496 i386_solaris_fix_up_eh_frame (segT sec)
10498 if (flag_code == CODE_64BIT)
10499 elf_section_type (sec) = SHT_X86_64_UNWIND;
10505 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10509 exp.X_op = O_secrel;
10510 exp.X_add_symbol = symbol;
10511 exp.X_add_number = 0;
10512 emit_expr (&exp, size);
10516 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10517 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
10520 x86_64_section_letter (int letter, char **ptr_msg)
10522 if (flag_code == CODE_64BIT)
10525 return SHF_X86_64_LARGE;
10527 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
10530 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
10535 x86_64_section_word (char *str, size_t len)
10537 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
10538 return SHF_X86_64_LARGE;
10544 handle_large_common (int small ATTRIBUTE_UNUSED)
10546 if (flag_code != CODE_64BIT)
10548 s_comm_internal (0, elf_common_parse);
10549 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
10553 static segT lbss_section;
10554 asection *saved_com_section_ptr = elf_com_section_ptr;
10555 asection *saved_bss_section = bss_section;
10557 if (lbss_section == NULL)
10559 flagword applicable;
10560 segT seg = now_seg;
10561 subsegT subseg = now_subseg;
10563 /* The .lbss section is for local .largecomm symbols. */
10564 lbss_section = subseg_new (".lbss", 0);
10565 applicable = bfd_applicable_section_flags (stdoutput);
10566 bfd_set_section_flags (stdoutput, lbss_section,
10567 applicable & SEC_ALLOC);
10568 seg_info (lbss_section)->bss = 1;
10570 subseg_set (seg, subseg);
10573 elf_com_section_ptr = &_bfd_elf_large_com_section;
10574 bss_section = lbss_section;
10576 s_comm_internal (0, elf_common_parse);
10578 elf_com_section_ptr = saved_com_section_ptr;
10579 bss_section = saved_bss_section;
10582 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */