1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright (C) 1989-2014 Free Software Foundation, Inc.
4 This file is part of GAS, the GNU Assembler.
6 GAS is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3, or (at your option)
11 GAS is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
16 You should have received a copy of the GNU General Public License
17 along with GAS; see the file COPYING. If not, write to the Free
18 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
21 /* Intel 80386 machine specific gas.
22 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23 x86_64 support by Jan Hubicka (jh@suse.cz)
24 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25 Bugs & suggestions are completely welcome. This is free software.
26 Please help us make it better. */
29 #include "safe-ctype.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
36 #ifndef REGISTER_WARNINGS
37 #define REGISTER_WARNINGS 1
40 #ifndef INFER_ADDR_PREFIX
41 #define INFER_ADDR_PREFIX 1
45 #define DEFAULT_ARCH "i386"
50 #define INLINE __inline__
56 /* Prefixes will be emitted in the order defined below.
57 WAIT_PREFIX must be the first prefix since FWAIT really is an
58 instruction, and so must come before any prefixes.
59 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
60 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
66 #define HLE_PREFIX REP_PREFIX
67 #define BND_PREFIX REP_PREFIX
69 #define REX_PREFIX 6 /* must come last. */
70 #define MAX_PREFIXES 7 /* max prefixes per opcode */
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78 memory operand size in Intel syntax. */
79 #define WORD_MNEM_SUFFIX 'w'
80 #define BYTE_MNEM_SUFFIX 'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX 'l'
83 #define QWORD_MNEM_SUFFIX 'q'
84 #define XMMWORD_MNEM_SUFFIX 'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 #define ZMMWORD_MNEM_SUFFIX 'z'
87 /* Intel Syntax. Use a non-ascii letter since it never appears
89 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
91 #define END_OF_INSN '\0'
94 'templates' is for grouping together 'template' structures for opcodes
95 of the same name. This is only used for storing the insns in the grand
96 ole hash table of insns.
97 The templates themselves start at START and range up to (but not including)
102 const insn_template *start;
103 const insn_template *end;
107 /* 386 operand encoding bytes: see 386 book for details of this. */
110 unsigned int regmem; /* codes register or memory operand */
111 unsigned int reg; /* codes register operand (or extended opcode) */
112 unsigned int mode; /* how to interpret regmem & reg */
116 /* x86-64 extension prefix. */
117 typedef int rex_byte;
119 /* 386 opcode byte to code indirect addressing. */
128 /* x86 arch names, types and features */
131 const char *name; /* arch name */
132 unsigned int len; /* arch string length */
133 enum processor_type type; /* arch type */
134 i386_cpu_flags flags; /* cpu feature flags */
135 unsigned int skip; /* show_arch should skip this. */
136 unsigned int negated; /* turn off indicated flags. */
140 static void update_code_flag (int, int);
141 static void set_code_flag (int);
142 static void set_16bit_gcc_code_flag (int);
143 static void set_intel_syntax (int);
144 static void set_intel_mnemonic (int);
145 static void set_allow_index_reg (int);
146 static void set_check (int);
147 static void set_cpu_arch (int);
149 static void pe_directive_secrel (int);
151 static void signed_cons (int);
152 static char *output_invalid (int c);
153 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
155 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
157 static int i386_att_operand (char *);
158 static int i386_intel_operand (char *, int);
159 static int i386_intel_simplify (expressionS *);
160 static int i386_intel_parse_name (const char *, expressionS *);
161 static const reg_entry *parse_register (char *, char **);
162 static char *parse_insn (char *, char *);
163 static char *parse_operands (char *, const char *);
164 static void swap_operands (void);
165 static void swap_2_operands (int, int);
166 static void optimize_imm (void);
167 static void optimize_disp (void);
168 static const insn_template *match_template (void);
169 static int check_string (void);
170 static int process_suffix (void);
171 static int check_byte_reg (void);
172 static int check_long_reg (void);
173 static int check_qword_reg (void);
174 static int check_word_reg (void);
175 static int finalize_imm (void);
176 static int process_operands (void);
177 static const seg_entry *build_modrm_byte (void);
178 static void output_insn (void);
179 static void output_imm (fragS *, offsetT);
180 static void output_disp (fragS *, offsetT);
182 static void s_bss (int);
184 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
185 static void handle_large_common (int small ATTRIBUTE_UNUSED);
188 static const char *default_arch = DEFAULT_ARCH;
190 /* This struct describes rounding control and SAE in the instruction. */
204 static struct RC_Operation rc_op;
206 /* The struct describes masking, applied to OPERAND in the instruction.
207 MASK is a pointer to the corresponding mask register. ZEROING tells
208 whether merging or zeroing mask is used. */
209 struct Mask_Operation
211 const reg_entry *mask;
212 unsigned int zeroing;
213 /* The operand where this operation is associated. */
217 static struct Mask_Operation mask_op;
219 /* The struct describes broadcasting, applied to OPERAND. FACTOR is
221 struct Broadcast_Operation
223 /* Type of broadcast: no broadcast, {1to8}, or {1to16}. */
226 /* Index of broadcasted operand. */
230 static struct Broadcast_Operation broadcast_op;
235 /* VEX prefix is either 2 byte or 3 byte. EVEX is 4 byte. */
236 unsigned char bytes[4];
238 /* Destination or source register specifier. */
239 const reg_entry *register_specifier;
242 /* 'md_assemble ()' gathers together information and puts it into a
249 const reg_entry *regs;
254 operand_size_mismatch,
255 operand_type_mismatch,
256 register_type_mismatch,
257 number_of_operands_mismatch,
258 invalid_instruction_suffix,
261 unsupported_with_intel_mnemonic,
264 invalid_vsib_address,
265 invalid_vector_register_set,
266 unsupported_vector_index_register,
267 unsupported_broadcast,
268 broadcast_not_on_src_operand,
271 mask_not_on_destination,
274 rc_sae_operand_not_last_imm,
275 invalid_register_operand,
281 /* TM holds the template for the insn were currently assembling. */
284 /* SUFFIX holds the instruction size suffix for byte, word, dword
285 or qword, if given. */
288 /* OPERANDS gives the number of given operands. */
289 unsigned int operands;
291 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
292 of given register, displacement, memory operands and immediate
294 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
296 /* TYPES [i] is the type (see above #defines) which tells us how to
297 use OP[i] for the corresponding operand. */
298 i386_operand_type types[MAX_OPERANDS];
300 /* Displacement expression, immediate expression, or register for each
302 union i386_op op[MAX_OPERANDS];
304 /* Flags for operands. */
305 unsigned int flags[MAX_OPERANDS];
306 #define Operand_PCrel 1
308 /* Relocation type for operand */
309 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
311 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
312 the base index byte below. */
313 const reg_entry *base_reg;
314 const reg_entry *index_reg;
315 unsigned int log2_scale_factor;
317 /* SEG gives the seg_entries of this insn. They are zero unless
318 explicit segment overrides are given. */
319 const seg_entry *seg[2];
321 /* PREFIX holds all the given prefix opcodes (usually null).
322 PREFIXES is the number of prefix opcodes. */
323 unsigned int prefixes;
324 unsigned char prefix[MAX_PREFIXES];
326 /* RM and SIB are the modrm byte and the sib byte where the
327 addressing modes of this insn are encoded. */
334 /* Masking attributes. */
335 struct Mask_Operation *mask;
337 /* Rounding control and SAE attributes. */
338 struct RC_Operation *rounding;
340 /* Broadcasting attributes. */
341 struct Broadcast_Operation *broadcast;
343 /* Compressed disp8*N attribute. */
344 unsigned int memshift;
346 /* Swap operand in encoding. */
347 unsigned int swap_operand;
349 /* Prefer 8bit or 32bit displacement in encoding. */
352 disp_encoding_default = 0,
358 const char *rep_prefix;
361 const char *hle_prefix;
363 /* Have BND prefix. */
364 const char *bnd_prefix;
366 /* Need VREX to support upper 16 registers. */
370 enum i386_error error;
373 typedef struct _i386_insn i386_insn;
375 /* Link RC type with corresponding string, that'll be looked for in
384 static const struct RC_name RC_NamesTable[] =
386 { rne, STRING_COMMA_LEN ("rn-sae") },
387 { rd, STRING_COMMA_LEN ("rd-sae") },
388 { ru, STRING_COMMA_LEN ("ru-sae") },
389 { rz, STRING_COMMA_LEN ("rz-sae") },
390 { saeonly, STRING_COMMA_LEN ("sae") },
393 /* List of chars besides those in app.c:symbol_chars that can start an
394 operand. Used to prevent the scrubber eating vital white-space. */
395 const char extra_symbol_chars[] = "*%-([{"
404 #if (defined (TE_I386AIX) \
405 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
406 && !defined (TE_GNU) \
407 && !defined (TE_LINUX) \
408 && !defined (TE_NACL) \
409 && !defined (TE_NETWARE) \
410 && !defined (TE_FreeBSD) \
411 && !defined (TE_DragonFly) \
412 && !defined (TE_NetBSD)))
413 /* This array holds the chars that always start a comment. If the
414 pre-processor is disabled, these aren't very useful. The option
415 --divide will remove '/' from this list. */
416 const char *i386_comment_chars = "#/";
417 #define SVR4_COMMENT_CHARS 1
418 #define PREFIX_SEPARATOR '\\'
421 const char *i386_comment_chars = "#";
422 #define PREFIX_SEPARATOR '/'
425 /* This array holds the chars that only start a comment at the beginning of
426 a line. If the line seems to have the form '# 123 filename'
427 .line and .file directives will appear in the pre-processed output.
428 Note that input_file.c hand checks for '#' at the beginning of the
429 first line of the input file. This is because the compiler outputs
430 #NO_APP at the beginning of its output.
431 Also note that comments started like this one will always work if
432 '/' isn't otherwise defined. */
433 const char line_comment_chars[] = "#/";
435 const char line_separator_chars[] = ";";
437 /* Chars that can be used to separate mant from exp in floating point
439 const char EXP_CHARS[] = "eE";
441 /* Chars that mean this number is a floating point constant
444 const char FLT_CHARS[] = "fFdDxX";
446 /* Tables for lexical analysis. */
447 static char mnemonic_chars[256];
448 static char register_chars[256];
449 static char operand_chars[256];
450 static char identifier_chars[256];
451 static char digit_chars[256];
453 /* Lexical macros. */
454 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
455 #define is_operand_char(x) (operand_chars[(unsigned char) x])
456 #define is_register_char(x) (register_chars[(unsigned char) x])
457 #define is_space_char(x) ((x) == ' ')
458 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
459 #define is_digit_char(x) (digit_chars[(unsigned char) x])
461 /* All non-digit non-letter characters that may occur in an operand. */
462 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
464 /* md_assemble() always leaves the strings it's passed unaltered. To
465 effect this we maintain a stack of saved characters that we've smashed
466 with '\0's (indicating end of strings for various sub-fields of the
467 assembler instruction). */
468 static char save_stack[32];
469 static char *save_stack_p;
470 #define END_STRING_AND_SAVE(s) \
471 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
472 #define RESTORE_END_STRING(s) \
473 do { *(s) = *--save_stack_p; } while (0)
475 /* The instruction we're assembling. */
478 /* Possible templates for current insn. */
479 static const templates *current_templates;
481 /* Per instruction expressionS buffers: max displacements & immediates. */
482 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
483 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
485 /* Current operand we are working on. */
486 static int this_operand = -1;
488 /* We support four different modes. FLAG_CODE variable is used to distinguish
496 static enum flag_code flag_code;
497 static unsigned int object_64bit;
498 static unsigned int disallow_64bit_reloc;
499 static int use_rela_relocations = 0;
501 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
502 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
503 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
505 /* The ELF ABI to use. */
513 static enum x86_elf_abi x86_elf_abi = I386_ABI;
516 #if defined (TE_PE) || defined (TE_PEP)
517 /* Use big object file format. */
518 static int use_big_obj = 0;
521 /* 1 for intel syntax,
523 static int intel_syntax = 0;
525 /* 1 for intel mnemonic,
526 0 if att mnemonic. */
527 static int intel_mnemonic = !SYSV386_COMPAT;
529 /* 1 if support old (<= 2.8.1) versions of gcc. */
530 static int old_gcc = OLDGCC_COMPAT;
532 /* 1 if pseudo registers are permitted. */
533 static int allow_pseudo_reg = 0;
535 /* 1 if register prefix % not required. */
536 static int allow_naked_reg = 0;
538 /* 1 if the assembler should add BND prefix for all control-transferring
539 instructions supporting it, even if this prefix wasn't specified
541 static int add_bnd_prefix = 0;
543 /* 1 if pseudo index register, eiz/riz, is allowed. */
544 static int allow_index_reg = 0;
546 /* 1 if the assembler should ignore LOCK prefix, even if it was
547 specified explicitly. */
548 static int omit_lock_prefix = 0;
550 static enum check_kind
556 sse_check, operand_check = check_warning;
558 /* Register prefix used for error message. */
559 static const char *register_prefix = "%";
561 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
562 leave, push, and pop instructions so that gcc has the same stack
563 frame as in 32 bit mode. */
564 static char stackop_size = '\0';
566 /* Non-zero to optimize code alignment. */
567 int optimize_align_code = 1;
569 /* Non-zero to quieten some warnings. */
570 static int quiet_warnings = 0;
573 static const char *cpu_arch_name = NULL;
574 static char *cpu_sub_arch_name = NULL;
576 /* CPU feature flags. */
577 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
579 /* If we have selected a cpu we are generating instructions for. */
580 static int cpu_arch_tune_set = 0;
582 /* Cpu we are generating instructions for. */
583 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
585 /* CPU feature flags of cpu we are generating instructions for. */
586 static i386_cpu_flags cpu_arch_tune_flags;
588 /* CPU instruction set architecture used. */
589 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
591 /* CPU feature flags of instruction set architecture used. */
592 i386_cpu_flags cpu_arch_isa_flags;
594 /* If set, conditional jumps are not automatically promoted to handle
595 larger than a byte offset. */
596 static unsigned int no_cond_jump_promotion = 0;
598 /* Encode SSE instructions with VEX prefix. */
599 static unsigned int sse2avx;
601 /* Encode scalar AVX instructions with specific vector length. */
608 /* Encode scalar EVEX LIG instructions with specific vector length. */
616 /* Encode EVEX WIG instructions with specific evex.w. */
623 /* Value to encode in EVEX RC bits, for SAE-only instructions. */
624 static enum rc_type evexrcig = rne;
626 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
627 static symbolS *GOT_symbol;
629 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
630 unsigned int x86_dwarf2_return_column;
632 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
633 int x86_cie_data_alignment;
635 /* Interface to relax_segment.
636 There are 3 major relax states for 386 jump insns because the
637 different types of jumps add different sizes to frags when we're
638 figuring out what sort of jump to choose to reach a given label. */
641 #define UNCOND_JUMP 0
643 #define COND_JUMP86 2
648 #define SMALL16 (SMALL | CODE16)
650 #define BIG16 (BIG | CODE16)
654 #define INLINE __inline__
660 #define ENCODE_RELAX_STATE(type, size) \
661 ((relax_substateT) (((type) << 2) | (size)))
662 #define TYPE_FROM_RELAX_STATE(s) \
664 #define DISP_SIZE_FROM_RELAX_STATE(s) \
665 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
667 /* This table is used by relax_frag to promote short jumps to long
668 ones where necessary. SMALL (short) jumps may be promoted to BIG
669 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
670 don't allow a short jump in a 32 bit code segment to be promoted to
671 a 16 bit offset jump because it's slower (requires data size
672 prefix), and doesn't work, unless the destination is in the bottom
673 64k of the code segment (The top 16 bits of eip are zeroed). */
675 const relax_typeS md_relax_table[] =
678 1) most positive reach of this state,
679 2) most negative reach of this state,
680 3) how many bytes this mode will have in the variable part of the frag
681 4) which index into the table to try if we can't fit into this one. */
683 /* UNCOND_JUMP states. */
684 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
685 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
686 /* dword jmp adds 4 bytes to frag:
687 0 extra opcode bytes, 4 displacement bytes. */
689 /* word jmp adds 2 bytes to frag:
690 0 extra opcode bytes, 2 displacement bytes. */
693 /* COND_JUMP states. */
694 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
695 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
696 /* dword conditionals add 5 bytes to frag:
697 1 extra opcode byte, 4 displacement bytes. */
699 /* word conditionals add 3 bytes to frag:
700 1 extra opcode byte, 2 displacement bytes. */
703 /* COND_JUMP86 states. */
704 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
705 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
706 /* dword conditionals add 5 bytes to frag:
707 1 extra opcode byte, 4 displacement bytes. */
709 /* word conditionals add 4 bytes to frag:
710 1 displacement byte and a 3 byte long branch insn. */
714 static const arch_entry cpu_arch[] =
716 /* Do not replace the first two entries - i386_target_format()
717 relies on them being there in this order. */
718 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
719 CPU_GENERIC32_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
721 CPU_GENERIC64_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
723 CPU_NONE_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
725 CPU_I186_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
727 CPU_I286_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
729 CPU_I386_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
731 CPU_I486_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
733 CPU_I586_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
735 CPU_I686_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
737 CPU_I586_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
739 CPU_PENTIUMPRO_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
741 CPU_P2_FLAGS, 0, 0 },
742 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
743 CPU_P3_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
745 CPU_P4_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
747 CPU_CORE_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
749 CPU_NOCONA_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
751 CPU_CORE_FLAGS, 1, 0 },
752 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
753 CPU_CORE_FLAGS, 0, 0 },
754 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
755 CPU_CORE2_FLAGS, 1, 0 },
756 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
757 CPU_CORE2_FLAGS, 0, 0 },
758 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
759 CPU_COREI7_FLAGS, 0, 0 },
760 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
761 CPU_L1OM_FLAGS, 0, 0 },
762 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
763 CPU_K1OM_FLAGS, 0, 0 },
764 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
765 CPU_K6_FLAGS, 0, 0 },
766 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
767 CPU_K6_2_FLAGS, 0, 0 },
768 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
769 CPU_ATHLON_FLAGS, 0, 0 },
770 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
771 CPU_K8_FLAGS, 1, 0 },
772 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
773 CPU_K8_FLAGS, 0, 0 },
774 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
775 CPU_K8_FLAGS, 0, 0 },
776 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
777 CPU_AMDFAM10_FLAGS, 0, 0 },
778 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
779 CPU_BDVER1_FLAGS, 0, 0 },
780 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
781 CPU_BDVER2_FLAGS, 0, 0 },
782 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
783 CPU_BDVER3_FLAGS, 0, 0 },
784 { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD,
785 CPU_BDVER4_FLAGS, 0, 0 },
786 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
787 CPU_BTVER1_FLAGS, 0, 0 },
788 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
789 CPU_BTVER2_FLAGS, 0, 0 },
790 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
791 CPU_8087_FLAGS, 0, 0 },
792 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
793 CPU_287_FLAGS, 0, 0 },
794 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
795 CPU_387_FLAGS, 0, 0 },
796 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
797 CPU_ANY87_FLAGS, 0, 1 },
798 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
799 CPU_MMX_FLAGS, 0, 0 },
800 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
801 CPU_3DNOWA_FLAGS, 0, 1 },
802 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
803 CPU_SSE_FLAGS, 0, 0 },
804 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
805 CPU_SSE2_FLAGS, 0, 0 },
806 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
807 CPU_SSE3_FLAGS, 0, 0 },
808 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
809 CPU_SSSE3_FLAGS, 0, 0 },
810 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
811 CPU_SSE4_1_FLAGS, 0, 0 },
812 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
813 CPU_SSE4_2_FLAGS, 0, 0 },
814 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
815 CPU_SSE4_2_FLAGS, 0, 0 },
816 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
817 CPU_ANY_SSE_FLAGS, 0, 1 },
818 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
819 CPU_AVX_FLAGS, 0, 0 },
820 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
821 CPU_AVX2_FLAGS, 0, 0 },
822 { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
823 CPU_AVX512F_FLAGS, 0, 0 },
824 { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
825 CPU_AVX512CD_FLAGS, 0, 0 },
826 { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
827 CPU_AVX512ER_FLAGS, 0, 0 },
828 { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
829 CPU_AVX512PF_FLAGS, 0, 0 },
830 { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN,
831 CPU_AVX512DQ_FLAGS, 0, 0 },
832 { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN,
833 CPU_AVX512BW_FLAGS, 0, 0 },
834 { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN,
835 CPU_AVX512VL_FLAGS, 0, 0 },
836 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
837 CPU_ANY_AVX_FLAGS, 0, 1 },
838 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
839 CPU_VMX_FLAGS, 0, 0 },
840 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
841 CPU_VMFUNC_FLAGS, 0, 0 },
842 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
843 CPU_SMX_FLAGS, 0, 0 },
844 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
845 CPU_XSAVE_FLAGS, 0, 0 },
846 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
847 CPU_XSAVEOPT_FLAGS, 0, 0 },
848 { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN,
849 CPU_XSAVEC_FLAGS, 0, 0 },
850 { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN,
851 CPU_XSAVES_FLAGS, 0, 0 },
852 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
853 CPU_AES_FLAGS, 0, 0 },
854 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
855 CPU_PCLMUL_FLAGS, 0, 0 },
856 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
857 CPU_PCLMUL_FLAGS, 1, 0 },
858 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
859 CPU_FSGSBASE_FLAGS, 0, 0 },
860 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
861 CPU_RDRND_FLAGS, 0, 0 },
862 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
863 CPU_F16C_FLAGS, 0, 0 },
864 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
865 CPU_BMI2_FLAGS, 0, 0 },
866 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
867 CPU_FMA_FLAGS, 0, 0 },
868 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
869 CPU_FMA4_FLAGS, 0, 0 },
870 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
871 CPU_XOP_FLAGS, 0, 0 },
872 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
873 CPU_LWP_FLAGS, 0, 0 },
874 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
875 CPU_MOVBE_FLAGS, 0, 0 },
876 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
877 CPU_CX16_FLAGS, 0, 0 },
878 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
879 CPU_EPT_FLAGS, 0, 0 },
880 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
881 CPU_LZCNT_FLAGS, 0, 0 },
882 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
883 CPU_HLE_FLAGS, 0, 0 },
884 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
885 CPU_RTM_FLAGS, 0, 0 },
886 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
887 CPU_INVPCID_FLAGS, 0, 0 },
888 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
889 CPU_CLFLUSH_FLAGS, 0, 0 },
890 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
891 CPU_NOP_FLAGS, 0, 0 },
892 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
893 CPU_SYSCALL_FLAGS, 0, 0 },
894 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
895 CPU_RDTSCP_FLAGS, 0, 0 },
896 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
897 CPU_3DNOW_FLAGS, 0, 0 },
898 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
899 CPU_3DNOWA_FLAGS, 0, 0 },
900 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
901 CPU_PADLOCK_FLAGS, 0, 0 },
902 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
903 CPU_SVME_FLAGS, 1, 0 },
904 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
905 CPU_SVME_FLAGS, 0, 0 },
906 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
907 CPU_SSE4A_FLAGS, 0, 0 },
908 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
909 CPU_ABM_FLAGS, 0, 0 },
910 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
911 CPU_BMI_FLAGS, 0, 0 },
912 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
913 CPU_TBM_FLAGS, 0, 0 },
914 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
915 CPU_ADX_FLAGS, 0, 0 },
916 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
917 CPU_RDSEED_FLAGS, 0, 0 },
918 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
919 CPU_PRFCHW_FLAGS, 0, 0 },
920 { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
921 CPU_SMAP_FLAGS, 0, 0 },
922 { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
923 CPU_MPX_FLAGS, 0, 0 },
924 { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
925 CPU_SHA_FLAGS, 0, 0 },
926 { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN,
927 CPU_CLFLUSHOPT_FLAGS, 0, 0 },
928 { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN,
929 CPU_PREFETCHWT1_FLAGS, 0, 0 },
930 { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN,
931 CPU_SE1_FLAGS, 0, 0 },
932 { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN,
933 CPU_CLWB_FLAGS, 0, 0 },
934 { STRING_COMMA_LEN (".pcommit"), PROCESSOR_UNKNOWN,
935 CPU_PCOMMIT_FLAGS, 0, 0 },
936 { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN,
937 CPU_AVX512IFMA_FLAGS, 0, 0 },
938 { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN,
939 CPU_AVX512VBMI_FLAGS, 0, 0 },
943 /* Like s_lcomm_internal in gas/read.c but the alignment string
944 is allowed to be optional. */
947 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
954 && *input_line_pointer == ',')
956 align = parse_align (needs_align - 1);
958 if (align == (addressT) -1)
973 bss_alloc (symbolP, size, align);
978 pe_lcomm (int needs_align)
980 s_comm_internal (needs_align * 2, pe_lcomm_internal);
984 const pseudo_typeS md_pseudo_table[] =
986 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
987 {"align", s_align_bytes, 0},
989 {"align", s_align_ptwo, 0},
991 {"arch", set_cpu_arch, 0},
995 {"lcomm", pe_lcomm, 1},
997 {"ffloat", float_cons, 'f'},
998 {"dfloat", float_cons, 'd'},
999 {"tfloat", float_cons, 'x'},
1001 {"slong", signed_cons, 4},
1002 {"noopt", s_ignore, 0},
1003 {"optim", s_ignore, 0},
1004 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
1005 {"code16", set_code_flag, CODE_16BIT},
1006 {"code32", set_code_flag, CODE_32BIT},
1007 {"code64", set_code_flag, CODE_64BIT},
1008 {"intel_syntax", set_intel_syntax, 1},
1009 {"att_syntax", set_intel_syntax, 0},
1010 {"intel_mnemonic", set_intel_mnemonic, 1},
1011 {"att_mnemonic", set_intel_mnemonic, 0},
1012 {"allow_index_reg", set_allow_index_reg, 1},
1013 {"disallow_index_reg", set_allow_index_reg, 0},
1014 {"sse_check", set_check, 0},
1015 {"operand_check", set_check, 1},
1016 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1017 {"largecomm", handle_large_common, 0},
1019 {"file", (void (*) (int)) dwarf2_directive_file, 0},
1020 {"loc", dwarf2_directive_loc, 0},
1021 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
1024 {"secrel32", pe_directive_secrel, 0},
1029 /* For interface with expression (). */
1030 extern char *input_line_pointer;
1032 /* Hash table for instruction mnemonic lookup. */
1033 static struct hash_control *op_hash;
1035 /* Hash table for register lookup. */
1036 static struct hash_control *reg_hash;
/* Fill COUNT bytes of no-op padding into FRAGP->fr_literal at
   fr_fix so the following code label is aligned.  The nop pattern
   tables below are indexed by (length - 1); which table is used
   depends on flag_code and on the processor selected by -mtune=/
   -march=/.arch.  NOTE(review): this extraction appears to be
   missing lines (braces, some initializers, case labels) — verify
   against the pristine file before editing.  */
i386_align_code (fragS *fragP, int count)
  /* Various efficient no-op patterns for aligning code labels.
     Note: Don't try to assemble the instructions in the comments.
     0L and 0w are not legal.  */
  static const char f32_1[] =
  static const char f32_2[] =
    {0x66,0x90};				/* xchg %ax,%ax */
  static const char f32_3[] =
    {0x8d,0x76,0x00};				/* leal 0(%esi),%esi */
  static const char f32_4[] =
    {0x8d,0x74,0x26,0x00};			/* leal 0(%esi,1),%esi */
  static const char f32_5[] =
     0x8d,0x74,0x26,0x00};			/* leal 0(%esi,1),%esi */
  static const char f32_6[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00};		/* leal 0L(%esi),%esi */
  static const char f32_7[] =
    {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
  static const char f32_8[] =
     0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
  static const char f32_9[] =
    {0x89,0xf6,					/* movl %esi,%esi */
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_10[] =
    {0x8d,0x76,0x00,				/* leal 0(%esi),%esi */
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_11[] =
    {0x8d,0x74,0x26,0x00,			/* leal 0(%esi,1),%esi */
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_12[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00,		/* leal 0L(%esi),%esi */
     0x8d,0xbf,0x00,0x00,0x00,0x00};		/* leal 0L(%edi),%edi */
  static const char f32_13[] =
    {0x8d,0xb6,0x00,0x00,0x00,0x00,		/* leal 0L(%esi),%esi */
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  static const char f32_14[] =
    {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00,	/* leal 0L(%esi,1),%esi */
     0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
  /* 16-bit mode patterns (lengths 1 and 2 reuse f32_1/f32_2).  */
  static const char f16_3[] =
    {0x8d,0x74,0x00};				/* lea 0(%esi),%esi */
  static const char f16_4[] =
    {0x8d,0xb4,0x00,0x00};			/* lea 0w(%si),%si */
  static const char f16_5[] =
     0x8d,0xb4,0x00,0x00};			/* lea 0w(%si),%si */
  static const char f16_6[] =
    {0x89,0xf6,					/* mov %si,%si */
     0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di */
  static const char f16_7[] =
    {0x8d,0x74,0x00,				/* lea 0(%si),%si */
     0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di */
  static const char f16_8[] =
    {0x8d,0xb4,0x00,0x00,			/* lea 0w(%si),%si */
     0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di */
  /* Short jump over a run of single-byte nops; byte 1 (the jump
     displacement) is patched below for the actual pad size.  */
  static const char jump_31[] =
    {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90,	/* jmp .+31; lotsa nops	*/
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
     0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
  static const char *const f32_patt[] = {
    f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
    f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
  static const char *const f16_patt[] = {
    f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
  /* Multi-byte 0F 1F nops (available when CpuNop is set).  */
  /* nopl (%[re]ax) */
  static const char alt_3[] =
  /* nopl 0(%[re]ax) */
  static const char alt_4[] =
    {0x0f,0x1f,0x40,0x00};
  /* nopl 0(%[re]ax,%[re]ax,1) */
  static const char alt_5[] =
    {0x0f,0x1f,0x44,0x00,0x00};
  /* nopw 0(%[re]ax,%[re]ax,1) */
  static const char alt_6[] =
    {0x66,0x0f,0x1f,0x44,0x00,0x00};
  /* nopl 0L(%[re]ax) */
  static const char alt_7[] =
    {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
  /* nopl 0L(%[re]ax,%[re]ax,1) */
  static const char alt_8[] =
    {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* nopw 0L(%[re]ax,%[re]ax,1) */
  static const char alt_9[] =
    {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_10[] =
    {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_11[] =
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_12[] =
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_13[] =
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_14[] =
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
     nopw %cs:0L(%[re]ax,%[re]ax,1) */
  static const char alt_long_15[] =
     0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  /* Lengths 11-15 built from two shorter nops, without the %cs
     segment-prefix form (preferred on AMD processors).  */
  /* nopl 0(%[re]ax,%[re]ax,1)
     nopw 0(%[re]ax,%[re]ax,1) */
  static const char alt_short_11[] =
    {0x0f,0x1f,0x44,0x00,0x00,
     0x66,0x0f,0x1f,0x44,0x00,0x00};
  /* nopw 0(%[re]ax,%[re]ax,1)
     nopw 0(%[re]ax,%[re]ax,1) */
  static const char alt_short_12[] =
    {0x66,0x0f,0x1f,0x44,0x00,0x00,
     0x66,0x0f,0x1f,0x44,0x00,0x00};
  /* nopw 0(%[re]ax,%[re]ax,1)
  static const char alt_short_13[] =
    {0x66,0x0f,0x1f,0x44,0x00,0x00,
     0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
  static const char alt_short_14[] =
    {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
     0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
     nopl 0L(%[re]ax,%[re]ax,1) */
  static const char alt_short_15[] =
    {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
     0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
  static const char *const alt_short_patt[] = {
    f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
    alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
    alt_short_14, alt_short_15
  static const char *const alt_long_patt[] = {
    f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
    alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
    alt_long_14, alt_long_15

  /* Only align for at least a positive non-zero boundary. */
  if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)

  /* We need to decide which NOP sequence to use for 32bit and
     64bit. When -mtune= is used:

     1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
     PROCESSOR_GENERIC32, f32_patt will be used.
     2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
     PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
     PROCESSOR_GENERIC64, alt_long_patt will be used.
     3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
     PROCESSOR_AMDFAM10, PROCESSOR_BD and PROCESSOR_BT, alt_short_patt

     When -mtune= isn't used, alt_long_patt will be used if
     cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will

     When -march= or .arch is used, we can't use anything beyond
     cpu_arch_isa_flags. */

  if (flag_code == CODE_16BIT)
	  /* Long pad in 16-bit mode: jump over single-byte nops.  */
	  memcpy (fragP->fr_literal + fragP->fr_fix,
	  /* Adjust jump offset. */
	  fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
	memcpy (fragP->fr_literal + fragP->fr_fix,
		f16_patt[count - 1], count);
      const char *const *patt = NULL;

      if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
	  /* PROCESSOR_UNKNOWN means that all ISAs may be used.  */
	  switch (cpu_arch_tune)
	    case PROCESSOR_UNKNOWN:
	      /* We use cpu_arch_isa_flags to check if we SHOULD
		 optimize with nops.  */
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_long_patt;
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	    case PROCESSOR_GENERIC64:
	      patt = alt_long_patt;
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_AMDFAM10:
	      patt = alt_short_patt;
	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_GENERIC32:
	  /* -march=/.arch was given: restrict choice by the frag's
	     recorded tuning.  */
	  switch (fragP->tc_frag_data.tune)
	    case PROCESSOR_UNKNOWN:
	      /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
		 PROCESSOR_UNKNOWN.  */
	    case PROCESSOR_I386:
	    case PROCESSOR_I486:
	    case PROCESSOR_PENTIUM:
	    case PROCESSOR_ATHLON:
	    case PROCESSOR_AMDFAM10:
	    case PROCESSOR_GENERIC32:
	      /* We use cpu_arch_isa_flags to check if we CAN optimize
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_short_patt;
	    case PROCESSOR_PENTIUMPRO:
	    case PROCESSOR_PENTIUM4:
	    case PROCESSOR_NOCONA:
	    case PROCESSOR_CORE:
	    case PROCESSOR_CORE2:
	    case PROCESSOR_COREI7:
	    case PROCESSOR_L1OM:
	    case PROCESSOR_K1OM:
	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
		patt = alt_long_patt;
	    case PROCESSOR_GENERIC64:
	      patt = alt_long_patt;

      if (patt == f32_patt)
	  /* If the padding is less than 15 bytes, we use the normal
	     ones.  Otherwise, we use a jump instruction and adjust
	  /* For 64bit, the limit is 3 bytes. */
	  if (flag_code == CODE_64BIT
	      && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
	      memcpy (fragP->fr_literal + fragP->fr_fix,
		      patt[count - 1], count);
	      memcpy (fragP->fr_literal + fragP->fr_fix,
	      /* Adjust jump offset. */
	      fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
	  /* Maximum length of an instruction is 15 byte.  If the
	     padding is greater than 15 bytes and we don't use jump,
	     we have to break it into smaller pieces. */
	  int padding = count;
	  while (padding > 15)
	      memcpy (fragP->fr_literal + fragP->fr_fix + padding,
	  memcpy (fragP->fr_literal + fragP->fr_fix,
		  patt [padding - 1], padding);
  /* Record that the whole COUNT bytes were emitted.  */
  fragP->fr_var = count;
/* Return 1 iff every word of operand type X is zero (i.e. X matches
   no operand kind).  Dispatches on the compile-time array size.  */
operand_type_all_zero (const union i386_operand_type *x)
  switch (ARRAY_SIZE(x->array))
      return !x->array[0];
/* Set every word of operand type X to V (V is 0 to clear all bits).  */
operand_type_set (union i386_operand_type *x, unsigned int v)
  switch (ARRAY_SIZE(x->array))
/* Return 1 iff operand types X and Y are identical, comparing each
   word of the underlying array.  */
operand_type_equal (const union i386_operand_type *x,
		    const union i386_operand_type *y)
  switch (ARRAY_SIZE(x->array))
      if (x->array[2] != y->array[2])
      if (x->array[1] != y->array[1])
      return x->array[0] == y->array[0];
/* Return 1 iff every word of CPU flag set X is zero.  */
cpu_flags_all_zero (const union i386_cpu_flags *x)
  switch (ARRAY_SIZE(x->array))
      return !x->array[0];
/* Set every word of CPU flag set X to V.  */
cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
  switch (ARRAY_SIZE(x->array))
/* Return 1 iff CPU flag sets X and Y are identical word-for-word.  */
cpu_flags_equal (const union i386_cpu_flags *x,
		 const union i386_cpu_flags *y)
  switch (ARRAY_SIZE(x->array))
      if (x->array[2] != y->array[2])
      if (x->array[1] != y->array[1])
      return x->array[0] == y->array[0];
/* Return nonzero iff flags F are compatible with the current code
   size: reject CpuNo64 templates in 64-bit mode and Cpu64 templates
   outside it.  */
cpu_flags_check_cpu64 (i386_cpu_flags f)
  return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
	   || (flag_code != CODE_64BIT && f.bitfield.cpu64));
/* Return the bitwise AND of CPU flag sets X and Y.  */
static INLINE i386_cpu_flags
cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
  switch (ARRAY_SIZE (x.array))
      x.array [2] &= y.array [2];
      x.array [1] &= y.array [1];
      x.array [0] &= y.array [0];
/* Return the bitwise OR of CPU flag sets X and Y.  */
static INLINE i386_cpu_flags
cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
  switch (ARRAY_SIZE (x.array))
      x.array [2] |= y.array [2];
      x.array [1] |= y.array [1];
      x.array [0] |= y.array [0];
/* Return X with every flag that is set in Y cleared (X & ~Y).  */
static INLINE i386_cpu_flags
cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
  switch (ARRAY_SIZE (x.array))
      x.array [2] &= ~y.array [2];
      x.array [1] &= ~y.array [1];
      x.array [0] &= ~y.array [0];
/* Bit values returned by cpu_flags_match: individual match aspects
   plus the two composite "full match" masks below.  */
#define CPU_FLAGS_ARCH_MATCH		0x1
#define CPU_FLAGS_64BIT_MATCH		0x2
#define CPU_FLAGS_AES_MATCH		0x4
#define CPU_FLAGS_PCLMUL_MATCH		0x8
#define CPU_FLAGS_AVX_MATCH		0x10

#define CPU_FLAGS_32BIT_MATCH \
  (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
   | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
#define CPU_FLAGS_PERFECT_MATCH \
  (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
/* Return CPU flags match bits. */
/* Compares template T's required CPU flags against the active
   cpu_arch_flags.  64-bit compatibility is checked first; templates
   needing no special flags match all archs.  AVX templates get extra
   AES/PCLMUL/SSE2AVX checks.  NOTE(review): `sse2avx||` lacks a
   space before the operator — cosmetic only.  */
cpu_flags_match (const insn_template *t)
  i386_cpu_flags x = t->cpu_flags;
  int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;

  /* Width bits were already judged above; drop them for the rest.  */
  x.bitfield.cpu64 = 0;
  x.bitfield.cpuno64 = 0;

  if (cpu_flags_all_zero (&x))
      /* This instruction is available on all archs.  */
      match |= CPU_FLAGS_32BIT_MATCH;
      /* This instruction is available only on some archs.  */
      i386_cpu_flags cpu = cpu_arch_flags;

      cpu.bitfield.cpu64 = 0;
      cpu.bitfield.cpuno64 = 0;
      cpu = cpu_flags_and (x, cpu);
      if (!cpu_flags_all_zero (&cpu))
	  if (x.bitfield.cpuavx)
	      /* We only need to check AES/PCLMUL/SSE2AVX with AVX.  */
	      if (cpu.bitfield.cpuavx)
		  /* Check SSE2AVX.  */
		  if (!t->opcode_modifier.sse2avx|| sse2avx)
		      match |= (CPU_FLAGS_ARCH_MATCH
				| CPU_FLAGS_AVX_MATCH);
		      if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
			match |= CPU_FLAGS_AES_MATCH;
		      if (!x.bitfield.cpupclmul
			  || cpu.bitfield.cpupclmul)
			match |= CPU_FLAGS_PCLMUL_MATCH;
	    match |= CPU_FLAGS_ARCH_MATCH;
	match |= CPU_FLAGS_32BIT_MATCH;
/* Return the bitwise AND of operand types X and Y.  */
static INLINE i386_operand_type
operand_type_and (i386_operand_type x, i386_operand_type y)
  switch (ARRAY_SIZE (x.array))
      x.array [2] &= y.array [2];
      x.array [1] &= y.array [1];
      x.array [0] &= y.array [0];
/* Return the bitwise OR of operand types X and Y.  */
static INLINE i386_operand_type
operand_type_or (i386_operand_type x, i386_operand_type y)
  switch (ARRAY_SIZE (x.array))
      x.array [2] |= y.array [2];
      x.array [1] |= y.array [1];
      x.array [0] |= y.array [0];
/* Return the bitwise XOR of operand types X and Y.  */
static INLINE i386_operand_type
operand_type_xor (i386_operand_type x, i386_operand_type y)
  switch (ARRAY_SIZE (x.array))
      x.array [2] ^= y.array [2];
      x.array [1] ^= y.array [1];
      x.array [0] ^= y.array [0];
/* Prebuilt operand-type constants used throughout operand matching;
   each expands to a designated-initializer macro from i386-init.h.  */
static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
static const i386_operand_type control = OPERAND_TYPE_CONTROL;
static const i386_operand_type inoutportreg
  = OPERAND_TYPE_INOUTPORTREG;
static const i386_operand_type reg16_inoutportreg
  = OPERAND_TYPE_REG16_INOUTPORTREG;
static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
static const i386_operand_type anydisp
  = OPERAND_TYPE_ANYDISP;
static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
static const i386_operand_type regzmm = OPERAND_TYPE_REGZMM;
static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
/* Return nonzero iff operand type T has any bit of category C set
   (C selects register, immediate, displacement, or any-mem below).  */
operand_type_check (i386_operand_type t, enum operand_type c)
      /* reg: any general register width.  */
      return (t.bitfield.reg8
	      || t.bitfield.reg64);
      /* imm: any immediate width.  */
      return (t.bitfield.imm8
	      || t.bitfield.imm32s
	      || t.bitfield.imm64);
      /* disp: any displacement width.  */
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64);
      /* anymem: displacement or base/index addressing.  */
      return (t.bitfield.disp8
	      || t.bitfield.disp16
	      || t.bitfield.disp32
	      || t.bitfield.disp32s
	      || t.bitfield.disp64
	      || t.bitfield.baseindex);
/* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
   operand J for instruction template T. */
match_reg_size (const insn_template *t, unsigned int j)
  /* A size bit set in the parsed operand i.types[j] is a conflict
     only when the template does not accept that size.  */
  return !((i.types[j].bitfield.byte
	    && !t->operand_types[j].bitfield.byte)
	   || (i.types[j].bitfield.word
	       && !t->operand_types[j].bitfield.word)
	   || (i.types[j].bitfield.dword
	       && !t->operand_types[j].bitfield.dword)
	   || (i.types[j].bitfield.qword
	       && !t->operand_types[j].bitfield.qword));
/* Return 1 if there is no conflict in any size on operand J for
   instruction template T. */
match_mem_size (const insn_template *t, unsigned int j)
  /* Register-width sizes first, then the memory-only widths
     (fword/tbyte and the vector register widths).  */
  return (match_reg_size (t, j)
	  && !((i.types[j].bitfield.unspecified
		&& !t->operand_types[j].bitfield.unspecified)
	       || (i.types[j].bitfield.fword
		   && !t->operand_types[j].bitfield.fword)
	       || (i.types[j].bitfield.tbyte
		   && !t->operand_types[j].bitfield.tbyte)
	       || (i.types[j].bitfield.xmmword
		   && !t->operand_types[j].bitfield.xmmword)
	       || (i.types[j].bitfield.ymmword
		   && !t->operand_types[j].bitfield.ymmword)
	       || (i.types[j].bitfield.zmmword
		   && !t->operand_types[j].bitfield.zmmword)));
/* Return 1 if there is no size conflict on any operands for
   instruction template T. */
operand_size_match (const insn_template *t)
  /* Don't check jump instructions. */
  if (t->opcode_modifier.jump
      || t->opcode_modifier.jumpbyte
      || t->opcode_modifier.jumpdword
      || t->opcode_modifier.jumpintersegment)

  /* Check memory and accumulator operand size. */
  for (j = 0; j < i.operands; j++)
      /* AnySize templates accept any operand size.  */
      if (t->operand_types[j].bitfield.anysize)

      if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))

      if (i.types[j].bitfield.mem && !match_mem_size (t, j))
  /* Forward match failed; only retry in reverse for D/FloatD
     (reversible-operand) templates.  */
  else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
      i.error = operand_size_mismatch;

  /* Check reverse. */
  gas_assert (i.operands == 2);

  for (j = 0; j < 2; j++)
      /* Compare operand J against the template slot for the OTHER
	 operand (1 - j) since operands are swapped.  */
      if (t->operand_types[j].bitfield.acc
	  && !match_reg_size (t, j ? 0 : 1))

      if (i.types[j].bitfield.mem
	  && !match_mem_size (t, j ? 0 : 1))
/* Return nonzero if OVERLAP (the AND of parsed and template operand
   types) still matches GIVEN once size/jump bits are masked off;
   on failure record operand_type_mismatch in i.error.  */
operand_type_match (i386_operand_type overlap,
		    i386_operand_type given)
  i386_operand_type temp = overlap;

  /* Size and addressing-form bits are checked separately; clear
     them so only the operand *kind* bits decide the match.  */
  temp.bitfield.jumpabsolute = 0;
  temp.bitfield.unspecified = 0;
  temp.bitfield.byte = 0;
  temp.bitfield.word = 0;
  temp.bitfield.dword = 0;
  temp.bitfield.fword = 0;
  temp.bitfield.qword = 0;
  temp.bitfield.tbyte = 0;
  temp.bitfield.xmmword = 0;
  temp.bitfield.ymmword = 0;
  temp.bitfield.zmmword = 0;
  if (operand_type_all_zero (&temp))

  if (given.bitfield.baseindex == overlap.bitfield.baseindex
      && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)

  i.error = operand_type_mismatch;
/* If given types g0 and g1 are registers they must be of the same type
   unless the expected operand type register overlap is null.
   Note that Acc in a template matches every size of reg.  */
operand_type_register_match (i386_operand_type m0,
			     i386_operand_type g0,
			     i386_operand_type t0,
			     i386_operand_type m1,
			     i386_operand_type g1,
			     i386_operand_type t1)
  /* Non-register operands trivially match.  */
  if (!operand_type_check (g0, reg))

  if (!operand_type_check (g1, reg))

  /* Same width on both given operands: nothing more to check.  */
  if (g0.bitfield.reg8 == g1.bitfield.reg8
      && g0.bitfield.reg16 == g1.bitfield.reg16
      && g0.bitfield.reg32 == g1.bitfield.reg32
      && g0.bitfield.reg64 == g1.bitfield.reg64)

  /* An accumulator slot in the template accepts any register width;
     widen the template type accordingly.  */
  if (m0.bitfield.acc)
      t0.bitfield.reg8 = 1;
      t0.bitfield.reg16 = 1;
      t0.bitfield.reg32 = 1;
      t0.bitfield.reg64 = 1;

  if (m1.bitfield.acc)
      t1.bitfield.reg8 = 1;
      t1.bitfield.reg16 = 1;
      t1.bitfield.reg32 = 1;
      t1.bitfield.reg64 = 1;

  /* Fail only if the template widths have no common width at all.  */
  if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
      && !(t0.bitfield.reg16 & t1.bitfield.reg16)
      && !(t0.bitfield.reg32 & t1.bitfield.reg32)
      && !(t0.bitfield.reg64 & t1.bitfield.reg64))

  i.error = register_type_mismatch;
/* Return R's hardware register number, folding in the REX extension
   bit when the register carries RegRex.  */
static INLINE unsigned int
register_number (const reg_entry *r)
  unsigned int nr = r->reg_num;

  if (r->reg_flags & RegRex)
/* Return the ModRM `mod' field value implied by displacement type T
   (disp8/vec_disp8 vs. the wider displacement forms).  */
static INLINE unsigned int
mode_from_disp_size (i386_operand_type t)
  if (t.bitfield.disp8 || t.bitfield.vec_disp8)
  else if (t.bitfield.disp16
	   || t.bitfield.disp32
	   || t.bitfield.disp32s)
/* Return nonzero iff NUM fits in a signed 8-bit value; the unsigned
   addition shifts the range [-0x80, 0x7f] onto [0, 0xff].  */
fits_in_signed_byte (addressT num)
  return num + 0x80 <= 0xff;
1963 fits_in_unsigned_byte (addressT num)
/* Return nonzero iff NUM fits in an unsigned 16-bit value.  */
fits_in_unsigned_word (addressT num)
  return num <= 0xffff;
/* Return nonzero iff NUM fits in a signed 16-bit value.  */
fits_in_signed_word (addressT num)
  return num + 0x8000 <= 0xffff;
/* Return nonzero iff NUM fits in a signed 32-bit value (trivially
   true when addressT is only 32 bits wide).  */
fits_in_signed_long (addressT num ATTRIBUTE_UNUSED)
  return num + 0x80000000 <= 0xffffffff;
} /* fits_in_signed_long() */
/* Return nonzero iff NUM fits in an unsigned 32-bit value.  */
fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED)
  return num <= 0xffffffff;
} /* fits_in_unsigned_long() */
/* Return nonzero iff NUM can be encoded as an EVEX compressed 8-bit
   displacement: NUM must be a multiple of 1 << i.memshift and the
   shifted value must fit in a signed byte.  */
fits_in_vec_disp8 (offsetT num)
  int shift = i.memshift;

  mask = (1 << shift) - 1;

  /* Return 0 if NUM isn't properly aligned. */

  /* Check if NUM will fit in 8bit after shift. */
  return fits_in_signed_byte (num >> shift);
/* Return nonzero iff NUM fits in an unsigned 4-bit immediate.  */
fits_in_imm4 (offsetT num)
  return (num & 0xf) == num;
/* Return the set of immediate operand types NUM can be encoded as;
   Imm64 is always set, narrower widths are added as NUM fits them.  */
static i386_operand_type
smallest_imm_type (offsetT num)
  i386_operand_type t;

  operand_type_set (&t, 0);
  t.bitfield.imm64 = 1;

  if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
      /* This code is disabled on the 486 because all the Imm1 forms
	 in the opcode table are slower on the i486.  They're the
	 versions with the implicitly specified single-position
	 displacement, which has another syntax if you really want to
      t.bitfield.imm1 = 1;
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
  else if (fits_in_signed_byte (num))
      t.bitfield.imm8 = 1;
      t.bitfield.imm8s = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
  else if (fits_in_unsigned_byte (num))
      t.bitfield.imm8 = 1;
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
  else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
      t.bitfield.imm16 = 1;
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
  else if (fits_in_signed_long (num))
      t.bitfield.imm32 = 1;
      t.bitfield.imm32s = 1;
  else if (fits_in_unsigned_long (num))
    t.bitfield.imm32 = 1;
/* Truncate VAL to SIZE bytes (1/2/4/8), warning when significant
   bits are lost; returns the masked value.  */
offset_in_range (offsetT val, int size)
    case 1: mask = ((addressT) 1 << 8) - 1; break;
    case 2: mask = ((addressT) 1 << 16) - 1; break;
    /* 2 << 31 (not 1 << 32) avoids shifting past the type width on
       32-bit addressT.  */
    case 4: mask = ((addressT) 2 << 31) - 1; break;
    case 8: mask = ((addressT) 2 << 63) - 1; break;

  /* If BFD64, sign extend val for 32bit address mode. */
  if (flag_code != CODE_64BIT
      || i.prefix[ADDR_PREFIX])
    if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
      val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);

  /* Warn iff bits outside MASK are neither all zero nor all one
     (i.e. not a plain sign extension).  */
  if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
      char buf1[40], buf2[40];

      sprint_value (buf1, val);
      sprint_value (buf2, val & mask);
      as_warn (_("%s shortened to %s"), buf1, buf2);
   a. PREFIX_EXIST if attempting to add a prefix where one from the
   same class already exists.
   b. PREFIX_LOCK if lock prefix is added.
   c. PREFIX_REP if rep/repne prefix is added.
   d. PREFIX_OTHER if other prefix is added.
 */

/* Record prefix byte PREFIX in i.prefix[], classifying it into its
   prefix slot; REX prefixes are OR-merged in 64-bit mode.  */
static enum PREFIX_GROUP
add_prefix (unsigned int prefix)
  enum PREFIX_GROUP ret = PREFIX_OTHER;

  /* REX prefixes (0x40..0x4f) are only legal in 64-bit mode; W and
     the R/X/B bits may each be set only once.  */
  if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
      && flag_code == CODE_64BIT)
      if ((i.prefix[REX_PREFIX] & prefix & REX_W)
	  || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
	      && (prefix & (REX_R | REX_X | REX_B))))
	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:

	case REPNE_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:

	case LOCK_PREFIX_OPCODE:

	case ADDR_PREFIX_OPCODE:

	case DATA_PREFIX_OPCODE:

      if (i.prefix[q] != 0)

      i.prefix[q] |= prefix;
    as_bad (_("same type of prefix used twice"));
/* Switch the assembler to code size VALUE (a flag_code value),
   updating the cpu64/cpuno64 arch flags.  If CHECK is set an
   unsupported mode is a fatal error, otherwise a plain error.  */
update_code_flag (int value, int check)
  PRINTF_LIKE ((*as_error));

  flag_code = (enum flag_code) value;
  if (flag_code == CODE_64BIT)
      cpu_arch_flags.bitfield.cpu64 = 1;
      cpu_arch_flags.bitfield.cpuno64 = 0;
      cpu_arch_flags.bitfield.cpu64 = 0;
      cpu_arch_flags.bitfield.cpuno64 = 1;
  /* 64-bit mode requires long mode (CpuLM) in the selected arch.  */
  if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
	as_error = as_fatal;
      (*as_error) (_("64bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
  if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
	as_error = as_fatal;
      (*as_error) (_("32bit mode not supported on `%s'."),
		   cpu_arch_name ? cpu_arch_name : default_arch);
  /* Reset any .code16gcc stack-op override.  */
  stackop_size = '\0';
/* Handler for .code16/.code32/.code64: set code size without the
   fatal-error check.  */
set_code_flag (int value)
  update_code_flag (value, 0);
/* Handler for .code16gcc: 16-bit code but with 32-bit (`l' suffix)
   default stack operations.  */
set_16bit_gcc_code_flag (int new_code_flag)
  flag_code = (enum flag_code) new_code_flag;
  if (flag_code != CODE_16BIT)
  cpu_arch_flags.bitfield.cpu64 = 0;
  cpu_arch_flags.bitfield.cpuno64 = 1;
  stackop_size = LONG_MNEM_SUFFIX;
/* Handler for .intel_syntax/.att_syntax; SYNTAX_FLAG selects Intel
   syntax.  An optional "prefix"/"noprefix" argument controls whether
   registers need a % prefix.  */
set_intel_syntax (int syntax_flag)
  /* Find out if register prefixing is specified. */
  int ask_naked_reg = 0;

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
      char *string = input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "prefix") == 0)
      else if (strcmp (string, "noprefix") == 0)
	as_bad (_("bad argument to syntax directive."));
      *input_line_pointer = e;
  demand_empty_rest_of_line ();

  intel_syntax = syntax_flag;

  /* Default: naked registers in Intel syntax only when the target
     has no symbol leading char that would clash.  */
  if (ask_naked_reg == 0)
    allow_naked_reg = (intel_syntax
		       && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
    allow_naked_reg = (ask_naked_reg < 0);

  expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);

  identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
  identifier_chars['$'] = intel_syntax ? '$' : 0;
  register_prefix = allow_naked_reg ? "" : "%";
/* Handler for .intel_mnemonic/.att_mnemonic.  */
set_intel_mnemonic (int mnemonic_flag)
  intel_mnemonic = mnemonic_flag;
/* Handler for .allow_index_reg/.disallow_index_reg.  */
set_allow_index_reg (int flag)
  allow_index_reg = flag;
/* Handler for .sse_check / .operand_check: parse one of
   "none"/"warning"/"error" into the selected check_kind variable.  */
set_check (int what)
  enum check_kind *kind;

      kind = &operand_check;

  if (!is_end_of_line[(unsigned char) *input_line_pointer])
      char *string = input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "none") == 0)
      else if (strcmp (string, "warning") == 0)
	*kind = check_warning;
      else if (strcmp (string, "error") == 0)
	*kind = check_error;
	as_bad (_("bad argument to %s_check directive."), str);
      *input_line_pointer = e;
    as_bad (_("missing argument for %s_check directive"), str);

  demand_empty_rest_of_line ();
/* Reject arch directives incompatible with the configured target;
   L1OM/K1OM sub-archs are valid only for their matching ELF
   machine.  No-op on non-ELF.  */
check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
			   i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  static const char *arch;

  /* Intel LIOM is only supported on ELF. */

  /* Use cpu_arch_name if it is set in md_parse_option.  Otherwise
     use default_arch.  */
  arch = cpu_arch_name;
    arch = default_arch;

  /* If we are targeting Intel L1OM, we must enable it. */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
      || new_flag.bitfield.cpul1om)

  /* If we are targeting Intel K1OM, we must enable it. */
  if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
      || new_flag.bitfield.cpuk1om)

  as_bad (_("`%s' is not supported on `%s'"), name, arch);
/* Handler for the .arch directive: parse an architecture (or
   sub-arch extension, possibly ".no*"-negated) name and update the
   cpu_arch/isa/tune flag state; an optional ",nojumps"/",jumps"
   modifier controls conditional-jump promotion.  */
set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
  if (!is_end_of_line[(unsigned char) *input_line_pointer])
      char *string = input_line_pointer;
      int e = get_symbol_end ();
      i386_cpu_flags flags;

      for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
	  if (strcmp (string, cpu_arch[j].name) == 0)
	      check_cpu_arch_compatible (string, cpu_arch[j].flags);

	      /* Full architecture name: replace the whole flag set
		 and reapply the current code-size bits.  */
	      cpu_arch_name = cpu_arch[j].name;
	      cpu_sub_arch_name = NULL;
	      cpu_arch_flags = cpu_arch[j].flags;
	      if (flag_code == CODE_64BIT)
		  cpu_arch_flags.bitfield.cpu64 = 1;
		  cpu_arch_flags.bitfield.cpuno64 = 0;
		  cpu_arch_flags.bitfield.cpu64 = 0;
		  cpu_arch_flags.bitfield.cpuno64 = 1;
	      cpu_arch_isa = cpu_arch[j].type;
	      cpu_arch_isa_flags = cpu_arch[j].flags;
	      if (!cpu_arch_tune_set)
		  cpu_arch_tune = cpu_arch_isa;
		  cpu_arch_tune_flags = cpu_arch_isa_flags;

	  /* Sub-arch extension: OR in (or, when negated, clear) just
	     that feature's flags.  */
	  if (!cpu_arch[j].negated)
	    flags = cpu_flags_or (cpu_arch_flags,
	    flags = cpu_flags_and_not (cpu_arch_flags,
	  if (!cpu_flags_equal (&flags, &cpu_arch_flags))
	      if (cpu_sub_arch_name)
		  char *name = cpu_sub_arch_name;
		  cpu_sub_arch_name = concat (name,
					      (const char *) NULL);
		cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
	      cpu_arch_flags = flags;
	      cpu_arch_isa_flags = flags;
	  *input_line_pointer = e;
	  demand_empty_rest_of_line ();

      if (j >= ARRAY_SIZE (cpu_arch))
	as_bad (_("no such architecture: `%s'"), string);

      *input_line_pointer = e;
    as_bad (_("missing cpu architecture"));

  no_cond_jump_promotion = 0;
  if (*input_line_pointer == ','
      && !is_end_of_line[(unsigned char) input_line_pointer[1]])
      char *string = ++input_line_pointer;
      int e = get_symbol_end ();

      if (strcmp (string, "nojumps") == 0)
	no_cond_jump_promotion = 1;
      else if (strcmp (string, "jumps") == 0)
	as_bad (_("no such architecture modifier: `%s'"), string);

      *input_line_pointer = e;

  demand_empty_rest_of_line ();
/* Return the BFD architecture for the current target; L1OM/K1OM
   require 64-bit ELF, everything else is bfd_arch_i386.
   NOTE(review): the function name line is missing from this
   extraction — presumably i386_arch; verify.  */
enum bfd_architecture
  if (cpu_arch_isa == PROCESSOR_L1OM)
      if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	  || flag_code != CODE_64BIT)
	as_fatal (_("Intel L1OM is 64bit ELF only"));
      return bfd_arch_l1om;
  else if (cpu_arch_isa == PROCESSOR_K1OM)
      if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	  || flag_code != CODE_64BIT)
	as_fatal (_("Intel K1OM is 64bit ELF only"));
      return bfd_arch_k1om;
    return bfd_arch_i386;
  /* Select the BFD machine from default_arch: "x86_64" yields
     x86_64/x64_32 (or L1OM/K1OM per cpu_arch_isa), "i386" yields
     i386.  NOTE(review): the enclosing function header is missing
     from this extraction — presumably i386_mach; verify.  */
  if (!strncmp (default_arch, "x86_64", 6))
      if (cpu_arch_isa == PROCESSOR_L1OM)
	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	      || default_arch[6] != '\0')
	    as_fatal (_("Intel L1OM is 64bit ELF only"));
	  return bfd_mach_l1om;
      else if (cpu_arch_isa == PROCESSOR_K1OM)
	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour
	      || default_arch[6] != '\0')
	    as_fatal (_("Intel K1OM is 64bit ELF only"));
	  return bfd_mach_k1om;
      /* "x86_64" exactly -> 64-bit; "x86_64xxx" -> x32 ILP32.  */
      else if (default_arch[6] == '\0')
	return bfd_mach_x86_64;
	return bfd_mach_x64_32;
  else if (!strcmp (default_arch, "i386"))
    return bfd_mach_i386_i386;
    as_fatal (_("unknown architecture"));
  /* One-time assembler initialization (md_begin body — the function
     header is missing from this extraction; verify).  Builds the
     mnemonic and register hash tables, fills the lexical character
     classification tables, and sets DWARF CFI parameters.  */
  const char *hash_err;

  /* Initialize op_hash hash table. */
  op_hash = hash_new ();

    const insn_template *optab;
    templates *core_optab;

    /* Setup for loop. */
    core_optab = (templates *) xmalloc (sizeof (templates));
    core_optab->start = optab;

	/* Entries sharing a mnemonic are contiguous in i386_optab;
	   a name change ends the current template group.  */
	if (optab->name == NULL
	    || strcmp (optab->name, (optab - 1)->name) != 0)
	    /* different name --> ship out current template list;
	       add to hash table; & begin anew. */
	    core_optab->end = optab;
	    hash_err = hash_insert (op_hash,
				    (void *) core_optab);
	      as_fatal (_("can't hash %s: %s"),
	    if (optab->name == NULL)
	    core_optab = (templates *) xmalloc (sizeof (templates));
	    core_optab->start = optab;

  /* Initialize reg_hash hash table. */
  reg_hash = hash_new ();
    const reg_entry *regtab;
    unsigned int regtab_size = i386_regtab_size;

    for (regtab = i386_regtab; regtab_size--; regtab++)
	hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
	  as_fatal (_("can't hash %s: %s"),

  /* Fill in lexical tables:  mnemonic_chars, operand_chars.  */
    for (c = 0; c < 256; c++)
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	else if (ISLOWER (c))
	    mnemonic_chars[c] = c;
	    register_chars[c] = c;
	    operand_chars[c] = c;
	else if (ISUPPER (c))
	    /* Mnemonics/registers are matched case-insensitively by
	       folding upper case to lower here.  */
	    mnemonic_chars[c] = TOLOWER (c);
	    register_chars[c] = mnemonic_chars[c];
	    operand_chars[c] = c;
	else if (c == '{' || c == '}')
	  operand_chars[c] = c;

	if (ISALPHA (c) || ISDIGIT (c))
	  identifier_chars[c] = c;
	    identifier_chars[c] = c;
	    operand_chars[c] = c;

    identifier_chars['@'] = '@';
    identifier_chars['?'] = '?';
    operand_chars['?'] = '?';
    digit_chars['-'] = '-';
    mnemonic_chars['_'] = '_';
    mnemonic_chars['-'] = '-';
    mnemonic_chars['.'] = '.';
    identifier_chars['_'] = '_';
    identifier_chars['.'] = '.';

    for (p = operand_special_chars; *p != '\0'; p++)
      operand_chars[(unsigned char) *p] = *p;

  if (flag_code == CODE_64BIT)
#if defined (OBJ_COFF) && defined (TE_PE)
      x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
      x86_dwarf2_return_column = 16;
      x86_cie_data_alignment = -8;
      x86_dwarf2_return_column = 8;
      x86_cie_data_alignment = -4;
/* Dump hash-table statistics for both lookup tables to FILE.  */
i386_print_statistics (FILE *file)
  hash_print_statistics (file, "i386 opcode", op_hash);
  hash_print_statistics (file, "i386 register", reg_hash);
/* Debugging routines for md_assemble. */
/* Forward declarations: pte = print template, pt = print operand
   type, pe = print expression, ps = print symbol.  */
static void pte (insn_template *);
static void pt (i386_operand_type);
static void pe (expressionS *);
static void ps (symbolS *);
/* Debug dump of insn X (labelled LINE) to stdout: address parts,
   ModRM, SIB, REX bits, and each operand's type and value.  */
pi (char *line, i386_insn *x)
  fprintf (stdout, "%s: template ", line);
  fprintf (stdout, "  address: base %s  index %s  scale %x\n",
	   x->base_reg ? x->base_reg->reg_name : "none",
	   x->index_reg ? x->index_reg->reg_name : "none",
	   x->log2_scale_factor);
  fprintf (stdout, "  modrm:  mode %x  reg %x  reg/mem %x\n",
	   x->rm.mode, x->rm.reg, x->rm.regmem);
  fprintf (stdout, "  sib:  base %x  index %x  scale %x\n",
	   x->sib.base, x->sib.index, x->sib.scale);
  fprintf (stdout, "  rex: 64bit %x  extX %x  extY %x  extZ %x\n",
	   (x->rex & REX_W) != 0,
	   (x->rex & REX_R) != 0,
	   (x->rex & REX_X) != 0,
	   (x->rex & REX_B) != 0);
  for (j = 0; j < x->operands; j++)
      fprintf (stdout, "    #%d:  ", j + 1);
      fprintf (stdout, "\n");
      /* Print the register name for any register-class operand.  */
      if (x->types[j].bitfield.reg8
	  || x->types[j].bitfield.reg16
	  || x->types[j].bitfield.reg32
	  || x->types[j].bitfield.reg64
	  || x->types[j].bitfield.regmmx
	  || x->types[j].bitfield.regxmm
	  || x->types[j].bitfield.regymm
	  || x->types[j].bitfield.regzmm
	  || x->types[j].bitfield.sreg2
	  || x->types[j].bitfield.sreg3
	  || x->types[j].bitfield.control
	  || x->types[j].bitfield.debug
	  || x->types[j].bitfield.test)
	fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
      if (operand_type_check (x->types[j], imm))
      if (operand_type_check (x->types[j], disp))
	pe (x->op[j].disps);
/* DEBUG386 helper: dump one insn template T — operand count, base
   opcode, extension opcode (if any), D/W modifier flags, and each
   operand's type via pt ().  */
2719 pte (insn_template *t)
2722 fprintf (stdout, " %d operands ", t->operands);
2723 fprintf (stdout, "opcode %x ", t->base_opcode);
2724 if (t->extension_opcode != None)
2725 fprintf (stdout, "ext %x ", t->extension_opcode);
2726 if (t->opcode_modifier.d)
2727 fprintf (stdout, "D");
2728 if (t->opcode_modifier.w)
2729 fprintf (stdout, "W");
2730 fprintf (stdout, "\n");
2731 for (j = 0; j < t->operands; j++)
2733 fprintf (stdout, " #%d type ", j + 1);
2734 pt (t->operand_types[j]);
2735 fprintf (stdout, "\n");
/* Body of pe (expressionS *e) — the function header line is missing
   from this excerpt.  Dumps an expression: its operation code,
   addend (decimal and hex), and the add/op symbols when present.  */
2742 fprintf (stdout, " operation %d\n", e->X_op);
2743 fprintf (stdout, " add_number %ld (%lx)\n",
2744 (long) e->X_add_number, (long) e->X_add_number);
2745 if (e->X_add_symbol)
2747 fprintf (stdout, " add_symbol ");
2748 ps (e->X_add_symbol);
2749 fprintf (stdout, "\n");
2753 fprintf (stdout, " op_symbol ");
2754 ps (e->X_op_symbol);
2755 fprintf (stdout, "\n");
/* Body of ps (symbolS *s) — header line missing from this excerpt.
   Prints a symbol's name-ish info, EXTERNAL flag, and segment.  */
2762 fprintf (stdout, "%s type %s%s",
2764 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2765 segment_name (S_GET_SEGMENT (s)));
/* Lookup table used by pt (): maps each operand-type bit mask to a
   short printable name.  Note OPERAND_TYPE_IMM8 appears twice, once
   labelled "i8" and once "i8s".  */
2768 static struct type_name
2770 i386_operand_type mask;
2773 const type_names[] =
2775 { OPERAND_TYPE_REG8, "r8" },
2776 { OPERAND_TYPE_REG16, "r16" },
2777 { OPERAND_TYPE_REG32, "r32" },
2778 { OPERAND_TYPE_REG64, "r64" },
2779 { OPERAND_TYPE_IMM8, "i8" },
2780 { OPERAND_TYPE_IMM8, "i8s" },
2781 { OPERAND_TYPE_IMM16, "i16" },
2782 { OPERAND_TYPE_IMM32, "i32" },
2783 { OPERAND_TYPE_IMM32S, "i32s" },
2784 { OPERAND_TYPE_IMM64, "i64" },
2785 { OPERAND_TYPE_IMM1, "i1" },
2786 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2787 { OPERAND_TYPE_DISP8, "d8" },
2788 { OPERAND_TYPE_DISP16, "d16" },
2789 { OPERAND_TYPE_DISP32, "d32" },
2790 { OPERAND_TYPE_DISP32S, "d32s" },
2791 { OPERAND_TYPE_DISP64, "d64" },
2792 { OPERAND_TYPE_VEC_DISP8, "Vector d8" },
2793 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2794 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2795 { OPERAND_TYPE_CONTROL, "control reg" },
2796 { OPERAND_TYPE_TEST, "test reg" },
2797 { OPERAND_TYPE_DEBUG, "debug reg" },
2798 { OPERAND_TYPE_FLOATREG, "FReg" },
2799 { OPERAND_TYPE_FLOATACC, "FAcc" },
2800 { OPERAND_TYPE_SREG2, "SReg2" },
2801 { OPERAND_TYPE_SREG3, "SReg3" },
2802 { OPERAND_TYPE_ACC, "Acc" },
2803 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2804 { OPERAND_TYPE_REGMMX, "rMMX" },
2805 { OPERAND_TYPE_REGXMM, "rXMM" },
2806 { OPERAND_TYPE_REGYMM, "rYMM" },
2807 { OPERAND_TYPE_REGZMM, "rZMM" },
2808 { OPERAND_TYPE_REGMASK, "Mask reg" },
2809 { OPERAND_TYPE_ESSEG, "es" },
/* DEBUG386 helper: print the name of every type_names[] entry whose
   mask intersects operand type T (comma-separated, trailing ", ").  */
2813 pt (i386_operand_type t)
2816 i386_operand_type a;
2818 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2820 a = operand_type_and (t, type_names[j].mask);
2821 if (!operand_type_all_zero (&a))
2822 fprintf (stdout, "%s, ", type_names[j].name);
2827 #endif /* DEBUG386 */
/* Map a (size, pcrel, sign, other) request onto a BFD relocation
   code, diagnosing impossible combinations with as_bad.  When OTHER
   is an explicit relocation it is validated (and, for 64-bit code,
   some 32-bit x86-64 relocs are widened to their 64-bit forms);
   otherwise a generic BFD_RELOC_{8,16,32,64}[_PCREL] is chosen.
   NOTE(review): intermediate lines (switch heads, breaks, returns)
   are missing from this excerpt.  */
2829 static bfd_reloc_code_real_type
2830 reloc (unsigned int size,
2833 bfd_reloc_code_real_type other)
2835 if (other != NO_RELOC)
2837 reloc_howto_type *rel;
/* Widen selected 32-bit x86-64 relocations to 64-bit variants
   (context suggests this path is taken for 8-byte fields).  */
2842 case BFD_RELOC_X86_64_GOT32:
2843 return BFD_RELOC_X86_64_GOT64;
2845 case BFD_RELOC_X86_64_PLTOFF64:
2846 return BFD_RELOC_X86_64_PLTOFF64;
2848 case BFD_RELOC_X86_64_GOTPC32:
2849 other = BFD_RELOC_X86_64_GOTPC64;
2851 case BFD_RELOC_X86_64_GOTPCREL:
2852 other = BFD_RELOC_X86_64_GOTPCREL64;
2854 case BFD_RELOC_X86_64_TPOFF32:
2855 other = BFD_RELOC_X86_64_TPOFF64;
2857 case BFD_RELOC_X86_64_DTPOFF32:
2858 other = BFD_RELOC_X86_64_DTPOFF64;
2864 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2865 if (other == BFD_RELOC_SIZE32)
2868 other = BFD_RELOC_SIZE64;
/* @size relocations cannot be pc-relative.  */
2871 as_bad (_("there are no pc-relative size relocations"));
2877 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2878 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
/* Validate the explicit reloc against the BFD howto entry.  */
2881 rel = bfd_reloc_type_lookup (stdoutput, other);
2883 as_bad (_("unknown relocation (%u)"), other);
2884 else if (size != bfd_get_reloc_size (rel))
2885 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2886 bfd_get_reloc_size (rel),
2888 else if (pcrel && !rel->pc_relative)
2889 as_bad (_("non-pc-relative relocation for pc-relative field"));
2890 else if ((rel->complain_on_overflow == complain_overflow_signed
2892 || (rel->complain_on_overflow == complain_overflow_unsigned
2894 as_bad (_("relocated field and relocation type differ in signedness"));
/* Fallback: pick a generic relocation by size and pcrel-ness.  */
2903 as_bad (_("there are no unsigned pc-relative relocations"));
2906 case 1: return BFD_RELOC_8_PCREL;
2907 case 2: return BFD_RELOC_16_PCREL;
2908 case 4: return BFD_RELOC_32_PCREL;
2909 case 8: return BFD_RELOC_64_PCREL;
2911 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2918 case 4: return BFD_RELOC_X86_64_32S;
2923 case 1: return BFD_RELOC_8;
2924 case 2: return BFD_RELOC_16;
2925 case 4: return BFD_RELOC_32;
2926 case 8: return BFD_RELOC_64;
2928 as_bad (_("cannot do %s %u byte relocation"),
2929 sign > 0 ? "signed" : "unsigned", size);
2935 /* Here we decide which fixups can be adjusted to make them relative to
2936 the beginning of the section instead of the symbol. Basically we need
2937 to make sure that the dynamic relocations are done correctly, so in
2938 some cases we force the original symbol to be used. */
2941 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2943 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2947 /* Don't adjust pc-relative references to merge sections in 64-bit
2949 if (use_rela_relocations
2950 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2954 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2955 and changed later by validate_fix. */
2956 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2957 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2960 /* Adjust_reloc_syms doesn't know about the GOT. Need to keep symbol
2961 for size relocations. */
/* Any GOT/PLT/TLS/size/vtable relocation must keep its original
   symbol — rewriting it section-relative would break dynamic
   linking semantics.  */
2962 if (fixP->fx_r_type == BFD_RELOC_SIZE32
2963 || fixP->fx_r_type == BFD_RELOC_SIZE64
2964 || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2965 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2966 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2967 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2968 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2969 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2970 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2971 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2972 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2973 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2974 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2975 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2976 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2977 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2978 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2979 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2980 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2981 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2982 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2983 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2984 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2985 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2986 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2987 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2988 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2989 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2990 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2991 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
/* Classify an x87 MNEMONIC by how it treats memory operands.
   Returns 0 for non-math (or fxsave/fxrstor), 2 for integer ops,
   3 for control ops (fldcw/fldenv, fnstcw-style, frstor, fsave,
   fst{c,d,e,s}*); other paths (missing from this excerpt) return
   the default for real-FP math ops.  */
2998 intel_float_operand (const char *mnemonic)
3000 /* Note that the value returned is meaningful only for opcodes with (memory)
3001 operands, hence the code here is free to improperly handle opcodes that
3002 have no operands (for better performance and smaller code). */
3004 if (mnemonic[0] != 'f')
3005 return 0; /* non-math */
3007 switch (mnemonic[1])
3009 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
3010 the fs segment override prefix not currently handled because no
3011 call path can make opcodes without operands get here */
3013 return 2 /* integer op */;
3015 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
3016 return 3; /* fldcw/fldenv */
3019 if (mnemonic[2] != 'o' /* fnop */)
3020 return 3; /* non-waiting control op */
3023 if (mnemonic[2] == 's')
3024 return 3; /* frstor/frstpm */
3027 if (mnemonic[2] == 'a')
3028 return 3; /* fsave */
3029 if (mnemonic[2] == 't')
3031 switch (mnemonic[3])
3033 case 'c': /* fstcw */
3034 case 'd': /* fstdw */
3035 case 'e': /* fstenv */
3036 case 's': /* fsts[gw] */
3042 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
3043 return 0; /* fxsave/fxrstor are not really math ops */
3050 /* Build the VEX prefix. */
/* Fills i.vex.bytes[] with a 2-byte (C5) or 3-byte (C4/8F) VEX
   prefix from the matched template and parsed operands.  When the
   opcode map is 0F, VEX.W is not forced and no REX.WXB bits are
   needed, the shorter 2-byte form is preferred; a commutative (.s)
   all-register form may be re-encoded (dest/source swapped) to make
   the 2-byte form reachable.
   NOTE(review): several lines (else branches, breaks, case labels)
   are missing from this excerpt.  */
3053 build_vex_prefix (const insn_template *t)
3055 unsigned int register_specifier;
3056 unsigned int implied_prefix;
3057 unsigned int vector_length;
3059 /* Check register specifier. */
3060 if (i.vex.register_specifier)
3062 register_specifier =
3063 ~register_number (i.vex.register_specifier) & 0xf;
3064 gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
3067 register_specifier = 0xf;
3069 /* Use 2-byte VEX prefix by swappping destination and source
3072 && i.operands == i.reg_operands
3073 && i.tm.opcode_modifier.vexopcode == VEX0F
3074 && i.tm.opcode_modifier.s
3077 unsigned int xchg = i.operands - 1;
3078 union i386_op temp_op;
3079 i386_operand_type temp_type;
3081 temp_type = i.types[xchg];
3082 i.types[xchg] = i.types[0];
3083 i.types[0] = temp_type;
3084 temp_op = i.op[xchg];
3085 i.op[xchg] = i.op[0];
3088 gas_assert (i.rm.mode == 3);
3092 i.rm.regmem = i.rm.reg;
3095 /* Use the next insn. */
3099 if (i.tm.opcode_modifier.vex == VEXScalar)
3100 vector_length = avxscalar;
3102 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
/* Derive the implied (pp) prefix from the embedded legacy prefix
   byte of the two-byte base opcode.  */
3104 switch ((i.tm.base_opcode >> 8) & 0xff)
3109 case DATA_PREFIX_OPCODE:
3112 case REPE_PREFIX_OPCODE:
3115 case REPNE_PREFIX_OPCODE:
3122 /* Use 2-byte VEX prefix if possible. */
3123 if (i.tm.opcode_modifier.vexopcode == VEX0F
3124 && i.tm.opcode_modifier.vexw != VEXW1
3125 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
3127 /* 2-byte VEX prefix. */
3131 i.vex.bytes[0] = 0xc5;
3133 /* Check the REX.R bit. */
3134 r = (i.rex & REX_R) ? 0 : 1;
3135 i.vex.bytes[1] = (r << 7
3136 | register_specifier << 3
3137 | vector_length << 2
3142 /* 3-byte VEX prefix. */
/* Select escape byte (C4 for VEX maps, 8F for XOP maps) and the
   map field m.  */
3147 switch (i.tm.opcode_modifier.vexopcode)
3151 i.vex.bytes[0] = 0xc4;
3155 i.vex.bytes[0] = 0xc4;
3159 i.vex.bytes[0] = 0xc4;
3163 i.vex.bytes[0] = 0x8f;
3167 i.vex.bytes[0] = 0x8f;
3171 i.vex.bytes[0] = 0x8f;
3177 /* The high 3 bits of the second VEX byte are 1's compliment
3178 of RXB bits from REX. */
3179 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3181 /* Check the REX.W bit. */
3182 w = (i.rex & REX_W) ? 1 : 0;
3183 if (i.tm.opcode_modifier.vexw == VEXW1)
3186 i.vex.bytes[2] = (w << 7
3187 | register_specifier << 3
3188 | vector_length << 2
3193 /* Build the EVEX prefix. */
/* Fills i.vex.bytes[] with the 4-byte (62) EVEX prefix: escape byte,
   RXB+map byte, W/vvvv/U/pp byte, and the z/L'L/b/V'/aaa byte from
   masking, broadcast, rounding and vector-length state.
   NOTE(review): lines are missing from this excerpt (case labels,
   breaks, else branches); the statements below are not contiguous.  */
3196 build_evex_prefix (void)
3198 unsigned int register_specifier;
3199 unsigned int implied_prefix;
3201 rex_byte vrex_used = 0;
3203 /* Check register specifier. */
3204 if (i.vex.register_specifier)
3206 gas_assert ((i.vrex & REX_X) == 0);
3208 register_specifier = i.vex.register_specifier->reg_num;
3209 if ((i.vex.register_specifier->reg_flags & RegRex))
3210 register_specifier += 8;
3211 /* The upper 16 registers are encoded in the fourth byte of the
3213 if (!(i.vex.register_specifier->reg_flags & RegVRex))
3214 i.vex.bytes[3] = 0x8;
3215 register_specifier = ~register_specifier & 0xf;
3219 register_specifier = 0xf;
3221 /* Encode upper 16 vector index register in the fourth byte of
3223 if (!(i.vrex & REX_X))
3224 i.vex.bytes[3] = 0x8;
/* Derive the implied (pp) prefix from the embedded legacy prefix
   byte of the two-byte base opcode.  */
3229 switch ((i.tm.base_opcode >> 8) & 0xff)
3234 case DATA_PREFIX_OPCODE:
3237 case REPE_PREFIX_OPCODE:
3240 case REPNE_PREFIX_OPCODE:
3247 /* 4 byte EVEX prefix. */
3249 i.vex.bytes[0] = 0x62;
3252 switch (i.tm.opcode_modifier.vexopcode)
3268 /* The high 3 bits of the second EVEX byte are 1's compliment of RXB
3270 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3272 /* The fifth bit of the second EVEX byte is 1's compliment of the
3273 REX_R bit in VREX. */
3274 if (!(i.vrex & REX_R))
3275 i.vex.bytes[1] |= 0x10;
3279 if ((i.reg_operands + i.imm_operands) == i.operands)
3281 /* When all operands are registers, the REX_X bit in REX is not
3282 used. We reuse it to encode the upper 16 registers, which is
3283 indicated by the REX_B bit in VREX. The REX_X bit is encoded
3284 as 1's compliment. */
3285 if ((i.vrex & REX_B))
3288 i.vex.bytes[1] &= ~0x40;
3292 /* EVEX instructions shouldn't need the REX prefix. */
3293 i.vrex &= ~vrex_used;
3294 gas_assert (i.vrex == 0);
3296 /* Check the REX.W bit. */
3297 w = (i.rex & REX_W) ? 1 : 0;
3298 if (i.tm.opcode_modifier.vexw)
3300 if (i.tm.opcode_modifier.vexw == VEXW1)
3303 /* If w is not set it means we are dealing with WIG instruction. */
3306 if (evexwig == evexw1)
3310 /* Encode the U bit. */
3311 implied_prefix |= 0x4;
3313 /* The third byte of the EVEX prefix. */
3314 i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);
3316 /* The fourth byte of the EVEX prefix. */
3317 /* The zeroing-masking bit. */
3318 if (i.mask && i.mask->zeroing)
3319 i.vex.bytes[3] |= 0x80;
3321 /* Don't always set the broadcast bit if there is no RC. */
3324 /* Encode the vector length. */
3325 unsigned int vec_length;
3327 switch (i.tm.opcode_modifier.evex)
3329 case EVEXLIG: /* LL' is ignored */
3330 vec_length = evexlig << 5;
3333 vec_length = 0 << 5;
3336 vec_length = 1 << 5;
3339 vec_length = 2 << 5;
3345 i.vex.bytes[3] |= vec_length;
3346 /* Encode the broadcast bit. */
3348 i.vex.bytes[3] |= 0x10;
/* Rounding control: sae-only sets only bit 4; otherwise the RC
   type (or the default evexrcig) lands in bits 5-6.  */
3352 if (i.rounding->type != saeonly)
3353 i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
3355 i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
3358 if (i.mask && i.mask->mask)
3359 i.vex.bytes[3] |= i.mask->mask->reg_num;
/* Turn a template's extension_opcode into a fake 8-bit immediate
   operand (3DNow!/SSE2/AVX suffix-byte encoding).  For MONITOR/MWAIT
   and SVME the fixed register operands are validated first.
   NOTE(review): lines are missing from this excerpt.  */
3363 process_immext (void)
3367 if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
3370 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3371 with an opcode suffix which is coded in the same place as an
3372 8-bit immediate field would be.
3373 Here we check those operands and remove them afterwards. */
3376 for (x = 0; x < i.operands; x++)
3377 if (register_number (i.op[x].regs) != x)
3378 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3379 register_prefix, i.op[x].regs->reg_name, x + 1,
3385 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3386 which is coded in the same place as an 8-bit immediate field
3387 would be. Here we fake an 8-bit immediate operand from the
3388 opcode suffix stored in tm.extension_opcode.
3390 AVX instructions also use this encoding, for some of
3391 3 argument instructions. */
3393 gas_assert (i.imm_operands <= 1
3395 || ((i.tm.opcode_modifier.vex
3396 || i.tm.opcode_modifier.evex)
3397 && i.operands <= 4)));
/* Append the synthesized imm8 as a new operand; its value is the
   extension opcode, which is then cleared in the template copy.  */
3399 exp = &im_expressions[i.imm_operands++];
3400 i.op[i.operands].imms = exp;
3401 i.types[i.operands] = imm8;
3403 exp->X_op = O_constant;
3404 exp->X_add_number = i.tm.extension_opcode;
3405 i.tm.extension_opcode = None;
/* Body of check_hle () — the function header is missing from this
   excerpt.  Validates the pending xacquire/xrelease (HLE) prefix
   against the matched template: rejects instructions that don't
   allow it, requires `lock' where mandated, restricts xacquire-only
   forms, and demands a memory destination after xrelease.  */
3412 switch (i.tm.opcode_modifier.hleprefixok)
3417 as_bad (_("invalid instruction `%s' after `%s'"),
3418 i.tm.name, i.hle_prefix);
3421 if (i.prefix[LOCK_PREFIX])
3423 as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
3427 case HLEPrefixRelease:
3428 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3430 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3434 if (i.mem_operands == 0
3435 || !operand_type_check (i.types[i.operands - 1], anymem))
3437 as_bad (_("memory destination needed for instruction `%s'"
3438 " after `xrelease'"), i.tm.name);
3445 /* This is the guts of the machine-dependent assembler. LINE points to a
3446 machine dependent instruction. This function is supposed to emit
3447 the frags/bytes it assembles to. */
/* Pipeline: reset global insn state `i', parse mnemonic and operands,
   match a template, validate prefixes (rep/lock/hle/bnd), fix up
   suffixes and immediates, build VEX/EVEX or REX prefixes, then emit.
   NOTE(review): many intermediate lines are missing from this
   excerpt; statements below are not contiguous.  */
3450 md_assemble (char *line)
3453 char mnemonic[MAX_MNEM_SIZE];
3454 const insn_template *t;
3456 /* Initialize globals. */
3457 memset (&i, '\0', sizeof (i));
3458 for (j = 0; j < MAX_OPERANDS; j++)
3459 i.reloc[j] = NO_RELOC;
3460 memset (disp_expressions, '\0', sizeof (disp_expressions));
3461 memset (im_expressions, '\0', sizeof (im_expressions));
3462 save_stack_p = save_stack;
3464 /* First parse an instruction mnemonic & call i386_operand for the operands.
3465 We assume that the scrubber has arranged it so that line[0] is the valid
3466 start of a (possibly prefixed) mnemonic. */
3468 line = parse_insn (line, mnemonic);
3472 line = parse_operands (line, mnemonic);
3477 /* Now we've parsed the mnemonic into a set of templates, and have the
3478 operands at hand. */
3480 /* All intel opcodes have reversed operands except for "bound" and
3481 "enter". We also don't reverse intersegment "jmp" and "call"
3482 instructions with 2 immediate operands so that the immediate segment
3483 precedes the offset, as it does when in AT&T mode. */
3486 && (strcmp (mnemonic, "bound") != 0)
3487 && (strcmp (mnemonic, "invlpga") != 0)
3488 && !(operand_type_check (i.types[0], imm)
3489 && operand_type_check (i.types[1], imm)))
3492 /* The order of the immediates should be reversed
3493 for 2 immediates extrq and insertq instructions */
3494 if (i.imm_operands == 2
3495 && (strcmp (mnemonic, "extrq") == 0
3496 || strcmp (mnemonic, "insertq") == 0))
3497 swap_2_operands (0, 1);
3502 /* Don't optimize displacement for movabs since it only takes 64bit
3505 && i.disp_encoding != disp_encoding_32bit
3506 && (flag_code != CODE_64BIT
3507 || strcmp (mnemonic, "movabs") != 0))
3510 /* Next, we find a template that matches the given insn,
3511 making sure the overlap of the given operands types is consistent
3512 with the template operand types. */
3514 if (!(t = match_template ()))
/* Optional diagnostics for legacy SSE use (-msse-check=...).  */
3517 if (sse_check != check_none
3518 && !i.tm.opcode_modifier.noavx
3519 && (i.tm.cpu_flags.bitfield.cpusse
3520 || i.tm.cpu_flags.bitfield.cpusse2
3521 || i.tm.cpu_flags.bitfield.cpusse3
3522 || i.tm.cpu_flags.bitfield.cpussse3
3523 || i.tm.cpu_flags.bitfield.cpusse4_1
3524 || i.tm.cpu_flags.bitfield.cpusse4_2))
3526 (sse_check == check_warning
3528 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3531 /* Zap movzx and movsx suffix. The suffix has been set from
3532 "word ptr" or "byte ptr" on the source operand in Intel syntax
3533 or extracted from mnemonic in AT&T syntax. But we'll use
3534 the destination register to choose the suffix for encoding. */
3535 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3537 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3538 there is no suffix, the default will be byte extension. */
3539 if (i.reg_operands != 2
3542 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3547 if (i.tm.opcode_modifier.fwait)
3548 if (!add_prefix (FWAIT_OPCODE))
3551 /* Check if REP prefix is OK. */
3552 if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
3554 as_bad (_("invalid instruction `%s' after `%s'"),
3555 i.tm.name, i.rep_prefix);
3559 /* Check for lock without a lockable instruction. Destination operand
3560 must be memory unless it is xchg (0x86). */
3561 if (i.prefix[LOCK_PREFIX]
3562 && (!i.tm.opcode_modifier.islockable
3563 || i.mem_operands == 0
3564 || (i.tm.base_opcode != 0x86
3565 && !operand_type_check (i.types[i.operands - 1], anymem))))
3567 as_bad (_("expecting lockable instruction after `lock'"));
3571 /* Check if HLE prefix is OK. */
3572 if (i.hle_prefix && !check_hle ())
3575 /* Check BND prefix. */
3576 if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
3577 as_bad (_("expecting valid branch instruction after `bnd'"));
3579 if (i.tm.cpu_flags.bitfield.cpumpx
3580 && flag_code == CODE_64BIT
3581 && i.prefix[ADDR_PREFIX])
3582 as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
3584 /* Insert BND prefix. */
3586 && i.tm.opcode_modifier.bndprefixok
3587 && !i.prefix[BND_PREFIX])
3588 add_prefix (BND_PREFIX_OPCODE);
3590 /* Check string instruction segment overrides. */
3591 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3593 if (!check_string ())
3595 i.disp_operands = 0;
3598 if (!process_suffix ())
3601 /* Update operand types. */
3602 for (j = 0; j < i.operands; j++)
3603 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3605 /* Make still unresolved immediate matches conform to size of immediate
3606 given in i.suffix. */
3607 if (!finalize_imm ())
3610 if (i.types[0].bitfield.imm1)
3611 i.imm_operands = 0; /* kludge for shift insns. */
3613 /* We only need to check those implicit registers for instructions
3614 with 3 operands or less. */
3615 if (i.operands <= 3)
3616 for (j = 0; j < i.operands; j++)
3617 if (i.types[j].bitfield.inoutportreg
3618 || i.types[j].bitfield.shiftcount
3619 || i.types[j].bitfield.acc
3620 || i.types[j].bitfield.floatacc)
3623 /* ImmExt should be processed after SSE2AVX. */
3624 if (!i.tm.opcode_modifier.sse2avx
3625 && i.tm.opcode_modifier.immext)
3628 /* For insns with operands there are more diddles to do to the opcode. */
3631 if (!process_operands ())
3634 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3636 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3637 as_warn (_("translating to `%sp'"), i.tm.name);
3640 if (i.tm.opcode_modifier.vex || i.tm.opcode_modifier.evex)
3642 if (flag_code == CODE_16BIT)
3644 as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
3649 if (i.tm.opcode_modifier.vex)
3650 build_vex_prefix (t);
3652 build_evex_prefix ();
3655 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3656 instructions may define INT_OPCODE as well, so avoid this corner
3657 case for those instructions that use MODRM. */
3658 if (i.tm.base_opcode == INT_OPCODE
3659 && !i.tm.opcode_modifier.modrm
3660 && i.op[0].imms->X_add_number == 3)
3662 i.tm.base_opcode = INT3_OPCODE;
3666 if ((i.tm.opcode_modifier.jump
3667 || i.tm.opcode_modifier.jumpbyte
3668 || i.tm.opcode_modifier.jumpdword)
3669 && i.op[0].disps->X_op == O_constant)
3671 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3672 the absolute address given by the constant. Since ix86 jumps and
3673 calls are pc relative, we need to generate a reloc. */
3674 i.op[0].disps->X_add_symbol = &abs_symbol;
3675 i.op[0].disps->X_op = O_symbol;
3678 if (i.tm.opcode_modifier.rex64)
3681 /* For 8 bit registers we need an empty rex prefix. Also if the
3682 instruction already has a prefix, we need to convert old
3683 registers to new ones. */
3685 if ((i.types[0].bitfield.reg8
3686 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3687 || (i.types[1].bitfield.reg8
3688 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3689 || ((i.types[0].bitfield.reg8
3690 || i.types[1].bitfield.reg8)
3695 i.rex |= REX_OPCODE;
3696 for (x = 0; x < 2; x++)
3698 /* Look for 8 bit operand that uses old registers. */
3699 if (i.types[x].bitfield.reg8
3700 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3702 /* In case it is "hi" register, give up. */
3703 if (i.op[x].regs->reg_num > 3)
3704 as_bad (_("can't encode register '%s%s' in an "
3705 "instruction requiring REX prefix."),
3706 register_prefix, i.op[x].regs->reg_name);
3708 /* Otherwise it is equivalent to the extended register.
3709 Since the encoding doesn't change this is merely
3710 cosmetic cleanup for debug output. */
3712 i.op[x].regs = i.op[x].regs + 8;
3718 add_prefix (REX_OPCODE | i.rex);
3720 /* We are ready to output the insn. */
/* Scan the mnemonic (and any leading instruction prefixes) out of
   LINE into MNEMONIC, set current_templates from the opcode hash,
   handle pseudo-suffixes (.s/.d8/.d32), try suffix-trimmed lookups,
   accept ,pt/,pn branch hints, and verify architecture support.
   Returns the updated scan pointer (NULL on error — inferred from
   callers; confirm against full source).
   NOTE(review): many intermediate lines are missing from this
   excerpt; statements below are not contiguous.  */
3725 parse_insn (char *line, char *mnemonic)
3728 char *token_start = l;
3731 const insn_template *t;
/* Copy mnemonic characters, stopping at the first char not in
   mnemonic_chars[].  */
3737 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3742 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3744 as_bad (_("no such instruction: `%s'"), token_start);
3749 if (!is_space_char (*l)
3750 && *l != END_OF_INSN
3752 || (*l != PREFIX_SEPARATOR
3755 as_bad (_("invalid character %s in mnemonic"),
3756 output_invalid (*l));
3759 if (token_start == l)
3761 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3762 as_bad (_("expecting prefix; got nothing"));
3764 as_bad (_("expecting mnemonic; got nothing"));
3768 /* Look up instruction (or prefix) via hash table. */
3769 current_templates = (const templates *) hash_find (op_hash, mnemonic);
/* If the token is a prefix and more input follows, record it and
   loop for the real mnemonic.  */
3771 if (*l != END_OF_INSN
3772 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3773 && current_templates
3774 && current_templates->start->opcode_modifier.isprefix)
3776 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3778 as_bad ((flag_code != CODE_64BIT
3779 ? _("`%s' is only supported in 64-bit mode")
3780 : _("`%s' is not supported in 64-bit mode")),
3781 current_templates->start->name);
3784 /* If we are in 16-bit mode, do not allow addr16 or data16.
3785 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3786 if ((current_templates->start->opcode_modifier.size16
3787 || current_templates->start->opcode_modifier.size32)
3788 && flag_code != CODE_64BIT
3789 && (current_templates->start->opcode_modifier.size32
3790 ^ (flag_code == CODE_16BIT)))
3792 as_bad (_("redundant %s prefix"),
3793 current_templates->start->name);
3796 /* Add prefix, checking for repeated prefixes. */
3797 switch (add_prefix (current_templates->start->base_opcode))
3802 if (current_templates->start->cpu_flags.bitfield.cpuhle)
3803 i.hle_prefix = current_templates->start->name;
3804 else if (current_templates->start->cpu_flags.bitfield.cpumpx)
3805 i.bnd_prefix = current_templates->start->name;
3807 i.rep_prefix = current_templates->start->name;
3812 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3819 if (!current_templates)
3821 /* Check if we should swap operand or force 32bit displacement in
3823 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3825 else if (mnem_p - 3 == dot_p
3828 i.disp_encoding = disp_encoding_8bit;
3829 else if (mnem_p - 4 == dot_p
3833 i.disp_encoding = disp_encoding_32bit;
3838 current_templates = (const templates *) hash_find (op_hash, mnemonic)
3841 if (!current_templates)
3844 /* See if we can get a match by trimming off a suffix. */
3847 case WORD_MNEM_SUFFIX:
3848 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3849 i.suffix = SHORT_MNEM_SUFFIX;
3851 case BYTE_MNEM_SUFFIX:
3852 case QWORD_MNEM_SUFFIX:
3853 i.suffix = mnem_p[-1];
3855 current_templates = (const templates *) hash_find (op_hash,
3858 case SHORT_MNEM_SUFFIX:
3859 case LONG_MNEM_SUFFIX:
3862 i.suffix = mnem_p[-1];
3864 current_templates = (const templates *) hash_find (op_hash,
3873 if (intel_float_operand (mnemonic) == 1)
3874 i.suffix = SHORT_MNEM_SUFFIX;
3876 i.suffix = LONG_MNEM_SUFFIX;
3878 current_templates = (const templates *) hash_find (op_hash,
3883 if (!current_templates)
3885 as_bad (_("no such instruction: `%s'"), token_start);
3890 if (current_templates->start->opcode_modifier.jump
3891 || current_templates->start->opcode_modifier.jumpbyte)
3893 /* Check for a branch hint. We allow ",pt" and ",pn" for
3894 predict taken and predict not taken respectively.
3895 I'm not sure that branch hints actually do anything on loop
3896 and jcxz insns (JumpByte) for current Pentium4 chips. They
3897 may work in the future and it doesn't hurt to accept them
3899 if (l[0] == ',' && l[1] == 'p')
3903 if (!add_prefix (DS_PREFIX_OPCODE))
3907 else if (l[2] == 'n')
3909 if (!add_prefix (CS_PREFIX_OPCODE))
3915 /* Any other comma loses. */
3918 as_bad (_("invalid character %s in mnemonic"),
3919 output_invalid (*l));
3923 /* Check if instruction is supported on specified architecture. */
3925 for (t = current_templates->start; t < current_templates->end; ++t)
3927 supported |= cpu_flags_match (t);
3928 if (supported == CPU_FLAGS_PERFECT_MATCH)
3932 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3934 as_bad (flag_code == CODE_64BIT
3935 ? _("`%s' is not supported in 64-bit mode")
3936 : _("`%s' is only supported in 64-bit mode"),
3937 current_templates->start->name);
3940 if (supported != CPU_FLAGS_PERFECT_MATCH)
3942 as_bad (_("`%s' is not supported on `%s%s'"),
3943 current_templates->start->name,
3944 cpu_arch_name ? cpu_arch_name : default_arch,
3945 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3950 if (!cpu_arch_flags.bitfield.cpui386
3951 && (flag_code != CODE_16BIT))
3953 as_warn (_("use .code16 to ensure correct addressing mode"));
/* Split the rest of the statement L into comma-separated operands,
   tracking paren/bracket balance so commas inside an address don't
   split; each operand text is handed to i386_intel_operand or
   i386_att_operand which fills in the global `i'.
   NOTE(review): lines are missing from this excerpt (returns,
   braces, the '('/')'/'['/']' case labels).  */
3960 parse_operands (char *l, const char *mnemonic)
3964 /* 1 if operand is pending after ','. */
3965 unsigned int expecting_operand = 0;
3967 /* Non-zero if operand parens not balanced. */
3968 unsigned int paren_not_balanced;
3970 while (*l != END_OF_INSN)
3972 /* Skip optional white space before operand. */
3973 if (is_space_char (*l))
3975 if (!is_operand_char (*l) && *l != END_OF_INSN)
3977 as_bad (_("invalid character %s before operand %d"),
3978 output_invalid (*l),
3982 token_start = l; /* after white space */
3983 paren_not_balanced = 0;
/* Consume one operand: stop at a top-level ',' or END_OF_INSN.  */
3984 while (paren_not_balanced || *l != ',')
3986 if (*l == END_OF_INSN)
3988 if (paren_not_balanced)
3991 as_bad (_("unbalanced parenthesis in operand %d."),
3994 as_bad (_("unbalanced brackets in operand %d."),
3999 break; /* we are done */
4001 else if (!is_operand_char (*l) && !is_space_char (*l))
4003 as_bad (_("invalid character %s in operand %d"),
4004 output_invalid (*l),
4011 ++paren_not_balanced;
4013 --paren_not_balanced;
4018 ++paren_not_balanced;
4020 --paren_not_balanced;
4024 if (l != token_start)
4025 { /* Yes, we've read in another operand. */
4026 unsigned int operand_ok;
4027 this_operand = i.operands++;
4028 i.types[this_operand].bitfield.unspecified = 1;
4029 if (i.operands > MAX_OPERANDS)
4031 as_bad (_("spurious operands; (%d operands/instruction max)"),
4035 /* Now parse operand adding info to 'i' as we go along. */
4036 END_STRING_AND_SAVE (l);
4040 i386_intel_operand (token_start,
4041 intel_float_operand (mnemonic));
4043 operand_ok = i386_att_operand (token_start);
4045 RESTORE_END_STRING (l);
4051 if (expecting_operand)
4053 expecting_operand_after_comma:
4054 as_bad (_("expecting operand after ','; got nothing"));
4059 as_bad (_("expecting operand before ','; got nothing"));
4064 /* Now *l must be either ',' or END_OF_INSN. */
4067 if (*++l == END_OF_INSN)
4069 /* Just skip it, if it's \n complain. */
4070 goto expecting_operand_after_comma;
4072 expecting_operand = 1;
/* Exchange operands XCHG1 and XCHG2 in the global insn `i': their
   types, values and relocs, and re-point any mask/broadcast/rounding
   annotation that referenced either operand index.  */
4079 swap_2_operands (int xchg1, int xchg2)
4081 union i386_op temp_op;
4082 i386_operand_type temp_type;
4083 enum bfd_reloc_code_real temp_reloc;
4085 temp_type = i.types[xchg2];
4086 i.types[xchg2] = i.types[xchg1];
4087 i.types[xchg1] = temp_type;
4088 temp_op = i.op[xchg2];
4089 i.op[xchg2] = i.op[xchg1];
4090 i.op[xchg1] = temp_op;
4091 temp_reloc = i.reloc[xchg2];
4092 i.reloc[xchg2] = i.reloc[xchg1];
4093 i.reloc[xchg1] = temp_reloc;
/* Keep the masking annotation pointing at the operand it followed.  */
4097 if (i.mask->operand == xchg1)
4098 i.mask->operand = xchg2;
4099 else if (i.mask->operand == xchg2)
4100 i.mask->operand = xchg1;
/* Same for broadcast ...  */
4104 if (i.broadcast->operand == xchg1)
4105 i.broadcast->operand = xchg2;
4106 else if (i.broadcast->operand == xchg2)
4107 i.broadcast->operand = xchg1;
/* ... and rounding control.  */
4111 if (i.rounding->operand == xchg1)
4112 i.rounding->operand = xchg2;
4113 else if (i.rounding->operand == xchg2)
4114 i.rounding->operand = xchg1;
/* Reverse the parsed operand order (Intel -> AT&T convention),
   swapping outer (and, for enough operands, inner) pairs, and swap
   the recorded segment overrides when both operands are memory.
   NOTE(review): the switch/case lines selecting which pairs to swap
   are missing from this excerpt.  */
4119 swap_operands (void)
4125 swap_2_operands (1, i.operands - 2);
4128 swap_2_operands (0, i.operands - 1);
4134 if (i.mem_operands == 2)
4136 const seg_entry *temp_seg;
4137 temp_seg = i.seg[0];
4138 i.seg[0] = i.seg[1];
4139 i.seg[1] = temp_seg;
4143 /* Try to ensure constant immediates are represented in the smallest
/* (optimize_imm — function signature elided by sampling.)
   Widens/narrows the allowed immediate-size bitfields of each constant
   immediate operand so template matching can pick the shortest
   encoding, guided by an explicit or guessed operand-size suffix.  */
4148   char guess_suffix = 0;
/* Prefer the explicit mnemonic suffix when one was given.  */
4152     guess_suffix = i.suffix;
4153   else if (i.reg_operands)
4155     /* Figure out a suffix from the last register operand specified.
4156        We can't do this properly yet, ie. excluding InOutPortReg,
4157        but the following works for instructions with immediates.
4158        In any case, we can't set i.suffix yet.  */
4159     for (op = i.operands; --op >= 0;)
4160       if (i.types[op].bitfield.reg8)
4162 	  guess_suffix = BYTE_MNEM_SUFFIX;
4165       else if (i.types[op].bitfield.reg16)
4167 	  guess_suffix = WORD_MNEM_SUFFIX;
4170       else if (i.types[op].bitfield.reg32)
4172 	  guess_suffix = LONG_MNEM_SUFFIX;
4175       else if (i.types[op].bitfield.reg64)
4177 	  guess_suffix = QWORD_MNEM_SUFFIX;
/* No registers either: in 16-bit mode (or with a data-size prefix
   flipping it) default to a word-sized immediate.  */
4181   else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4182     guess_suffix = WORD_MNEM_SUFFIX;
4184   for (op = i.operands; --op >= 0;)
4185     if (operand_type_check (i.types[op], imm))
4187 	switch (i.op[op].imms->X_op)
/* O_constant case (label elided): the immediate's value is known now.  */
4190 	    /* If a suffix is given, this operand may be shortened.  */
4191 	    switch (guess_suffix)
/* Cases below deliberately fall through, accumulating every size the
   suffix permits (fall-through/`break' lines elided by sampling).  */
4193 	      case LONG_MNEM_SUFFIX:
4194 		i.types[op].bitfield.imm32 = 1;
4195 		i.types[op].bitfield.imm64 = 1;
4197 	      case WORD_MNEM_SUFFIX:
4198 		i.types[op].bitfield.imm16 = 1;
4199 		i.types[op].bitfield.imm32 = 1;
4200 		i.types[op].bitfield.imm32s = 1;
4201 		i.types[op].bitfield.imm64 = 1;
4203 	      case BYTE_MNEM_SUFFIX:
4204 		i.types[op].bitfield.imm8 = 1;
4205 		i.types[op].bitfield.imm8s = 1;
4206 		i.types[op].bitfield.imm16 = 1;
4207 		i.types[op].bitfield.imm32 = 1;
4208 		i.types[op].bitfield.imm32s = 1;
4209 		i.types[op].bitfield.imm64 = 1;
4213 	    /* If this operand is at most 16 bits, convert it
4214 	       to a signed 16 bit number before trying to see
4215 	       whether it will fit in an even smaller size.
4216 	       This allows a 16-bit operand such as $0xffe0 to
4217 	       be recognised as within Imm8S range.  */
4218 	    if ((i.types[op].bitfield.imm16)
4219 		&& (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
4221 		i.op[op].imms->X_add_number =
4222 		  (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
/* Likewise sign-extend a value that fits in 32 bits.  */
4224 	    if ((i.types[op].bitfield.imm32)
4225 		&& ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
4228 		i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
4229 						^ ((offsetT) 1 << 31))
4230 					       - ((offsetT) 1 << 31));
/* Merge in every size class the (possibly narrowed) value fits.  */
4233 	      = operand_type_or (i.types[op],
4234 				 smallest_imm_type (i.op[op].imms->X_add_number));
4236 	    /* We must avoid matching of Imm32 templates when 64bit
4237 	       only immediate is available.  */
4238 	    if (guess_suffix == QWORD_MNEM_SUFFIX)
4239 	      i.types[op].bitfield.imm32 = 0;
4246 	    /* Symbols and expressions.  */
4248 	    /* Convert symbolic operand to proper sizes for matching, but don't
4249 	       prevent matching a set of insns that only supports sizes other
4250 	       than those matching the insn suffix.  */
4252 	      i386_operand_type mask, allowed;
4253 	      const insn_template *t;
4255 	      operand_type_set (&mask, 0);
4256 	      operand_type_set (&allowed, 0);
/* Union of the immediate types every candidate template accepts
   for this operand slot.  */
4258 	      for (t = current_templates->start;
4259 		   t < current_templates->end;
4261 		allowed = operand_type_or (allowed,
4262 					   t->operand_types[op]);
4263 	      switch (guess_suffix)
/* Fall-through chain again: wider suffixes admit narrower imms too.  */
4265 		case QWORD_MNEM_SUFFIX:
4266 		  mask.bitfield.imm64 = 1;
4267 		  mask.bitfield.imm32s = 1;
4269 		case LONG_MNEM_SUFFIX:
4270 		  mask.bitfield.imm32 = 1;
4272 		case WORD_MNEM_SUFFIX:
4273 		  mask.bitfield.imm16 = 1;
4275 		case BYTE_MNEM_SUFFIX:
4276 		  mask.bitfield.imm8 = 1;
/* Only restrict the operand if at least one template would still
   match after masking — otherwise leave it unconstrained.  */
4281 	      allowed = operand_type_and (mask, allowed);
4282 	      if (!operand_type_all_zero (&allowed))
4283 		i.types[op] = operand_type_and (i.types[op], mask);
4290 /* Try to use the smallest displacement type too.  */
/* For each displacement operand: if it is a known constant, narrow the
   allowed disp-size bitfields (and canonicalize the value as a signed
   16/32-bit quantity); for TLSDESC_CALL relocs emit a marker fix and
   drop the displacement entirely; otherwise just forbid Disp64 since
   only constants support 64-bit displacements.  */
4292 optimize_disp (void)
4296   for (op = i.operands; --op >= 0;)
4297     if (operand_type_check (i.types[op], disp))
4299 	if (i.op[op].disps->X_op == O_constant)
4301 	    offsetT op_disp = i.op[op].disps->X_add_number;
4303 	    if (i.types[op].bitfield.disp16
4304 		&& (op_disp & ~(offsetT) 0xffff) == 0)
4306 		/* If this operand is at most 16 bits, convert
4307 		   to a signed 16 bit number and don't use 64bit
/* Sign-extend from 16 bits: $0xffe0 becomes -32 etc.  */
4309 		op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
4310 		i.types[op].bitfield.disp64 = 0;
4312 	    if (i.types[op].bitfield.disp32
4313 		&& (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
4315 		/* If this operand is at most 32 bits, convert
4316 		   to a signed 32 bit number and don't use 64bit
4318 		op_disp &= (((offsetT) 2 << 31) - 1);
4319 		op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
4320 		i.types[op].bitfield.disp64 = 0;
/* A zero displacement with a base/index register needs no
   displacement byte at all — clear every disp size.  */
4322 	    if (!op_disp && i.types[op].bitfield.baseindex)
4324 		i.types[op].bitfield.disp8 = 0;
4325 		i.types[op].bitfield.disp16 = 0;
4326 		i.types[op].bitfield.disp32 = 0;
4327 		i.types[op].bitfield.disp32s = 0;
4328 		i.types[op].bitfield.disp64 = 0;
4332 	    else if (flag_code == CODE_64BIT)
4334 		if (fits_in_signed_long (op_disp))
/* In 64-bit mode a sign-extendable-32-bit value uses Disp32S.  */
4336 		    i.types[op].bitfield.disp64 = 0;
4337 		    i.types[op].bitfield.disp32s = 1;
/* With an address-size prefix, an unsigned-32-bit value is fine
   as plain Disp32.  */
4339 		if (i.prefix[ADDR_PREFIX]
4340 		    && fits_in_unsigned_long (op_disp))
4341 		  i.types[op].bitfield.disp32 = 1;
/* Any disp that fits a signed byte may also use Disp8.  */
4343 	    if ((i.types[op].bitfield.disp32
4344 		 || i.types[op].bitfield.disp32s
4345 		 || i.types[op].bitfield.disp16)
4346 		&& fits_in_signed_byte (op_disp))
4347 	      i.types[op].bitfield.disp8 = 1;
4349 	else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
4350 		 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
/* TLS descriptor call: record a zero-size fix at the current
   position and suppress the displacement in the encoding.  */
4352 	    fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
4353 			 i.op[op].disps, 0, i.reloc[op]);
4354 	    i.types[op].bitfield.disp8 = 0;
4355 	    i.types[op].bitfield.disp16 = 0;
4356 	    i.types[op].bitfield.disp32 = 0;
4357 	    i.types[op].bitfield.disp32s = 0;
4358 	    i.types[op].bitfield.disp64 = 0;
4361 	  /* We only support 64bit displacement on constants.  */
4362 	  i.types[op].bitfield.disp64 = 0;
4366 /* Check if operands are valid for the instruction.  */
/* Validate AVX/AVX-512 specific operand constraints against template T:
   VSIB index registers, default-mask restrictions, gather register
   distinctness, broadcast applicability and width, masking form,
   static rounding/SAE placement, and compressed (vector) Disp8.
   Returns nonzero on failure with i.error set (returns elided in this
   sampled view).  */
4369 check_VecOperands (const insn_template *t)
4373   /* Without VSIB byte, we can't have a vector register for index.  */
4374   if (!t->opcode_modifier.vecsib
4376       && (i.index_reg->reg_type.bitfield.regxmm
4377 	  || i.index_reg->reg_type.bitfield.regymm
4378 	  || i.index_reg->reg_type.bitfield.regzmm))
4380       i.error = unsupported_vector_index_register;
4384   /* Check if default mask is allowed.  */
4385   if (t->opcode_modifier.nodefmask
4386       && (!i.mask || i.mask->mask->reg_num == 0))
4388       i.error = no_default_mask;
4392   /* For VSIB byte, we need a vector register for index, and all vector
4393      registers must be distinct.  */
4394   if (t->opcode_modifier.vecsib)
/* The index register class must match the template's VSIB width.  */
4397 	  || !((t->opcode_modifier.vecsib == VecSIB128
4398 		&& i.index_reg->reg_type.bitfield.regxmm)
4399 	       || (t->opcode_modifier.vecsib == VecSIB256
4400 		   && i.index_reg->reg_type.bitfield.regymm)
4401 	       || (t->opcode_modifier.vecsib == VecSIB512
4402 		   && i.index_reg->reg_type.bitfield.regzmm)))
4404 	  i.error = invalid_vsib_address;
/* Gathers: either two vector register operands (VEX form) or one
   plus a mask (EVEX form).  */
4408       gas_assert (i.reg_operands == 2 || i.mask);
4409       if (i.reg_operands == 2 && !i.mask)
4411 	  gas_assert (i.types[0].bitfield.regxmm
4412 		      || i.types[0].bitfield.regymm);
4413 	  gas_assert (i.types[2].bitfield.regxmm
4414 		      || i.types[2].bitfield.regymm);
4415 	  if (operand_check == check_none)
/* Mask, index and destination must be pairwise distinct.  */
4417 	  if (register_number (i.op[0].regs)
4418 	      != register_number (i.index_reg)
4419 	      && register_number (i.op[2].regs)
4420 		 != register_number (i.index_reg)
4421 	      && register_number (i.op[0].regs)
4422 		 != register_number (i.op[2].regs))
4424 	  if (operand_check == check_error)
4426 	      i.error = invalid_vector_register_set;
4429 	  as_warn (_("mask, index, and destination registers should be distinct"));
4431       else if (i.reg_operands == 1 && i.mask)
4433 	  if ((i.types[1].bitfield.regymm
4434 	       || i.types[1].bitfield.regzmm)
4435 	      && (register_number (i.op[1].regs)
4436 		  == register_number (i.index_reg)))
4438 	      if (operand_check == check_error)
4440 		  i.error = invalid_vector_register_set;
4443 	      if (operand_check != check_none)
4444 		as_warn (_("index and destination registers should be distinct"));
4449   /* Check if broadcast is supported by the instruction and is applied
4450      to the memory operand.  */
4453       int broadcasted_opnd_size;
4455       /* Check if specified broadcast is supported in this instruction,
4456 	 and it's applied to memory operand of DWORD or QWORD type,
4457 	 depending on VecESize.  */
4458       if (i.broadcast->type != t->opcode_modifier.broadcast
4459 	  || !i.types[i.broadcast->operand].bitfield.mem
4460 	  || (t->opcode_modifier.vecesize == 0
4461 	      && !i.types[i.broadcast->operand].bitfield.dword
4462 	      && !i.types[i.broadcast->operand].bitfield.unspecified)
4463 	  || (t->opcode_modifier.vecesize == 1
4464 	      && !i.types[i.broadcast->operand].bitfield.qword
4465 	      && !i.types[i.broadcast->operand].bitfield.unspecified))
/* Element size in bits, scaled by the broadcast multiplicity to get
   the effective (broadcast-expanded) operand width.  */
4468       broadcasted_opnd_size = t->opcode_modifier.vecesize ? 64 : 32;
4469       if (i.broadcast->type == BROADCAST_1TO16)
4470 	broadcasted_opnd_size <<= 4; /* Broadcast 1to16.  */
4471       else if (i.broadcast->type == BROADCAST_1TO8)
4472 	broadcasted_opnd_size <<= 3; /* Broadcast 1to8.  */
4473       else if (i.broadcast->type == BROADCAST_1TO4)
4474 	broadcasted_opnd_size <<= 2; /* Broadcast 1to4.  */
4475       else if (i.broadcast->type == BROADCAST_1TO2)
4476 	broadcasted_opnd_size <<= 1; /* Broadcast 1to2.  */
/* The expanded width must match the template's vector length.  */
4480       if ((broadcasted_opnd_size == 256
4481 	   && !t->operand_types[i.broadcast->operand].bitfield.ymmword)
4482 	  || (broadcasted_opnd_size == 512
4483 	      && !t->operand_types[i.broadcast->operand].bitfield.zmmword))
4486 	  i.error = unsupported_broadcast;
4490   /* If broadcast is supported in this instruction, we need to check if
4491      operand of one-element size isn't specified without broadcast.  */
4492   else if (t->opcode_modifier.broadcast && i.mem_operands)
4494       /* Find memory operand.  */
4495       for (op = 0; op < i.operands; op++)
4496 	if (operand_type_check (i.types[op], anymem))
4498       gas_assert (op < i.operands);
4499       /* Check size of the memory operand.  */
4500       if ((t->opcode_modifier.vecesize == 0
4501 	   && i.types[op].bitfield.dword)
4502 	  || (t->opcode_modifier.vecesize == 1
4503 	      && i.types[op].bitfield.qword))
4505 	  i.error = broadcast_needed;
4510   /* Check if requested masking is supported.  */
4512       && (!t->opcode_modifier.masking
4514 	      && t->opcode_modifier.masking == MERGING_MASKING)))
4516       i.error = unsupported_masking;
4520   /* Check if masking is applied to dest operand.  */
4521   if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
4523       i.error = mask_not_on_destination;
/* Static rounding / SAE checks (guard `if (i.rounding)' elided).  */
4530       if ((i.rounding->type != saeonly
4531 	   && !t->opcode_modifier.staticrounding)
4532 	  || (i.rounding->type == saeonly
4533 	      && (t->opcode_modifier.staticrounding
4534 		  || !t->opcode_modifier.sae)))
4536 	  i.error = unsupported_rc_sae;
4539       /* If the instruction has several immediate operands and one of
4540 	 them is rounding, the rounding operand should be the last
4541 	 immediate operand.  */
4542       if (i.imm_operands > 1
4543 	  && i.rounding->operand != (int) (i.imm_operands - 1))
4545 	  i.error = rc_sae_operand_not_last_imm;
4550   /* Check vector Disp8 operand.  */
4551   if (t->opcode_modifier.disp8memshift)
/* EVEX compressed displacement: shift count derives from element size
   (broadcast case) or directly from the template.  */
4554 	i.memshift = t->opcode_modifier.vecesize ? 3 : 2;
4556 	i.memshift = t->opcode_modifier.disp8memshift;
4558       for (op = 0; op < i.operands; op++)
4559 	if (operand_type_check (i.types[op], disp)
4560 	    && i.op[op].disps->X_op == O_constant)
4562 	    offsetT value = i.op[op].disps->X_add_number;
4563 	    int vec_disp8_ok = fits_in_vec_disp8 (value);
4564 	    if (t->operand_types [op].bitfield.vec_disp8)
4567 		  i.types[op].bitfield.vec_disp8 = 1;
4570 		/* Vector insn can only have Vec_Disp8/Disp32 in
4571 		   32/64bit modes, and Vec_Disp8/Disp16 in 16bit
4573 		i.types[op].bitfield.disp8 = 0;
4574 		if (flag_code != CODE_16BIT)
4575 		  i.types[op].bitfield.disp16 = 0;
4578 	    else if (flag_code != CODE_16BIT)
4580 		/* One form of this instruction supports vector Disp8.
4581 		   Try vector Disp8 if we need to use Disp32.  */
4582 		if (vec_disp8_ok && !fits_in_signed_byte (value))
4584 		    i.error = try_vector_disp8;
4596 /* Check if operands are valid for the instruction.  Update VEX
/* Validate VEX-specific constraints for template T: VREX (upper-16
   register) use requires EVEX encoding, and a Vec_Imm4 first operand
   must be a constant fitting in 4 bits.  Sets i.error and returns
   nonzero on failure (return statements elided in this sampled view).  */
4600 VEX_check_operands (const insn_template *t)
4602   /* VREX is only valid with EVEX prefix.  */
4603   if (i.need_vrex && !t->opcode_modifier.evex)
4605       i.error = invalid_register_operand;
/* Non-VEX templates have nothing more to check.  */
4609   if (!t->opcode_modifier.vex)
4612   /* Only check VEX_Imm4, which must be the first operand.  */
4613   if (t->operand_types[0].bitfield.vec_imm4)
4615       if (i.op[0].imms->X_op != O_constant
4616 	  || !fits_in_imm4 (i.op[0].imms->X_add_number))
4622       /* Turn off Imm8 so that update_imm won't complain.  */
4623       i.types[0] = vec_imm4;
/* Search current_templates for a template matching the parsed
   instruction in `i' (operand count, CPU support, syntax flavor,
   suffix, operand types in forward or reverse direction, vector/VEX
   constraints).  On success copies the chosen template into i.tm,
   applying address-size-prefix displacement rewrites and the
   direction-bit flip for reverse matches, and returns it; on failure
   diagnoses via as_bad and returns NULL (return elided in this
   sampled view).  */
4629 static const insn_template *
4630 match_template (void)
4632   /* Points to template once we've found it.  */
4633   const insn_template *t;
4634   i386_operand_type overlap0, overlap1, overlap2, overlap3;
4635   i386_operand_type overlap4;
4636   unsigned int found_reverse_match;
4637   i386_opcode_modifier suffix_check;
4638   i386_operand_type operand_types [MAX_OPERANDS];
4639   int addr_prefix_disp;
4641   unsigned int found_cpu_match;
4642   unsigned int check_register;
4643   enum i386_error specific_error = 0;
4645 #if MAX_OPERANDS != 5
4646 # error "MAX_OPERANDS must be 5."
4649   found_reverse_match = 0;
4650   addr_prefix_disp = -1;
/* Translate the parsed mnemonic suffix into the "suffix forbidden"
   bit that would disqualify a template carrying the same no_*suf.  */
4652   memset (&suffix_check, 0, sizeof (suffix_check));
4653   if (i.suffix == BYTE_MNEM_SUFFIX)
4654     suffix_check.no_bsuf = 1;
4655   else if (i.suffix == WORD_MNEM_SUFFIX)
4656     suffix_check.no_wsuf = 1;
4657   else if (i.suffix == SHORT_MNEM_SUFFIX)
4658     suffix_check.no_ssuf = 1;
4659   else if (i.suffix == LONG_MNEM_SUFFIX)
4660     suffix_check.no_lsuf = 1;
4661   else if (i.suffix == QWORD_MNEM_SUFFIX)
4662     suffix_check.no_qsuf = 1;
4663   else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4664     suffix_check.no_ldsuf = 1;
4666   /* Must have right number of operands.  */
4667   i.error = number_of_operands_mismatch;
4669   for (t = current_templates->start; t < current_templates->end; t++)
4671       addr_prefix_disp = -1;
4673       if (i.operands != t->operands)
4676       /* Check processor support.  */
4677       i.error = unsupported;
4678       found_cpu_match = (cpu_flags_match (t)
4679 			 == CPU_FLAGS_PERFECT_MATCH);
4680       if (!found_cpu_match)
4683       /* Check old gcc support.  */
4684       i.error = old_gcc_only;
4685       if (!old_gcc && t->opcode_modifier.oldgcc)
4688       /* Check AT&T mnemonic.  */
4689       i.error = unsupported_with_intel_mnemonic;
4690       if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4693       /* Check AT&T/Intel syntax.  */
4694       i.error = unsupported_syntax;
4695       if ((intel_syntax && t->opcode_modifier.attsyntax)
4696 	  || (!intel_syntax && t->opcode_modifier.intelsyntax))
4699       /* Check the suffix, except for some instructions in intel mode.  */
4700       i.error = invalid_instruction_suffix;
4701       if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4702 	  && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4703 	      || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4704 	      || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4705 	      || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4706 	      || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4707 	      || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4710       if (!operand_size_match (t))
/* Work on a local copy of the template's operand types; the
   address-size-prefix rewrite below mutates it.  */
4713       for (j = 0; j < MAX_OPERANDS; j++)
4714 	operand_types[j] = t->operand_types[j];
4716       /* In general, don't allow 64-bit operands in 32-bit mode.  */
4717       if (i.suffix == QWORD_MNEM_SUFFIX
4718 	  && flag_code != CODE_64BIT
4720 	      ? (!t->opcode_modifier.ignoresize
4721 		 && !intel_float_operand (t->name))
4722 	      : intel_float_operand (t->name) != 2)
4723 	  && ((!operand_types[0].bitfield.regmmx
4724 	       && !operand_types[0].bitfield.regxmm
4725 	       && !operand_types[0].bitfield.regymm
4726 	       && !operand_types[0].bitfield.regzmm)
4727 	      || (!operand_types[t->operands > 1].bitfield.regmmx
4728 		  && operand_types[t->operands > 1].bitfield.regxmm
4729 		  && operand_types[t->operands > 1].bitfield.regymm
4730 		  && operand_types[t->operands > 1].bitfield.regzmm))
4731 	  && (t->base_opcode != 0x0fc7
4732 	      || t->extension_opcode != 1 /* cmpxchg8b */))
4735       /* In general, don't allow 32-bit operands on pre-386.  */
4736       else if (i.suffix == LONG_MNEM_SUFFIX
4737 	       && !cpu_arch_flags.bitfield.cpui386
4739 		   ? (!t->opcode_modifier.ignoresize
4740 		      && !intel_float_operand (t->name))
4741 		   : intel_float_operand (t->name) != 2)
4742 	       && ((!operand_types[0].bitfield.regmmx
4743 		    && !operand_types[0].bitfield.regxmm)
4744 		   || (!operand_types[t->operands > 1].bitfield.regmmx
4745 		       && operand_types[t->operands > 1].bitfield.regxmm)))
4748       /* Do not verify operands when there are none.  */
4752 	    /* We've found a match; break out of loop.  */
4756       /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4757 	 into Disp32/Disp16/Disp32 operand.  */
4758       if (i.prefix[ADDR_PREFIX] != 0)
4760 	  /* There should be only one Disp operand.  */
/* 16-bit mode with 0x67 prefix: Disp16 template slot becomes Disp32
   (mode-dispatch switch lines elided in this sampled view).  */
4764 	      for (j = 0; j < MAX_OPERANDS; j++)
4766 		  if (operand_types[j].bitfield.disp16)
4768 		      addr_prefix_disp = j;
4769 		      operand_types[j].bitfield.disp32 = 1;
4770 		      operand_types[j].bitfield.disp16 = 0;
/* 32-bit mode with prefix: Disp32 becomes Disp16.  */
4776 	      for (j = 0; j < MAX_OPERANDS; j++)
4778 		  if (operand_types[j].bitfield.disp32)
4780 		      addr_prefix_disp = j;
4781 		      operand_types[j].bitfield.disp32 = 0;
4782 		      operand_types[j].bitfield.disp16 = 1;
/* 64-bit mode with prefix: Disp64 becomes Disp32.  */
4788 	      for (j = 0; j < MAX_OPERANDS; j++)
4790 		  if (operand_types[j].bitfield.disp64)
4792 		      addr_prefix_disp = j;
4793 		      operand_types[j].bitfield.disp64 = 0;
4794 		      operand_types[j].bitfield.disp32 = 1;
4802       /* We check register size if needed.  */
4803       check_register = t->opcode_modifier.checkregsize;
4804       overlap0 = operand_type_and (i.types[0], operand_types[0]);
4805       switch (t->operands)
4808 	  if (!operand_type_match (overlap0, i.types[0]))
4812 	  /* xchg %eax, %eax is a special case.  It is an aliase for nop
4813 	     only in 32bit mode and we can use opcode 0x90.  In 64bit
4814 	     mode, we can't use 0x90 for xchg %eax, %eax since it should
4815 	     zero-extend %eax to %rax.  */
4816 	  if (flag_code == CODE_64BIT
4817 	      && t->base_opcode == 0x90
4818 	      && operand_type_equal (&i.types [0], &acc32)
4819 	      && operand_type_equal (&i.types [1], &acc32))
4823 	  /* If we swap operand in encoding, we either match
4824 	     the next one or reverse direction of operands.  */
4825 	  if (t->opcode_modifier.s)
4827 	  else if (t->opcode_modifier.d)
4832 	  /* If we swap operand in encoding, we match the next one.  */
4833 	  if (i.swap_operand && t->opcode_modifier.s)
4837 	  overlap1 = operand_type_and (i.types[1], operand_types[1]);
4838 	  if (!operand_type_match (overlap0, i.types[0])
4839 	      || !operand_type_match (overlap1, i.types[1])
4841 		  && !operand_type_register_match (overlap0, i.types[0],
4843 						   overlap1, i.types[1],
4846 	      /* Check if other direction is valid ...  */
4847 	      if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4851 	      /* Try reversing direction of operands.  */
4852 	      overlap0 = operand_type_and (i.types[0], operand_types[1]);
4853 	      overlap1 = operand_type_and (i.types[1], operand_types[0]);
4854 	      if (!operand_type_match (overlap0, i.types[0])
4855 		  || !operand_type_match (overlap1, i.types[1])
4857 		      && !operand_type_register_match (overlap0,
4864 		  /* Does not match either direction.  */
4867 	      /* found_reverse_match holds which of D or FloatDR
4869 	      if (t->opcode_modifier.d)
4870 		found_reverse_match = Opcode_D;
4871 	      else if (t->opcode_modifier.floatd)
4872 		found_reverse_match = Opcode_FloatD;
4874 		found_reverse_match = 0;
4875 	      if (t->opcode_modifier.floatr)
4876 		found_reverse_match |= Opcode_FloatR;
4880 	      /* Found a forward 2 operand match here.  */
/* 3/4/5-operand templates: compute the remaining overlaps
   (case labels elided; falls through from highest count down).  */
4881 	      switch (t->operands)
4884 		  overlap4 = operand_type_and (i.types[4],
4887 		  overlap3 = operand_type_and (i.types[3],
4890 		  overlap2 = operand_type_and (i.types[2],
4895 	      switch (t->operands)
4898 		  if (!operand_type_match (overlap4, i.types[4])
4899 		      || !operand_type_register_match (overlap3,
4907 		  if (!operand_type_match (overlap3, i.types[3])
4909 			  && !operand_type_register_match (overlap2,
4917 		  /* Here we make use of the fact that there are no
4918 		     reverse match 3 operand instructions, and all 3
4919 		     operand instructions only need to be checked for
4920 		     register consistency between operands 2 and 3.  */
4921 		  if (!operand_type_match (overlap2, i.types[2])
4923 			  && !operand_type_register_match (overlap1,
4933 	  /* Found either forward/reverse 2, 3 or 4 operand match here:
4934 	     slip through to break.  */
4936       if (!found_cpu_match)
4938 	  found_reverse_match = 0;
4942       /* Check if vector and VEX operands are valid.  */
4943       if (check_VecOperands (t) || VEX_check_operands (t))
/* Remember the most specific failure so diagnostics prefer it.  */
4945 	  specific_error = i.error;
4949       /* We've found a match; break out of loop.  */
4953   if (t == current_templates->end)
4955       /* We found no match.  */
4956       const char *err_msg;
4957       switch (specific_error ? specific_error : i.error)
4961 	case operand_size_mismatch:
4962 	  err_msg = _("operand size mismatch");
4964 	case operand_type_mismatch:
4965 	  err_msg = _("operand type mismatch");
4967 	case register_type_mismatch:
4968 	  err_msg = _("register type mismatch");
4970 	case number_of_operands_mismatch:
4971 	  err_msg = _("number of operands mismatch");
4973 	case invalid_instruction_suffix:
4974 	  err_msg = _("invalid instruction suffix");
4977 	  err_msg = _("constant doesn't fit in 4 bits");
4980 	  err_msg = _("only supported with old gcc");
4982 	case unsupported_with_intel_mnemonic:
4983 	  err_msg = _("unsupported with Intel mnemonic");
4985 	case unsupported_syntax:
4986 	  err_msg = _("unsupported syntax");
4989 	  as_bad (_("unsupported instruction `%s'"),
4990 		  current_templates->start->name);
4992 	case invalid_vsib_address:
4993 	  err_msg = _("invalid VSIB address");
4995 	case invalid_vector_register_set:
4996 	  err_msg = _("mask, index, and destination registers must be distinct");
4998 	case unsupported_vector_index_register:
4999 	  err_msg = _("unsupported vector index register");
5001 	case unsupported_broadcast:
5002 	  err_msg = _("unsupported broadcast");
5004 	case broadcast_not_on_src_operand:
5005 	  err_msg = _("broadcast not on source memory operand");
5007 	case broadcast_needed:
5008 	  err_msg = _("broadcast is needed for operand of such type");
5010 	case unsupported_masking:
5011 	  err_msg = _("unsupported masking");
5013 	case mask_not_on_destination:
5014 	  err_msg = _("mask not on destination operand");
5016 	case no_default_mask:
5017 	  err_msg = _("default mask isn't allowed");
5019 	case unsupported_rc_sae:
5020 	  err_msg = _("unsupported static rounding/sae");
5022 	case rc_sae_operand_not_last_imm:
/* Intel vs AT&T syntax read operands in opposite order, hence the
   two alternative messages (the syntax test line is elided).  */
5024 	    err_msg = _("RC/SAE operand must precede immediate operands");
5026 	    err_msg = _("RC/SAE operand must follow immediate operands");
5028 	case invalid_register_operand:
5029 	  err_msg = _("invalid register operand");
5032       as_bad (_("%s for `%s'"), err_msg,
5033 	      current_templates->start->name);
5037   if (!quiet_warnings)
5040 	  && (i.types[0].bitfield.jumpabsolute
5041 	      != operand_types[0].bitfield.jumpabsolute))
5043 	  as_warn (_("indirect %s without `*'"), t->name);
5046       if (t->opcode_modifier.isprefix
5047 	  && t->opcode_modifier.ignoresize)
5049 	  /* Warn them that a data or address size prefix doesn't
5050 	     affect assembly of the next line of code.  */
5051 	  as_warn (_("stand-alone `%s' prefix"), t->name);
5055   /* Copy the template we found.  */
/* Preserve the address-prefix-rewritten disp type in the copy.  */
5058   if (addr_prefix_disp != -1)
5059     i.tm.operand_types[addr_prefix_disp]
5060       = operand_types[addr_prefix_disp];
5062   if (found_reverse_match)
5064       /* If we found a reverse match we must alter the opcode
5065 	 direction bit.  found_reverse_match holds bits to change
5066 	 (different for int & float insns).  */
5068       i.tm.base_opcode ^= found_reverse_match;
5070       i.tm.operand_types[0] = operand_types[1];
5071       i.tm.operand_types[1] = operand_types[0];
/* (check_string — signature elided by sampling.)  Enforce the
   hard-wired %es segment requirement of string instructions: whichever
   memory operand the template marks EsSeg must use (or default to)
   %es; a conflicting explicit override is an error.  */
5080   int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
5081   if (i.tm.operand_types[mem_op].bitfield.esseg)
5083       if (i.seg[0] != NULL && i.seg[0] != &es)
5085 	  as_bad (_("`%s' operand %d must use `%ses' segment"),
5091       /* There's only ever one segment override allowed per instruction.
5092 	 This instruction possibly has a legal segment override on the
5093 	 second operand, so copy the segment to where non-string
5094 	 instructions store it, allowing common code.  */
5095       i.seg[0] = i.seg[1];
5097   else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
5099       if (i.seg[1] != NULL && i.seg[1] != &es)
5101 	  as_bad (_("`%s' operand %d must use `%ses' segment"),
/* Determine the instruction's operand-size suffix — from the template's
   Size16/32/64 modifier, the register operands, defaults, or diagnose
   ambiguity — then adjust the opcode (W bit, short form), emit
   data/address size prefixes, request REX.W for 64-bit operands, and
   size floating point opcodes.  Returns 0 on error (returns elided in
   this sampled view).  */
5112 process_suffix (void)
5114   /* If matched instruction specifies an explicit instruction mnemonic
5116   if (i.tm.opcode_modifier.size16)
5117     i.suffix = WORD_MNEM_SUFFIX;
5118   else if (i.tm.opcode_modifier.size32)
5119     i.suffix = LONG_MNEM_SUFFIX;
5120   else if (i.tm.opcode_modifier.size64)
5121     i.suffix = QWORD_MNEM_SUFFIX;
5122   else if (i.reg_operands)
5124       /* If there's no instruction mnemonic suffix we try to invent one
5125 	 based on register operands.  */
5128 	  /* We take i.suffix from the last register operand specified,
5129 	     Destination register type is more significant than source
5130 	     register type.  crc32 in SSE4.2 prefers source register
/* crc32 (0xf20f38f1): suffix comes from the SOURCE register.  */
5132 	  if (i.tm.base_opcode == 0xf20f38f1)
5134 	      if (i.types[0].bitfield.reg16)
5135 		i.suffix = WORD_MNEM_SUFFIX;
5136 	      else if (i.types[0].bitfield.reg32)
5137 		i.suffix = LONG_MNEM_SUFFIX;
5138 	      else if (i.types[0].bitfield.reg64)
5139 		i.suffix = QWORD_MNEM_SUFFIX;
/* crc32 byte form (0xf20f38f0).  */
5141 	  else if (i.tm.base_opcode == 0xf20f38f0)
5143 	      if (i.types[0].bitfield.reg8)
5144 		i.suffix = BYTE_MNEM_SUFFIX;
/* Still no suffix and source is memory — crc32 size is ambiguous.  */
5151 	      if (i.tm.base_opcode == 0xf20f38f1
5152 		  || i.tm.base_opcode == 0xf20f38f0)
5154 		  /* We have to know the operand size for crc32.  */
5155 		  as_bad (_("ambiguous memory operand size for `%s`"),
/* Generic case: last non-I/O-port register operand decides.  */
5160 	      for (op = i.operands; --op >= 0;)
5161 		if (!i.tm.operand_types[op].bitfield.inoutportreg)
5163 		    if (i.types[op].bitfield.reg8)
5165 			i.suffix = BYTE_MNEM_SUFFIX;
5168 		    else if (i.types[op].bitfield.reg16)
5170 			i.suffix = WORD_MNEM_SUFFIX;
5173 		    else if (i.types[op].bitfield.reg32)
5175 			i.suffix = LONG_MNEM_SUFFIX;
5178 		    else if (i.types[op].bitfield.reg64)
5180 			i.suffix = QWORD_MNEM_SUFFIX;
/* An explicit suffix was given: verify the register operands agree
   with it (Intel IgnoreSize templates skip the check).  */
5186       else if (i.suffix == BYTE_MNEM_SUFFIX)
5189 	      && i.tm.opcode_modifier.ignoresize
5190 	      && i.tm.opcode_modifier.no_bsuf)
5192 	  else if (!check_byte_reg ())
5195       else if (i.suffix == LONG_MNEM_SUFFIX)
5198 	      && i.tm.opcode_modifier.ignoresize
5199 	      && i.tm.opcode_modifier.no_lsuf)
5201 	  else if (!check_long_reg ())
5204       else if (i.suffix == QWORD_MNEM_SUFFIX)
5207 	      && i.tm.opcode_modifier.ignoresize
5208 	      && i.tm.opcode_modifier.no_qsuf)
5210 	  else if (!check_qword_reg ())
5213       else if (i.suffix == WORD_MNEM_SUFFIX)
5216 	      && i.tm.opcode_modifier.ignoresize
5217 	      && i.tm.opcode_modifier.no_wsuf)
5219 	  else if (!check_word_reg ())
5222       else if (i.suffix == XMMWORD_MNEM_SUFFIX
5223 	       || i.suffix == YMMWORD_MNEM_SUFFIX
5224 	       || i.suffix == ZMMWORD_MNEM_SUFFIX)
5226 	  /* Skip if the instruction has x/y/z suffix.  match_template
5227 	     should check if it is a valid suffix.  */
5229       else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
5230 	/* Do nothing if the instruction is going to ignore the prefix.  */
/* No suffix could be derived: apply defaults or report ambiguity.  */
5235   else if (i.tm.opcode_modifier.defaultsize
5237 	   /* exclude fldenv/frstor/fsave/fstenv */
5238 	   && i.tm.opcode_modifier.no_ssuf)
5240       i.suffix = stackop_size;
5242   else if (intel_syntax
5244 	   && (i.tm.operand_types[0].bitfield.jumpabsolute
5245 	       || i.tm.opcode_modifier.jumpbyte
5246 	       || i.tm.opcode_modifier.jumpintersegment
5247 	       || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
5248 		   && i.tm.extension_opcode <= 3)))
/* Intel-syntax branch/descriptor-table insns default to the mode's
   natural size (mode-dispatch switch lines elided).  */
5253 	  if (!i.tm.opcode_modifier.no_qsuf)
5255 	      i.suffix = QWORD_MNEM_SUFFIX;
5259 	  if (!i.tm.opcode_modifier.no_lsuf)
5260 	    i.suffix = LONG_MNEM_SUFFIX;
5263 	  if (!i.tm.opcode_modifier.no_wsuf)
5264 	    i.suffix = WORD_MNEM_SUFFIX;
5273       if (i.tm.opcode_modifier.w)
5275 	  as_bad (_("no instruction mnemonic suffix given and "
5276 		    "no register operands; can't size instruction"));
/* Intel syntax: count how many suffixes the template admits; more
   than one (without Default/IgnoreSize) means the size is ambiguous.  */
5282       unsigned int suffixes;
5284       suffixes = !i.tm.opcode_modifier.no_bsuf;
5285       if (!i.tm.opcode_modifier.no_wsuf)
5287       if (!i.tm.opcode_modifier.no_lsuf)
5289       if (!i.tm.opcode_modifier.no_ldsuf)
5291       if (!i.tm.opcode_modifier.no_ssuf)
5293       if (!i.tm.opcode_modifier.no_qsuf)
5296       /* There are more than suffix matches.  */
5297       if (i.tm.opcode_modifier.w
/* suffixes & (suffixes - 1) is nonzero when the bitmask has more
   than one bit set, i.e. several sizes remain possible.  */
5298 	  || ((suffixes & (suffixes - 1))
5299 	      && !i.tm.opcode_modifier.defaultsize
5300 	      && !i.tm.opcode_modifier.ignoresize))
5302 	  as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
5308   /* Change the opcode based on the operand size given by i.suffix;
5309      We don't need to change things for byte insns.  */
5312       && i.suffix != BYTE_MNEM_SUFFIX
5313       && i.suffix != XMMWORD_MNEM_SUFFIX
5314       && i.suffix != YMMWORD_MNEM_SUFFIX
5315       && i.suffix != ZMMWORD_MNEM_SUFFIX)
5317       /* It's not a byte, select word/dword operation.  */
5318       if (i.tm.opcode_modifier.w)
5320 	  if (i.tm.opcode_modifier.shortform)
5321 	    i.tm.base_opcode |= 8;
5323 	    i.tm.base_opcode |= 1;
5326       /* Now select between word & dword operations via the operand
5327 	 size prefix, except for instructions that will ignore this
5329       if (i.tm.opcode_modifier.addrprefixop0)
5331 	  /* The address size override prefix changes the size of the
5333 	  if ((flag_code == CODE_32BIT
5334 	       && i.op->regs[0].reg_type.bitfield.reg16)
5335 	      || (flag_code != CODE_32BIT
5336 		  && i.op->regs[0].reg_type.bitfield.reg32))
5337 	    if (!add_prefix (ADDR_PREFIX_OPCODE))
5340       else if (i.suffix != QWORD_MNEM_SUFFIX
5341 	       && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
5342 	       && !i.tm.opcode_modifier.ignoresize
5343 	       && !i.tm.opcode_modifier.floatmf
5344 	       && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
5345 		   || (flag_code == CODE_64BIT
5346 		       && i.tm.opcode_modifier.jumpbyte)))
5348 	  unsigned int prefix = DATA_PREFIX_OPCODE;
5350 	  if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
5351 	    prefix = ADDR_PREFIX_OPCODE;
5353 	  if (!add_prefix (prefix))
5357       /* Set mode64 for an operand.  */
5358       if (i.suffix == QWORD_MNEM_SUFFIX
5359 	  && flag_code == CODE_64BIT
5360 	  && !i.tm.opcode_modifier.norex64)
5362 	  /* Special case for xchg %rax,%rax.  It is NOP and doesn't
5363 	     need rex64.  cmpxchg8b is also a special case.  */
5364 	  if (! (i.operands == 2
5365 		 && i.tm.base_opcode == 0x90
5366 		 && i.tm.extension_opcode == None
5367 		 && operand_type_equal (&i.types [0], &acc64)
5368 		 && operand_type_equal (&i.types [1], &acc64))
5369 	      && ! (i.operands == 1
5370 		    && i.tm.base_opcode == 0xfc7
5371 		    && i.tm.extension_opcode == 1
5372 		    && !operand_type_check (i.types [0], reg)
5373 		    && operand_type_check (i.types [0], anymem)))
5377   /* Size floating point instruction.  */
5378   if (i.suffix == LONG_MNEM_SUFFIX)
/* The FloatMF bit toggles the memory-format bit of x87 opcodes.  */
5379     if (i.tm.opcode_modifier.floatmf)
5380       i.tm.base_opcode ^= 4;
/* Validate register operands against an explicit `b' suffix.  8-bit
   registers and I/O-port operands are fine; in non-64-bit mode a low
   word/dword register is silently demoted to its 8-bit alias (with an
   optional warning); anything else is an error.  Returns 0 on error
   (returns elided in this sampled view).  */
5387 check_byte_reg (void)
5391   for (op = i.operands; --op >= 0;)
5393       /* If this is an eight bit register, it's OK.  If it's the 16 or
5394 	 32 bit version of an eight bit register, we will just use the
5395 	 low portion, and that's OK too.  */
5396       if (i.types[op].bitfield.reg8)
5399       /* I/O port address operands are OK too.  */
5400       if (i.tm.operand_types[op].bitfield.inoutportreg)
5403       /* crc32 doesn't generate this warning.  */
5404       if (i.tm.base_opcode == 0xf20f38f0)
/* reg_num < 4 restricts the demotion to %al..%dl aliases.  */
5407       if ((i.types[op].bitfield.reg16
5408 	   || i.types[op].bitfield.reg32
5409 	   || i.types[op].bitfield.reg64)
5410 	  && i.op[op].regs->reg_num < 4
5411 	  /* Prohibit these changes in 64bit mode, since the lowering
5412 	     would be more complicated.  */
5413 	  && flag_code != CODE_64BIT)
5415 #if REGISTER_WARNINGS
5416 	  if (!quiet_warnings)
5417 	    as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
/* Index from the wide register's table entry to its 8-bit alias.  */
5419 		     (i.op[op].regs + (i.types[op].bitfield.reg16
5420 				       ? REGNAM_AL - REGNAM_AX
5421 				       : REGNAM_AL - REGNAM_EAX))->reg_name,
5423 		     i.op[op].regs->reg_name,
5428       /* Any other register is bad.  */
5429       if (i.types[op].bitfield.reg16
5430 	  || i.types[op].bitfield.reg32
5431 	  || i.types[op].bitfield.reg64
5432 	  || i.types[op].bitfield.regmmx
5433 	  || i.types[op].bitfield.regxmm
5434 	  || i.types[op].bitfield.regymm
5435 	  || i.types[op].bitfield.regzmm
5436 	  || i.types[op].bitfield.sreg2
5437 	  || i.types[op].bitfield.sreg3
5438 	  || i.types[op].bitfield.control
5439 	  || i.types[op].bitfield.debug
5440 	  || i.types[op].bitfield.test
5441 	  || i.types[op].bitfield.floatreg
5442 	  || i.types[op].bitfield.floatacc)
5444 	  as_bad (_("`%s%s' not allowed with `%s%c'"),
5446 		  i.op[op].regs->reg_name,
/* Validate register operands against an explicit `l' suffix: reject
   8-bit registers (unless the template wants them, e.g. movzb),
   promote 16-bit registers to their e-prefixed form outside 64-bit
   mode (warning), and reject/convert 64-bit registers.  Returns 0 on
   error (returns elided in this sampled view).  */
5456 check_long_reg (void)
5460   for (op = i.operands; --op >= 0;)
5461     /* Reject eight bit registers, except where the template requires
5462        them. (eg. movzb)  */
5463     if (i.types[op].bitfield.reg8
5464 	&& (i.tm.operand_types[op].bitfield.reg16
5465 	    || i.tm.operand_types[op].bitfield.reg32
5466 	    || i.tm.operand_types[op].bitfield.acc))
5468 	as_bad (_("`%s%s' not allowed with `%s%c'"),
5470 		i.op[op].regs->reg_name,
5475     /* Warn if the e prefix on a general reg is missing.  */
5476     else if ((!quiet_warnings || flag_code == CODE_64BIT)
5477 	     && i.types[op].bitfield.reg16
5478 	     && (i.tm.operand_types[op].bitfield.reg32
5479 		 || i.tm.operand_types[op].bitfield.acc))
5481 	/* Prohibit these changes in the 64bit mode, since the
5482 	   lowering is more complicated.  */
5483 	if (flag_code == CODE_64BIT)
5485 	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5486 		    register_prefix, i.op[op].regs->reg_name,
5490 #if REGISTER_WARNINGS
5491 	  as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
/* Step from the 16-bit register entry to its 32-bit alias.  */
5493 		   (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
5494 		   register_prefix, i.op[op].regs->reg_name, i.suffix);
5497     /* Warn if the r prefix on a general reg is present.  */
5498     else if (i.types[op].bitfield.reg64
5499 	     && (i.tm.operand_types[op].bitfield.reg32
5500 		 || i.tm.operand_types[op].bitfield.acc))
/* Templates marked ToQword (e.g. cvt insns with non-XMM source)
   silently upgrade the suffix instead of erroring.  */
5503 	    && i.tm.opcode_modifier.toqword
5504 	    && !i.types[0].bitfield.regxmm)
5506 	    /* Convert to QWORD.  We want REX byte.  */
5507 	    i.suffix = QWORD_MNEM_SUFFIX;
5511 	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5512 		    register_prefix, i.op[op].regs->reg_name,
/* Validate register operands against a 'q' (64-bit) mnemonic suffix:
   reject 8-bit registers unless the template requires them, and handle
   16/32-bit registers used with a 32-bit template slot via the ToDword
   conversion (dropping the REX requirement) or an error.
   NOTE(review): interior lines are elided in this chunk; returns and
   some closing braces are not visible.  */
5521 check_qword_reg (void)
5525 for (op = i.operands; --op >= 0; )
5526 /* Reject eight bit registers, except where the template requires
5527 them. (eg. movzb) */
5528 if (i.types[op].bitfield.reg8
5529 && (i.tm.operand_types[op].bitfield.reg16
5530 || i.tm.operand_types[op].bitfield.reg32
5531 || i.tm.operand_types[op].bitfield.acc))
5533 as_bad (_("`%s%s' not allowed with `%s%c'"),
5535 i.op[op].regs->reg_name,
5540 /* Warn if the r prefix on a general reg is missing. */
5541 else if ((i.types[op].bitfield.reg16
5542 || i.types[op].bitfield.reg32)
5543 && (i.tm.operand_types[op].bitfield.reg32
5544 || i.tm.operand_types[op].bitfield.acc))
5546 /* Prohibit these changes in the 64bit mode, since the
5547 lowering is more complicated. */
5549 && i.tm.opcode_modifier.todword
5550 && !i.types[0].bitfield.regxmm)
5552 /* Convert to DWORD. We don't want REX byte. */
5553 i.suffix = LONG_MNEM_SUFFIX;
5557 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5558 register_prefix, i.op[op].regs->reg_name,
/* Validate register operands against a 'w' (16-bit) mnemonic suffix:
   reject 8-bit registers unless the template requires them, and warn or
   error when a 32/64-bit register is used where a 16-bit one is
   expected (silently lowering to the 16-bit alias outside 64-bit mode).
   NOTE(review): interior lines are elided in this chunk.  */
5567 check_word_reg (void)
5570 for (op = i.operands; --op >= 0;)
5571 /* Reject eight bit registers, except where the template requires
5572 them. (eg. movzb) */
5573 if (i.types[op].bitfield.reg8
5574 && (i.tm.operand_types[op].bitfield.reg16
5575 || i.tm.operand_types[op].bitfield.reg32
5576 || i.tm.operand_types[op].bitfield.acc))
5578 as_bad (_("`%s%s' not allowed with `%s%c'"),
5580 i.op[op].regs->reg_name,
5585 /* Warn if the e or r prefix on a general reg is present. */
5586 else if ((!quiet_warnings || flag_code == CODE_64BIT)
5587 && (i.types[op].bitfield.reg32
5588 || i.types[op].bitfield.reg64)
5589 && (i.tm.operand_types[op].bitfield.reg16
5590 || i.tm.operand_types[op].bitfield.acc))
5592 /* Prohibit these changes in the 64bit mode, since the
5593 lowering is more complicated. */
5594 if (flag_code == CODE_64BIT)
5596 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5597 register_prefix, i.op[op].regs->reg_name,
5601 #if REGISTER_WARNINGS
/* REGNAM_AX - REGNAM_EAX is the table offset from the 32-bit reg to
   its 16-bit counterpart.  */
5602 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5604 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
5605 register_prefix, i.op[op].regs->reg_name, i.suffix);
/* Narrow the immediate-operand type mask of operand J to a single
   immediate size, using the instruction suffix when present, or the
   current operand-size (16-bit mode XOR data prefix) when the operand
   still allows several of imm16/imm32/imm32s.  Emits as_bad when no
   suffix is given and the size remains ambiguous.
   NOTE(review): return statements and some braces are elided in this
   chunk — presumably returns nonzero on success, 0 on error; verify
   against the caller at "Update the first 2 immediate operands".  */
5612 update_imm (unsigned int j)
5614 i386_operand_type overlap = i.types[j];
5615 if ((overlap.bitfield.imm8
5616 || overlap.bitfield.imm8s
5617 || overlap.bitfield.imm16
5618 || overlap.bitfield.imm32
5619 || overlap.bitfield.imm32s
5620 || overlap.bitfield.imm64)
5621 && !operand_type_equal (&overlap, &imm8)
5622 && !operand_type_equal (&overlap, &imm8s)
5623 && !operand_type_equal (&overlap, &imm16)
5624 && !operand_type_equal (&overlap, &imm32)
5625 && !operand_type_equal (&overlap, &imm32s)
5626 && !operand_type_equal (&overlap, &imm64))
5630 i386_operand_type temp;
5632 operand_type_set (&temp, 0);
/* Keep only the size(s) selected by the explicit suffix.  */
5633 if (i.suffix == BYTE_MNEM_SUFFIX)
5635 temp.bitfield.imm8 = overlap.bitfield.imm8;
5636 temp.bitfield.imm8s = overlap.bitfield.imm8s;
5638 else if (i.suffix == WORD_MNEM_SUFFIX)
5639 temp.bitfield.imm16 = overlap.bitfield.imm16;
5640 else if (i.suffix == QWORD_MNEM_SUFFIX)
5642 temp.bitfield.imm64 = overlap.bitfield.imm64;
5643 temp.bitfield.imm32s = overlap.bitfield.imm32s;
5646 temp.bitfield.imm32 = overlap.bitfield.imm32;
/* No suffix: disambiguate 16-vs-32 bit immediates from the effective
   operand size (mode XORed with the data-size prefix).  */
5649 else if (operand_type_equal (&overlap, &imm16_32_32s)
5650 || operand_type_equal (&overlap, &imm16_32)
5651 || operand_type_equal (&overlap, &imm16_32s))
5653 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5658 if (!operand_type_equal (&overlap, &imm8)
5659 && !operand_type_equal (&overlap, &imm8s)
5660 && !operand_type_equal (&overlap, &imm16)
5661 && !operand_type_equal (&overlap, &imm32)
5662 && !operand_type_equal (&overlap, &imm32s)
5663 && !operand_type_equal (&overlap, &imm64))
5665 as_bad (_("no instruction mnemonic suffix given; "
5666 "can't determine immediate size"));
5670 i.types[j] = overlap;
/* NOTE(review): fragment — the enclosing function's header is elided in
   this chunk.  Visible logic: run update_imm() over at most the first
   two operands and assert that a third operand is never an immediate.  */
5680 /* Update the first 2 immediate operands. */
5681 n = i.operands > 2 ? 2 : i.operands;
5684 for (j = 0; j < n; j++)
5685 if (update_imm (j) == 0)
5688 /* The 3rd operand can't be immediate operand. */
5689 gas_assert (operand_type_check (i.types[2], imm) == 0);
/* Report an error for an instruction whose implicit operand must be
   xmm0 (XMM non-zero) or ymm0 but a different register was given.
   NOTE(review): the return type line and the condition selecting
   first/last operand wording are elided in this chunk.  */
5696 bad_implicit_operand (int xmm)
5698 const char *ireg = xmm ? "xmm0" : "ymm0";
5701 as_bad (_("the last operand of `%s' must be `%s%s'"),
5702 i.tm.name, register_prefix, ireg);
5704 as_bad (_("the first operand of `%s' must be `%s%s'"),
5705 i.tm.name, register_prefix, ireg);
/* Massage the parsed operands into the final encoding form: duplicate
   the destination for SSE2AVX conversions, add/remove the implicit
   xmm0/ymm0/zmm0 operand, apply RegKludge and ShortForm encodings,
   delegate ModRM construction to build_modrm_byte, and emit any needed
   segment-override prefix.
   FIX(review): repaired HTML-entity mojibake — "(R)xmm"-style tokens
   (U+00AE) restored to "&regxmm" / "&regymm" / "&regzmm".
   NOTE(review): many interior lines are elided in this chunk; the
   control structure shown is incomplete.  */
5710 process_operands (void)
5712 /* Default segment register this instruction will use for memory
5713 accesses. 0 means unknown. This is only for optimizing out
5714 unnecessary segment overrides. */
5715 const seg_entry *default_seg = 0;
5717 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5719 unsigned int dupl = i.operands;
5720 unsigned int dest = dupl - 1;
5723 /* The destination must be an xmm register. */
5724 gas_assert (i.reg_operands
5725 && MAX_OPERANDS > dupl
5726 && operand_type_equal (&i.types[dest], &regxmm));
5728 if (i.tm.opcode_modifier.firstxmm0)
5730 /* The first operand is implicit and must be xmm0. */
5731 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5732 if (register_number (i.op[0].regs) != 0)
5733 return bad_implicit_operand (1);
5735 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5737 /* Keep xmm0 for instructions with VEX prefix and 3
5743 /* We remove the first xmm0 and keep the number of
5744 operands unchanged, which in fact duplicates the
5746 for (j = 1; j < i.operands; j++)
5748 i.op[j - 1] = i.op[j];
5749 i.types[j - 1] = i.types[j];
5750 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5754 else if (i.tm.opcode_modifier.implicit1stxmm0)
5756 gas_assert ((MAX_OPERANDS - 1) > dupl
5757 && (i.tm.opcode_modifier.vexsources
5760 /* Add the implicit xmm0 for instructions with VEX prefix
5762 for (j = i.operands; j > 0; j--)
5764 i.op[j] = i.op[j - 1];
5765 i.types[j] = i.types[j - 1];
5766 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5769 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5770 i.types[0] = regxmm;
5771 i.tm.operand_types[0] = regxmm;
5774 i.reg_operands += 2;
5779 i.op[dupl] = i.op[dest];
5780 i.types[dupl] = i.types[dest];
5781 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5790 i.op[dupl] = i.op[dest];
5791 i.types[dupl] = i.types[dest];
5792 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5795 if (i.tm.opcode_modifier.immext)
5798 else if (i.tm.opcode_modifier.firstxmm0)
5802 /* The first operand is implicit and must be xmm0/ymm0/zmm0. */
5803 gas_assert (i.reg_operands
5804 && (operand_type_equal (&i.types[0], &regxmm)
5805 || operand_type_equal (&i.types[0], &regymm)
5806 || operand_type_equal (&i.types[0], &regzmm)));
5807 if (register_number (i.op[0].regs) != 0)
5808 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5810 for (j = 1; j < i.operands; j++)
5812 i.op[j - 1] = i.op[j];
5813 i.types[j - 1] = i.types[j];
5815 /* We need to adjust fields in i.tm since they are used by
5816 build_modrm_byte. */
5817 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5824 else if (i.tm.opcode_modifier.regkludge)
5826 /* The imul $imm, %reg instruction is converted into
5827 imul $imm, %reg, %reg, and the clr %reg instruction
5828 is converted into xor %reg, %reg. */
5830 unsigned int first_reg_op;
5832 if (operand_type_check (i.types[0], reg))
5836 /* Pretend we saw the extra register operand. */
5837 gas_assert (i.reg_operands == 1
5838 && i.op[first_reg_op + 1].regs == 0);
5839 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5840 i.types[first_reg_op + 1] = i.types[first_reg_op];
5845 if (i.tm.opcode_modifier.shortform)
5847 if (i.types[0].bitfield.sreg2
5848 || i.types[0].bitfield.sreg3)
5850 if (i.tm.base_opcode == POP_SEG_SHORT
5851 && i.op[0].regs->reg_num == 1)
5853 as_bad (_("you can't `pop %scs'"), register_prefix);
5856 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5857 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5862 /* The register or float register operand is in operand
5866 if (i.types[0].bitfield.floatreg
5867 || operand_type_check (i.types[0], reg))
5871 /* Register goes in low 3 bits of opcode. */
5872 i.tm.base_opcode |= i.op[op].regs->reg_num;
5873 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5875 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5877 /* Warn about some common errors, but press on regardless.
5878 The first case can be generated by gcc (<= 2.8.1). */
5879 if (i.operands == 2)
5881 /* Reversed arguments on faddp, fsubp, etc. */
5882 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5883 register_prefix, i.op[!intel_syntax].regs->reg_name,
5884 register_prefix, i.op[intel_syntax].regs->reg_name);
5888 /* Extraneous `l' suffix on fp insn. */
5889 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5890 register_prefix, i.op[0].regs->reg_name);
5895 else if (i.tm.opcode_modifier.modrm)
5897 /* The opcode is completed (modulo i.tm.extension_opcode which
5898 must be put into the modrm byte). Now, we make the modrm and
5899 index base bytes based on all the info we've collected. */
5901 default_seg = build_modrm_byte ();
5903 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5907 else if (i.tm.opcode_modifier.isstring)
5909 /* For the string instructions that allow a segment override
5910 on one of their operands, the default segment is ds. */
5914 if (i.tm.base_opcode == 0x8d /* lea */
5917 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5919 /* If a segment was explicitly specified, and the specified segment
5920 is not the default, use an opcode prefix to select it. If we
5921 never figured out what the default segment is, then default_seg
5922 will be zero at this point, and the specified segment prefix will
5924 if ((i.seg[0]) && (i.seg[0] != default_seg))
5926 if (!add_prefix (i.seg[0]->seg_prefix))
/* Build the ModRM byte (and SIB byte / VEX.vvvv register specifier /
   Vec_Imm4 immediate) from the collected operand information, and
   return the default segment register implied by the addressing mode
   (or 0 when unknown).
   FIX(review): repaired HTML-entity mojibake — "(R)xmm"-style tokens
   (U+00AE) restored to "&regxmm" / "&regymm" / "&regzmm" / "&regmask".
   NOTE(review): many interior lines are elided in this chunk; the
   control structure shown is incomplete.  */
5932 static const seg_entry *
5933 build_modrm_byte (void)
5935 const seg_entry *default_seg = 0;
5936 unsigned int source, dest;
5939 /* The first operand of instructions with VEX prefix and 3 sources
5940 must be VEX_Imm4. */
5941 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5944 unsigned int nds, reg_slot;
5947 if (i.tm.opcode_modifier.veximmext
5948 && i.tm.opcode_modifier.immext)
5950 dest = i.operands - 2;
5951 gas_assert (dest == 3);
5954 dest = i.operands - 1;
5957 /* There are 2 kinds of instructions:
5958 1. 5 operands: 4 register operands or 3 register operands
5959 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5960 VexW0 or VexW1. The destination must be either XMM, YMM or
5962 2. 4 operands: 4 register operands or 3 register operands
5963 plus 1 memory operand, VexXDS, and VexImmExt */
5964 gas_assert ((i.reg_operands == 4
5965 || (i.reg_operands == 3 && i.mem_operands == 1))
5966 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5967 && (i.tm.opcode_modifier.veximmext
5968 || (i.imm_operands == 1
5969 && i.types[0].bitfield.vec_imm4
5970 && (i.tm.opcode_modifier.vexw == VEXW0
5971 || i.tm.opcode_modifier.vexw == VEXW1)
5972 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5973 || operand_type_equal (&i.tm.operand_types[dest], &regymm)
5974 || operand_type_equal (&i.tm.operand_types[dest], &regzmm)))));
5976 if (i.imm_operands == 0)
5978 /* When there is no immediate operand, generate an 8bit
5979 immediate operand to encode the first operand. */
5980 exp = &im_expressions[i.imm_operands++];
5981 i.op[i.operands].imms = exp;
5982 i.types[i.operands] = imm8;
5984 /* If VexW1 is set, the first operand is the source and
5985 the second operand is encoded in the immediate operand. */
5986 if (i.tm.opcode_modifier.vexw == VEXW1)
5997 /* FMA swaps REG and NDS. */
5998 if (i.tm.cpu_flags.bitfield.cpufma)
6006 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
6008 || operand_type_equal (&i.tm.operand_types[reg_slot],
6010 || operand_type_equal (&i.tm.operand_types[reg_slot],
/* Encode the register operand in the top nibble of the synthesized
   imm8, per the VEX /is4 convention.  */
6012 exp->X_op = O_constant;
6013 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
6014 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
6018 unsigned int imm_slot;
6020 if (i.tm.opcode_modifier.vexw == VEXW0)
6022 /* If VexW0 is set, the third operand is the source and
6023 the second operand is encoded in the immediate
6030 /* VexW1 is set, the second operand is the source and
6031 the third operand is encoded in the immediate
6037 if (i.tm.opcode_modifier.immext)
6039 /* When ImmExt is set, the immdiate byte is the last
6041 imm_slot = i.operands - 1;
6049 /* Turn on Imm8 so that output_imm will generate it. */
6050 i.types[imm_slot].bitfield.imm8 = 1;
6053 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
6055 || operand_type_equal (&i.tm.operand_types[reg_slot],
6057 || operand_type_equal (&i.tm.operand_types[reg_slot],
6059 i.op[imm_slot].imms->X_add_number
6060 |= register_number (i.op[reg_slot].regs) << 4;
6061 gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
6064 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
6065 || operand_type_equal (&i.tm.operand_types[nds],
6067 || operand_type_equal (&i.tm.operand_types[nds],
6069 i.vex.register_specifier = i.op[nds].regs;
6074 /* i.reg_operands MUST be the number of real register operands;
6075 implicit registers do not count. If there are 3 register
6076 operands, it must be a instruction with VexNDS. For a
6077 instruction with VexNDD, the destination register is encoded
6078 in VEX prefix. If there are 4 register operands, it must be
6079 a instruction with VEX prefix and 3 sources. */
6080 if (i.mem_operands == 0
6081 && ((i.reg_operands == 2
6082 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
6083 || (i.reg_operands == 3
6084 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
6085 || (i.reg_operands == 4 && vex_3_sources)))
6093 /* When there are 3 operands, one of them may be immediate,
6094 which may be the first or the last operand. Otherwise,
6095 the first operand must be shift count register (cl) or it
6096 is an instruction with VexNDS. */
6097 gas_assert (i.imm_operands == 1
6098 || (i.imm_operands == 0
6099 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
6100 || i.types[0].bitfield.shiftcount)));
6101 if (operand_type_check (i.types[0], imm)
6102 || i.types[0].bitfield.shiftcount)
6108 /* When there are 4 operands, the first two must be 8bit
6109 immediate operands. The source operand will be the 3rd
6112 For instructions with VexNDS, if the first operand
6113 an imm8, the source operand is the 2nd one. If the last
6114 operand is imm8, the source operand is the first one. */
6115 gas_assert ((i.imm_operands == 2
6116 && i.types[0].bitfield.imm8
6117 && i.types[1].bitfield.imm8)
6118 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
6119 && i.imm_operands == 1
6120 && (i.types[0].bitfield.imm8
6121 || i.types[i.operands - 1].bitfield.imm8
6123 if (i.imm_operands == 2)
6127 if (i.types[0].bitfield.imm8)
6134 if (i.tm.opcode_modifier.evex)
6136 /* For EVEX instructions, when there are 5 operands, the
6137 first one must be immediate operand. If the second one
6138 is immediate operand, the source operand is the 3th
6139 one. If the last one is immediate operand, the source
6140 operand is the 2nd one. */
6141 gas_assert (i.imm_operands == 2
6142 && i.tm.opcode_modifier.sae
6143 && operand_type_check (i.types[0], imm));
6144 if (operand_type_check (i.types[1], imm))
6146 else if (operand_type_check (i.types[4], imm))
6160 /* RC/SAE operand could be between DEST and SRC. That happens
6161 when one operand is GPR and the other one is XMM/YMM/ZMM
6163 if (i.rounding && i.rounding->operand == (int) dest)
6166 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6168 /* For instructions with VexNDS, the register-only source
6169 operand must be 32/64bit integer, XMM, YMM or ZMM
6170 register. It is encoded in VEX prefix. We need to
6171 clear RegMem bit before calling operand_type_equal. */
6173 i386_operand_type op;
6176 /* Check register-only source operand when two source
6177 operands are swapped. */
6178 if (!i.tm.operand_types[source].bitfield.baseindex
6179 && i.tm.operand_types[dest].bitfield.baseindex)
6187 op = i.tm.operand_types[vvvv];
6188 op.bitfield.regmem = 0;
/* NOTE(review): the reg32/reg64 part of this condition looks
   asymmetric; intervening lines are elided here, so verify the full
   expression against the upstream source before relying on it.  */
6189 if ((dest + 1) >= i.operands
6190 || (!op.bitfield.reg32
6191 && op.bitfield.reg64
6192 && !operand_type_equal (&op, &regxmm)
6193 && !operand_type_equal (&op, &regymm)
6194 && !operand_type_equal (&op, &regzmm)
6195 && !operand_type_equal (&op, &regmask)))
6197 i.vex.register_specifier = i.op[vvvv].regs;
6203 /* One of the register operands will be encoded in the i.tm.reg
6204 field, the other in the combined i.tm.mode and i.tm.regmem
6205 fields. If no form of this instruction supports a memory
6206 destination operand, then we assume the source operand may
6207 sometimes be a memory operand and so we need to store the
6208 destination in the i.rm.reg field. */
6209 if (!i.tm.operand_types[dest].bitfield.regmem
6210 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
6212 i.rm.reg = i.op[dest].regs->reg_num;
6213 i.rm.regmem = i.op[source].regs->reg_num;
6214 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6216 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6218 if ((i.op[source].regs->reg_flags & RegRex) != 0)
6220 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6225 i.rm.reg = i.op[source].regs->reg_num;
6226 i.rm.regmem = i.op[dest].regs->reg_num;
6227 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6229 if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6231 if ((i.op[source].regs->reg_flags & RegRex) != 0)
6233 if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6236 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
6238 if (!i.types[0].bitfield.control
6239 && !i.types[1].bitfield.control)
6241 i.rex &= ~(REX_R | REX_B);
6242 add_prefix (LOCK_PREFIX_OPCODE);
6246 { /* If it's not 2 reg operands... */
6251 unsigned int fake_zero_displacement = 0;
6254 for (op = 0; op < i.operands; op++)
6255 if (operand_type_check (i.types[op], anymem))
6257 gas_assert (op < i.operands);
6259 if (i.tm.opcode_modifier.vecsib)
6261 if (i.index_reg->reg_num == RegEiz
6262 || i.index_reg->reg_num == RegRiz)
6265 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6268 i.sib.base = NO_BASE_REGISTER;
6269 i.sib.scale = i.log2_scale_factor;
6270 /* No Vec_Disp8 if there is no base. */
6271 i.types[op].bitfield.vec_disp8 = 0;
6272 i.types[op].bitfield.disp8 = 0;
6273 i.types[op].bitfield.disp16 = 0;
6274 i.types[op].bitfield.disp64 = 0;
6275 if (flag_code != CODE_64BIT)
6277 /* Must be 32 bit */
6278 i.types[op].bitfield.disp32 = 1;
6279 i.types[op].bitfield.disp32s = 0;
6283 i.types[op].bitfield.disp32 = 0;
6284 i.types[op].bitfield.disp32s = 1;
6287 i.sib.index = i.index_reg->reg_num;
6288 if ((i.index_reg->reg_flags & RegRex) != 0)
6290 if ((i.index_reg->reg_flags & RegVRex) != 0)
6296 if (i.base_reg == 0)
6299 if (!i.disp_operands)
6301 fake_zero_displacement = 1;
6302 /* Instructions with VSIB byte need 32bit displacement
6303 if there is no base register. */
6304 if (i.tm.opcode_modifier.vecsib)
6305 i.types[op].bitfield.disp32 = 1;
6307 if (i.index_reg == 0)
6309 gas_assert (!i.tm.opcode_modifier.vecsib);
6310 /* Operand is just <disp> */
6311 if (flag_code == CODE_64BIT)
6313 /* 64bit mode overwrites the 32bit absolute
6314 addressing by RIP relative addressing and
6315 absolute addressing is encoded by one of the
6316 redundant SIB forms. */
6317 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6318 i.sib.base = NO_BASE_REGISTER;
6319 i.sib.index = NO_INDEX_REGISTER;
6320 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
6321 ? disp32s : disp32);
6323 else if ((flag_code == CODE_16BIT)
6324 ^ (i.prefix[ADDR_PREFIX] != 0))
6326 i.rm.regmem = NO_BASE_REGISTER_16;
6327 i.types[op] = disp16;
6331 i.rm.regmem = NO_BASE_REGISTER;
6332 i.types[op] = disp32;
6335 else if (!i.tm.opcode_modifier.vecsib)
6337 /* !i.base_reg && i.index_reg */
6338 if (i.index_reg->reg_num == RegEiz
6339 || i.index_reg->reg_num == RegRiz)
6340 i.sib.index = NO_INDEX_REGISTER;
6342 i.sib.index = i.index_reg->reg_num;
6343 i.sib.base = NO_BASE_REGISTER;
6344 i.sib.scale = i.log2_scale_factor;
6345 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6346 /* No Vec_Disp8 if there is no base. */
6347 i.types[op].bitfield.vec_disp8 = 0;
6348 i.types[op].bitfield.disp8 = 0;
6349 i.types[op].bitfield.disp16 = 0;
6350 i.types[op].bitfield.disp64 = 0;
6351 if (flag_code != CODE_64BIT)
6353 /* Must be 32 bit */
6354 i.types[op].bitfield.disp32 = 1;
6355 i.types[op].bitfield.disp32s = 0;
6359 i.types[op].bitfield.disp32 = 0;
6360 i.types[op].bitfield.disp32s = 1;
6362 if ((i.index_reg->reg_flags & RegRex) != 0)
6366 /* RIP addressing for 64bit mode. */
6367 else if (i.base_reg->reg_num == RegRip ||
6368 i.base_reg->reg_num == RegEip)
6370 gas_assert (!i.tm.opcode_modifier.vecsib);
6371 i.rm.regmem = NO_BASE_REGISTER;
6372 i.types[op].bitfield.disp8 = 0;
6373 i.types[op].bitfield.disp16 = 0;
6374 i.types[op].bitfield.disp32 = 0;
6375 i.types[op].bitfield.disp32s = 1;
6376 i.types[op].bitfield.disp64 = 0;
6377 i.types[op].bitfield.vec_disp8 = 0;
6378 i.flags[op] |= Operand_PCrel;
6379 if (! i.disp_operands)
6380 fake_zero_displacement = 1;
6382 else if (i.base_reg->reg_type.bitfield.reg16)
6384 gas_assert (!i.tm.opcode_modifier.vecsib);
6385 switch (i.base_reg->reg_num)
6388 if (i.index_reg == 0)
6390 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
6391 i.rm.regmem = i.index_reg->reg_num - 6;
6395 if (i.index_reg == 0)
6398 if (operand_type_check (i.types[op], disp) == 0)
6400 /* fake (%bp) into 0(%bp) */
6401 if (i.tm.operand_types[op].bitfield.vec_disp8)
6402 i.types[op].bitfield.vec_disp8 = 1;
6404 i.types[op].bitfield.disp8 = 1;
6405 fake_zero_displacement = 1;
6408 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
6409 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
6411 default: /* (%si) -> 4 or (%di) -> 5 */
6412 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
6414 i.rm.mode = mode_from_disp_size (i.types[op]);
6416 else /* i.base_reg and 32/64 bit mode */
6418 if (flag_code == CODE_64BIT
6419 && operand_type_check (i.types[op], disp))
6421 i386_operand_type temp;
6422 operand_type_set (&temp, 0);
6423 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
6424 temp.bitfield.vec_disp8
6425 = i.types[op].bitfield.vec_disp8;
6427 if (i.prefix[ADDR_PREFIX] == 0)
6428 i.types[op].bitfield.disp32s = 1;
6430 i.types[op].bitfield.disp32 = 1;
6433 if (!i.tm.opcode_modifier.vecsib)
6434 i.rm.regmem = i.base_reg->reg_num;
6435 if ((i.base_reg->reg_flags & RegRex) != 0)
6437 i.sib.base = i.base_reg->reg_num;
6438 /* x86-64 ignores REX prefix bit here to avoid decoder
6440 if (!(i.base_reg->reg_flags & RegRex)
6441 && (i.base_reg->reg_num == EBP_REG_NUM
6442 || i.base_reg->reg_num == ESP_REG_NUM))
6444 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
6446 fake_zero_displacement = 1;
6447 if (i.tm.operand_types [op].bitfield.vec_disp8)
6448 i.types[op].bitfield.vec_disp8 = 1;
6450 i.types[op].bitfield.disp8 = 1;
6452 i.sib.scale = i.log2_scale_factor;
6453 if (i.index_reg == 0)
6455 gas_assert (!i.tm.opcode_modifier.vecsib);
6456 /* <disp>(%esp) becomes two byte modrm with no index
6457 register. We've already stored the code for esp
6458 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
6459 Any base register besides %esp will not use the
6460 extra modrm byte. */
6461 i.sib.index = NO_INDEX_REGISTER;
6463 else if (!i.tm.opcode_modifier.vecsib)
6465 if (i.index_reg->reg_num == RegEiz
6466 || i.index_reg->reg_num == RegRiz)
6467 i.sib.index = NO_INDEX_REGISTER;
6469 i.sib.index = i.index_reg->reg_num;
6470 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6471 if ((i.index_reg->reg_flags & RegRex) != 0)
6476 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
6477 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
6481 if (!fake_zero_displacement
6485 fake_zero_displacement = 1;
6486 if (i.disp_encoding == disp_encoding_8bit)
6487 i.types[op].bitfield.disp8 = 1;
6489 i.types[op].bitfield.disp32 = 1;
6491 i.rm.mode = mode_from_disp_size (i.types[op]);
6495 if (fake_zero_displacement)
6497 /* Fakes a zero displacement assuming that i.types[op]
6498 holds the correct displacement size. */
6501 gas_assert (i.op[op].disps == 0);
6502 exp = &disp_expressions[i.disp_operands++];
6503 i.op[op].disps = exp;
6504 exp->X_op = O_constant;
6505 exp->X_add_number = 0;
6506 exp->X_add_symbol = (symbolS *) 0;
6507 exp->X_op_symbol = (symbolS *) 0;
6515 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
6517 if (operand_type_check (i.types[0], imm))
6518 i.vex.register_specifier = NULL;
6521 /* VEX.vvvv encodes one of the sources when the first
6522 operand is not an immediate. */
6523 if (i.tm.opcode_modifier.vexw == VEXW0)
6524 i.vex.register_specifier = i.op[0].regs;
6526 i.vex.register_specifier = i.op[1].regs;
6529 /* Destination is a XMM register encoded in the ModRM.reg
6531 i.rm.reg = i.op[2].regs->reg_num;
6532 if ((i.op[2].regs->reg_flags & RegRex) != 0)
6535 /* ModRM.rm and VEX.B encodes the other source. */
6536 if (!i.mem_operands)
6540 if (i.tm.opcode_modifier.vexw == VEXW0)
6541 i.rm.regmem = i.op[1].regs->reg_num;
6543 i.rm.regmem = i.op[0].regs->reg_num;
6545 if ((i.op[1].regs->reg_flags & RegRex) != 0)
6549 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
6551 i.vex.register_specifier = i.op[2].regs;
6552 if (!i.mem_operands)
6555 i.rm.regmem = i.op[1].regs->reg_num;
6556 if ((i.op[1].regs->reg_flags & RegRex) != 0)
6560 /* Fill in i.rm.reg or i.rm.regmem field with register operand
6561 (if any) based on i.tm.extension_opcode. Again, we must be
6562 careful to make sure that segment/control/debug/test/MMX
6563 registers are coded into the i.rm.reg field. */
6564 else if (i.reg_operands)
6567 unsigned int vex_reg = ~0;
6569 for (op = 0; op < i.operands; op++)
6570 if (i.types[op].bitfield.reg8
6571 || i.types[op].bitfield.reg16
6572 || i.types[op].bitfield.reg32
6573 || i.types[op].bitfield.reg64
6574 || i.types[op].bitfield.regmmx
6575 || i.types[op].bitfield.regxmm
6576 || i.types[op].bitfield.regymm
6577 || i.types[op].bitfield.regbnd
6578 || i.types[op].bitfield.regzmm
6579 || i.types[op].bitfield.regmask
6580 || i.types[op].bitfield.sreg2
6581 || i.types[op].bitfield.sreg3
6582 || i.types[op].bitfield.control
6583 || i.types[op].bitfield.debug
6584 || i.types[op].bitfield.test)
6589 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6591 /* For instructions with VexNDS, the register-only
6592 source operand is encoded in VEX prefix. */
6593 gas_assert (mem != (unsigned int) ~0);
6598 gas_assert (op < i.operands);
6602 /* Check register-only source operand when two source
6603 operands are swapped. */
6604 if (!i.tm.operand_types[op].bitfield.baseindex
6605 && i.tm.operand_types[op + 1].bitfield.baseindex)
6609 gas_assert (mem == (vex_reg + 1)
6610 && op < i.operands);
6615 gas_assert (vex_reg < i.operands);
6619 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
6621 /* For instructions with VexNDD, the register destination
6622 is encoded in VEX prefix. */
6623 if (i.mem_operands == 0)
6625 /* There is no memory operand. */
6626 gas_assert ((op + 2) == i.operands)
6631 /* There are only 2 operands. */
6632 gas_assert (op < 2 && i.operands == 2);
6637 gas_assert (op < i.operands);
6639 if (vex_reg != (unsigned int) ~0)
6641 i386_operand_type *type = &i.tm.operand_types[vex_reg];
6643 if (type->bitfield.reg32 != 1
6644 && type->bitfield.reg64 != 1
6645 && !operand_type_equal (type, &regxmm)
6646 && !operand_type_equal (type, &regymm)
6647 && !operand_type_equal (type, &regzmm)
6648 && !operand_type_equal (type, &regmask))
6651 i.vex.register_specifier = i.op[vex_reg].regs;
6654 /* Don't set OP operand twice. */
6657 /* If there is an extension opcode to put here, the
6658 register number must be put into the regmem field. */
6659 if (i.tm.extension_opcode != None)
6661 i.rm.regmem = i.op[op].regs->reg_num;
6662 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6664 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
6669 i.rm.reg = i.op[op].regs->reg_num;
6670 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6672 if ((i.op[op].regs->reg_flags & RegVRex) != 0)
6677 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6678 must set it to 3 to indicate this is a register operand
6679 in the regmem field. */
6680 if (!i.mem_operands)
6684 /* Fill in i.rm.reg field with extension opcode (if any). */
6685 if (i.tm.extension_opcode != None)
6686 i.rm.reg = i.tm.extension_opcode;
/* Emit a relaxable (un)conditional branch: write any data/segment-hint/
   REX/BND prefixes and one opcode byte into the fixed part of the frag,
   then start a machine-dependent relaxation variant that md_convert_frag
   will later widen as needed.
   NOTE(review): interior lines (prefix bookkeeping, closing braces) are
   elided in this chunk.  */
6692 output_branch (void)
6698 relax_substateT subtype;
6702 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
6703 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
6706 if (i.prefix[DATA_PREFIX] != 0)
6712 /* Pentium4 branch hints. */
6713 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6714 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6719 if (i.prefix[REX_PREFIX] != 0)
6725 /* BND prefixed jump. */
6726 if (i.prefix[BND_PREFIX] != 0)
6728 FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
6732 if (i.prefixes != 0 && !intel_syntax)
6733 as_warn (_("skipping prefixes on this instruction"));
6735 /* It's always a symbol; End frag & setup for relax.
6736 Make sure there is enough room in this frag for the largest
6737 instruction we may generate in md_convert_frag. This is 2
6738 bytes for the opcode and room for the prefix and largest
6740 frag_grow (prefix + 2 + 4);
6741 /* Prefix and 1 opcode byte go in fr_fix. */
6742 p = frag_more (prefix + 1);
6743 if (i.prefix[DATA_PREFIX] != 0)
6744 *p++ = DATA_PREFIX_OPCODE;
6745 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6746 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6747 *p++ = i.prefix[SEG_PREFIX];
6748 if (i.prefix[REX_PREFIX] != 0)
6749 *p++ = i.prefix[REX_PREFIX];
6750 *p = i.tm.base_opcode;
/* Choose the relaxation state: unconditional jump, i386+ conditional
   jump, or pre-386 conditional jump.  */
6752 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6753 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6754 else if (cpu_arch_flags.bitfield.cpui386)
6755 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6757 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6760 sym = i.op[0].disps->X_add_symbol;
6761 off = i.op[0].disps->X_add_number;
6763 if (i.op[0].disps->X_op != O_constant
6764 && i.op[0].disps->X_op != O_symbol)
6766 /* Handle complex expressions. */
6767 sym = make_expr_symbol (i.op[0].disps);
6771 /* 1 possible extra opcode + 4 byte displacement go in var part.
6772 Pass reloc in fr_var. */
6773 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
/* NOTE(review): body of output_jump() — the function header and several
   interior lines are elided in this chunk.  Emits a non-relaxable jump
   (loop/jecxz or fixed-size jump) with its prefixes, opcode bytes and a
   PC-relative fixup of the chosen displacement size.  */
6783 if (i.tm.opcode_modifier.jumpbyte)
6785 /* This is a loop or jecxz type instruction. */
6787 if (i.prefix[ADDR_PREFIX] != 0)
6789 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6792 /* Pentium4 branch hints. */
6793 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6794 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6796 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6805 if (flag_code == CODE_16BIT)
6808 if (i.prefix[DATA_PREFIX] != 0)
6810 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6820 if (i.prefix[REX_PREFIX] != 0)
6822 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6826 /* BND prefixed jump. */
6827 if (i.prefix[BND_PREFIX] != 0)
6829 FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
6833 if (i.prefixes != 0 && !intel_syntax)
6834 as_warn (_("skipping prefixes on this instruction"));
6836 p = frag_more (i.tm.opcode_length + size);
6837 switch (i.tm.opcode_length)
6840 *p++ = i.tm.base_opcode >> 8;
6842 *p++ = i.tm.base_opcode;
6848 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6849 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6851 /* All jumps handled here are signed, but don't use a signed limit
6852 check for 32 and 16 bit jumps as we want to allow wrap around at
6853 4G and 64k respectively. */
6855 fixP->fx_signed = 1;
/* Emit a direct inter-segment (far) jump/call: optional prefixes, opcode,
   SIZE-byte offset, then a 2-byte segment selector.
   NOTE(review): interior lines are elided in this extract; the variable
   declarations and some control flow are not visible.  */
6859 output_interseg_jump (void)
6867 if (flag_code == CODE_16BIT)
6871 if (i.prefix[DATA_PREFIX] != 0)
6877 if (i.prefix[REX_PREFIX] != 0)
6887 if (i.prefixes != 0 && !intel_syntax)
6888 as_warn (_("skipping prefixes on this instruction"));
/* Layout: prefix byte(s), 1 opcode byte, SIZE-byte offset, 2-byte segment.  */
6890 /* 1 opcode; 2 segment; offset */
6891 p = frag_more (prefix + 1 + 2 + size);
6893 if (i.prefix[DATA_PREFIX] != 0)
6894 *p++ = DATA_PREFIX_OPCODE;
6896 if (i.prefix[REX_PREFIX] != 0)
6897 *p++ = i.prefix[REX_PREFIX];
6899 *p++ = i.tm.base_opcode;
/* Operand 1 is the offset: emit it directly if constant (range-checking
   16-bit offsets), otherwise leave a fixup for the linker.  */
6900 if (i.op[1].imms->X_op == O_constant)
6902 offsetT n = i.op[1].imms->X_add_number;
6905 && !fits_in_unsigned_word (n)
6906 && !fits_in_signed_word (n))
6908 as_bad (_("16-bit jump out of range"));
6911 md_number_to_chars (p, n, size);
6914 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6915 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
/* Operand 0 is the segment selector; only absolute segments are supported.  */
6916 if (i.op[0].imms->X_op != O_constant)
6917 as_bad (_("can't handle non absolute segment in `%s'"),
6919 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
/* NOTE(review): presumably the body of output_insn (the function header is
   not visible in this extract, and many interior lines are elided).  Top
   level of instruction emission: dispatches jumps to the helpers above,
   otherwise emits prefixes, VEX/EVEX bytes, opcode, ModRM/SIB, then
   displacement and immediate operands.  */
6925 fragS *insn_start_frag;
6926 offsetT insn_start_off;
6928 /* Tie dwarf2 debug info to the address at the start of the insn.
6929 We can't do this after the insn has been output as the current
6930 frag may have been closed off. eg. by frag_var. */
6931 dwarf2_emit_insn (0);
/* Remember where the insn starts; output_disp/output_imm need this to
   compute GOTPC-style adjustments across frag boundaries.  */
6933 insn_start_frag = frag_now;
6934 insn_start_off = frag_now_fix ();
/* Branch-type instructions are handled by dedicated emitters.  */
6937 if (i.tm.opcode_modifier.jump)
6939 else if (i.tm.opcode_modifier.jumpbyte
6940 || i.tm.opcode_modifier.jumpdword)
6942 else if (i.tm.opcode_modifier.jumpintersegment)
6943 output_interseg_jump ();
6946 /* Output normal instructions here. */
6950 unsigned int prefix;
6952 /* Some processors fail on LOCK prefix. This options makes
6953 assembler ignore LOCK prefix and serves as a workaround. */
6954 if (omit_lock_prefix)
6956 if (i.tm.base_opcode == LOCK_PREFIX_OPCODE)
6958 i.prefix[LOCK_PREFIX] = 0;
6961 /* Since the VEX/EVEX prefix contains the implicit prefix, we
6962 don't need the explicit prefix. */
6963 if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
/* Mandatory prefixes encoded in the high bytes of base_opcode are
   peeled off and emitted as real prefix bytes.  */
6965 switch (i.tm.opcode_length)
6968 if (i.tm.base_opcode & 0xff000000)
6970 prefix = (i.tm.base_opcode >> 24) & 0xff;
6975 if ((i.tm.base_opcode & 0xff0000) != 0)
6977 prefix = (i.tm.base_opcode >> 16) & 0xff;
/* PadLock insns use a 0xF3 byte that must not be duplicated when the
   user already supplied a REP prefix.  */
6978 if (i.tm.cpu_flags.bitfield.cpupadlock)
6981 if (prefix != REPE_PREFIX_OPCODE
6982 || (i.prefix[REP_PREFIX]
6983 != REPE_PREFIX_OPCODE))
6984 add_prefix (prefix);
6987 add_prefix (prefix);
6996 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
6997 /* For x32, add a dummy REX_OPCODE prefix for mov/add with
6998 R_X86_64_GOTTPOFF relocation so that linker can safely
6999 perform IE->LE optimization. */
7000 if (x86_elf_abi == X86_64_X32_ABI
7002 && i.reloc[0] == BFD_RELOC_X86_64_GOTTPOFF
7003 && i.prefix[REX_PREFIX] == 0)
7004 add_prefix (REX_OPCODE);
7007 /* The prefix bytes. */
7008 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
7010 FRAG_APPEND_1_CHAR (*q);
/* VEX/EVEX path: emit explicit prefixes (REX is folded into the VEX
   encoding), then the VEX/EVEX bytes themselves.  */
7014 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
7019 /* REX byte is encoded in VEX prefix. */
7023 FRAG_APPEND_1_CHAR (*q);
7026 /* There should be no other prefixes for instructions
7031 /* For EVEX instructions i.vrex should become 0 after
7032 build_evex_prefix. For VEX instructions upper 16 registers
7033 aren't available, so VREX should be 0. */
7036 /* Now the VEX prefix. */
7037 p = frag_more (i.vex.length);
7038 for (j = 0; j < i.vex.length; j++)
7039 p[j] = i.vex.bytes[j];
7042 /* Now the opcode; be careful about word order here! */
7043 if (i.tm.opcode_length == 1)
7045 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
7049 switch (i.tm.opcode_length)
7053 *p++ = (i.tm.base_opcode >> 24) & 0xff;
7054 *p++ = (i.tm.base_opcode >> 16) & 0xff;
7058 *p++ = (i.tm.base_opcode >> 16) & 0xff;
7068 /* Put out high byte first: can't use md_number_to_chars! */
7069 *p++ = (i.tm.base_opcode >> 8) & 0xff;
7070 *p = i.tm.base_opcode & 0xff;
7073 /* Now the modrm byte and sib byte (if present). */
7074 if (i.tm.opcode_modifier.modrm)
7076 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
7079 /* If i.rm.regmem == ESP (4)
7080 && i.rm.mode != (Register mode)
7082 ==> need second modrm byte. */
7083 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
7085 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
7086 FRAG_APPEND_1_CHAR ((i.sib.base << 0
7088 | i.sib.scale << 6));
/* Finally the displacement and immediate operand bytes, if any.  */
7091 if (i.disp_operands)
7092 output_disp (insn_start_frag, insn_start_off);
7095 output_imm (insn_start_frag, insn_start_off);
7101 pi ("" /*line*/, &i);
7103 #endif /* DEBUG386 */
7106 /* Return the size of the displacement operand N. */
/* NOTE(review): returns the byte size implied by the disp bitfields of
   i.types[n]; the individual return/assignment lines are elided in this
   extract.  Vec_Disp8 (EVEX compressed disp8) is always 1 byte.  */
7109 disp_size (unsigned int n)
7113 /* Vec_Disp8 has to be 8bit. */
7114 if (i.types[n].bitfield.vec_disp8)
7116 else if (i.types[n].bitfield.disp64)
7118 else if (i.types[n].bitfield.disp8)
7120 else if (i.types[n].bitfield.disp16)
7125 /* Return the size of the immediate operand N. */
/* NOTE(review): mirrors disp_size for the imm bitfields; the size
   assignment lines are elided in this extract.  */
7128 imm_size (unsigned int n)
7131 if (i.types[n].bitfield.imm64)
7133 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
7135 else if (i.types[n].bitfield.imm16)
/* Emit the displacement bytes for every displacement operand of the
   current insn: constants are written directly, anything else becomes a
   fixup, with special handling to turn _GLOBAL_OFFSET_TABLE_ references
   into GOTPC relocations.  NOTE(review): interior lines are elided in
   this extract.  */
7141 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
7146 for (n = 0; n < i.operands; n++)
7148 if (i.types[n].bitfield.vec_disp8
7149 || operand_type_check (i.types[n], disp))
/* Constant displacement: range-check and emit the raw bytes.  */
7151 if (i.op[n].disps->X_op == O_constant)
7153 int size = disp_size (n);
7154 offsetT val = i.op[n].disps->X_add_number;
7156 if (i.types[n].bitfield.vec_disp8)
7158 val = offset_in_range (val, size);
7159 p = frag_more (size);
7160 md_number_to_chars (p, val, size);
/* Non-constant displacement: build a relocation.  */
7164 enum bfd_reloc_code_real reloc_type;
7165 int size = disp_size (n);
7166 int sign = i.types[n].bitfield.disp32s;
7167 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
7169 /* We can't have 8 bit displacement here. */
7170 gas_assert (!i.types[n].bitfield.disp8);
7172 /* The PC relative address is computed relative
7173 to the instruction boundary, so in case immediate
7174 fields follows, we need to adjust the value. */
7175 if (pcrel && i.imm_operands)
7180 for (n1 = 0; n1 < i.operands; n1++)
7181 if (operand_type_check (i.types[n1], imm))
7183 /* Only one immediate is allowed for PC
7184 relative address. */
7185 gas_assert (sz == 0);
7187 i.op[n].disps->X_add_number -= sz;
7189 /* We should find the immediate. */
7190 gas_assert (sz != 0);
7193 p = frag_more (size);
7194 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
/* Detect a reference to _GLOBAL_OFFSET_TABLE_ so it can be converted to
   a GOTPC relocation (see the long explanation in output_imm below).  */
7196 && GOT_symbol == i.op[n].disps->X_add_symbol
7197 && (((reloc_type == BFD_RELOC_32
7198 || reloc_type == BFD_RELOC_X86_64_32S
7199 || (reloc_type == BFD_RELOC_64
7201 && (i.op[n].disps->X_op == O_symbol
7202 || (i.op[n].disps->X_op == O_add
7203 && ((symbol_get_value_expression
7204 (i.op[n].disps->X_op_symbol)->X_op)
7206 || reloc_type == BFD_RELOC_32_PCREL))
/* "add" is the distance from the insn start to the displacement field,
   possibly accumulated across several frags.  */
7210 if (insn_start_frag == frag_now)
7211 add = (p - frag_now->fr_literal) - insn_start_off;
7216 add = insn_start_frag->fr_fix - insn_start_off;
7217 for (fr = insn_start_frag->fr_next;
7218 fr && fr != frag_now; fr = fr->fr_next)
7220 add += p - frag_now->fr_literal;
7225 reloc_type = BFD_RELOC_386_GOTPC;
7226 i.op[n].imms->X_add_number += add;
7228 else if (reloc_type == BFD_RELOC_64)
7229 reloc_type = BFD_RELOC_X86_64_GOTPC64;
7231 /* Don't do the adjustment for x86-64, as there
7232 the pcrel addressing is relative to the _next_
7233 insn, and that is taken care of in other code. */
7234 reloc_type = BFD_RELOC_X86_64_GOTPC32;
7236 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
7237 i.op[n].disps, pcrel, reloc_type);
/* Emit the immediate bytes for every immediate operand of the current
   insn: constants directly, others as fixups, converting GOT-symbol
   references into GOTPC relocations.  NOTE(review): interior lines are
   elided in this extract.  */
7244 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
7249 for (n = 0; n < i.operands; n++)
7251 /* Skip SAE/RC Imm operand in EVEX. They are already handled. */
7252 if (i.rounding && (int) n == i.rounding->operand)
7255 if (operand_type_check (i.types[n], imm))
7257 if (i.op[n].imms->X_op == O_constant)
7259 int size = imm_size (n);
7262 val = offset_in_range (i.op[n].imms->X_add_number,
7264 p = frag_more (size);
7265 md_number_to_chars (p, val, size);
7269 /* Not absolute_section.
7270 Need a 32-bit fixup (don't support 8bit
7271 non-absolute imms). Try to support other
7273 enum bfd_reloc_code_real reloc_type;
7274 int size = imm_size (n);
/* A sign-extended 32-bit immediate is used for 64-bit operand size
   (explicit 'q' suffix, or suffix-less insns lacking an 'l' form).  */
7277 if (i.types[n].bitfield.imm32s
7278 && (i.suffix == QWORD_MNEM_SUFFIX
7279 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
7284 p = frag_more (size);
7285 reloc_type = reloc (size, 0, sign, i.reloc[n]);
7287 /* This is tough to explain. We end up with this one if we
7288 * have operands that look like
7289 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
7290 * obtain the absolute address of the GOT, and it is strongly
7291 * preferable from a performance point of view to avoid using
7292 * a runtime relocation for this. The actual sequence of
7293 * instructions often look something like:
7298 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
7300 * The call and pop essentially return the absolute address
7301 * of the label .L66 and store it in %ebx. The linker itself
7302 * will ultimately change the first operand of the addl so
7303 * that %ebx points to the GOT, but to keep things simple, the
7304 * .o file must have this operand set so that it generates not
7305 * the absolute address of .L66, but the absolute address of
7306 * itself. This allows the linker itself simply treat a GOTPC
7307 * relocation as asking for a pcrel offset to the GOT to be
7308 * added in, and the addend of the relocation is stored in the
7309 * operand field for the instruction itself.
7311 * Our job here is to fix the operand so that it would add
7312 * the correct offset so that %ebx would point to itself. The
7313 * thing that is tricky is that .-.L66 will point to the
7314 * beginning of the instruction, so we need to further modify
7315 * the operand so that it will point to itself. There are
7316 * other cases where you have something like:
7318 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
7320 * and here no correction would be required. Internally in
7321 * the assembler we treat operands of this form as not being
7322 * pcrel since the '.' is explicitly mentioned, and I wonder
7323 * whether it would simplify matters to do it this way. Who
7324 * knows. In earlier versions of the PIC patches, the
7325 * pcrel_adjust field was used to store the correction, but
7326 * since the expression is not pcrel, I felt it would be
7327 * confusing to do it this way. */
7329 if ((reloc_type == BFD_RELOC_32
7330 || reloc_type == BFD_RELOC_X86_64_32S
7331 || reloc_type == BFD_RELOC_64)
7333 && GOT_symbol == i.op[n].imms->X_add_symbol
7334 && (i.op[n].imms->X_op == O_symbol
7335 || (i.op[n].imms->X_op == O_add
7336 && ((symbol_get_value_expression
7337 (i.op[n].imms->X_op_symbol)->X_op)
/* Distance from the insn start to the immediate field, walking
   intervening frags if the insn spans more than one.  */
7342 if (insn_start_frag == frag_now)
7343 add = (p - frag_now->fr_literal) - insn_start_off;
7348 add = insn_start_frag->fr_fix - insn_start_off;
7349 for (fr = insn_start_frag->fr_next;
7350 fr && fr != frag_now; fr = fr->fr_next)
7352 add += p - frag_now->fr_literal;
7356 reloc_type = BFD_RELOC_386_GOTPC;
7358 reloc_type = BFD_RELOC_X86_64_GOTPC32;
7360 reloc_type = BFD_RELOC_X86_64_GOTPC64;
7361 i.op[n].imms->X_add_number += add;
7363 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
7364 i.op[n].imms, 0, reloc_type);
7370 /* x86_cons_fix_new is called via the expression parsing code when a
7371 reloc is needed. We use this hook to get the correct .got reloc. */
/* cons_sign feeds the sign argument of reloc(); -1 means "don't care".  */
7372 static int cons_sign = -1;
/* Create a fixup for a data directive (.long etc.), mapping the parsed
   reloc through reloc() and handling the PE O_secrel pseudo-op.  */
7375 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
7376 expressionS *exp, bfd_reloc_code_real_type r)
7378 r = reloc (len, 0, cons_sign, r);
7381 if (exp->X_op == O_secrel)
7383 exp->X_op = O_symbol;
7384 r = BFD_RELOC_32_SECREL;
7388 fix_new_exp (frag, off, len, exp, 0, r);
7391 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
7392 purpose of the `.dc.a' internal pseudo-op. */
/* For x32 the machine reports 64-bit addresses but the ABI uses 32-bit
   ones; the elided branch presumably returns 4 in that case — confirm.  */
7395 x86_address_bytes (void)
7397 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
7399 return stdoutput->arch_info->bits_per_address / 8;
7402 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
7404 # define lex_got(reloc, adjust, types) NULL
7406 /* Parse operands of the form
7407 <symbol>@GOTOFF+<nnn>
7408 and similar .plt or .got references.
7410 If we find one, set up the correct relocation in RELOC and copy the
7411 input string, minus the `@GOTOFF' into a malloc'd buffer for
7412 parsing by the calling routine. Return this buffer, and if ADJUST
7413 is non-null set it to the length of the string we removed from the
7414 input line. Otherwise return NULL. */
7416 lex_got (enum bfd_reloc_code_real *rel,
7418 i386_operand_type *types)
7420 /* Some of the relocations depend on the size of what field is to
7421 be relocated. But in our callers i386_immediate and i386_displacement
7422 we don't yet know the operand size (this will be set by insn
7423 matching). Hence we record the word32 relocation here,
7424 and adjust the reloc according to the real size in reloc(). */
/* Table of @SUFFIX tokens: rel[0] is the 32-bit (i386) reloc, rel[1]
   the 64-bit (x86-64) one; _dummy_first_bfd_reloc_code_real marks a
   combination that is invalid for that object format.  */
7425 static const struct {
7428 const enum bfd_reloc_code_real rel[2];
7429 const i386_operand_type types64;
7431 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7432 { STRING_COMMA_LEN ("SIZE"), { BFD_RELOC_SIZE32,
7434 OPERAND_TYPE_IMM32_64 },
7436 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
7437 BFD_RELOC_X86_64_PLTOFF64 },
7438 OPERAND_TYPE_IMM64 },
7439 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
7440 BFD_RELOC_X86_64_PLT32 },
7441 OPERAND_TYPE_IMM32_32S_DISP32 },
7442 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
7443 BFD_RELOC_X86_64_GOTPLT64 },
7444 OPERAND_TYPE_IMM64_DISP64 },
7445 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
7446 BFD_RELOC_X86_64_GOTOFF64 },
7447 OPERAND_TYPE_IMM64_DISP64 },
7448 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
7449 BFD_RELOC_X86_64_GOTPCREL },
7450 OPERAND_TYPE_IMM32_32S_DISP32 },
7451 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
7452 BFD_RELOC_X86_64_TLSGD },
7453 OPERAND_TYPE_IMM32_32S_DISP32 },
7454 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
7455 _dummy_first_bfd_reloc_code_real },
7456 OPERAND_TYPE_NONE },
7457 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
7458 BFD_RELOC_X86_64_TLSLD },
7459 OPERAND_TYPE_IMM32_32S_DISP32 },
7460 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
7461 BFD_RELOC_X86_64_GOTTPOFF },
7462 OPERAND_TYPE_IMM32_32S_DISP32 },
7463 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
7464 BFD_RELOC_X86_64_TPOFF32 },
7465 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
7466 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
7467 _dummy_first_bfd_reloc_code_real },
7468 OPERAND_TYPE_NONE },
7469 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
7470 BFD_RELOC_X86_64_DTPOFF32 },
7471 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
7472 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
7473 _dummy_first_bfd_reloc_code_real },
7474 OPERAND_TYPE_NONE },
7475 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
7476 _dummy_first_bfd_reloc_code_real },
7477 OPERAND_TYPE_NONE },
7478 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
7479 BFD_RELOC_X86_64_GOT32 },
7480 OPERAND_TYPE_IMM32_32S_64_DISP32 },
7481 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
7482 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
7483 OPERAND_TYPE_IMM32_32S_DISP32 },
7484 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
7485 BFD_RELOC_X86_64_TLSDESC_CALL },
7486 OPERAND_TYPE_IMM32_32S_DISP32 },
7491 #if defined (OBJ_MAYBE_ELF)
/* Find the '@' that introduces a reloc token; give up at end of line
   or a comma (i.e. the end of this operand).  */
7496 for (cp = input_line_pointer; *cp != '@'; cp++)
7497 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
7500 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
7502 int len = gotrel[j].len;
7503 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
7505 if (gotrel[j].rel[object_64bit] != 0)
7508 char *tmpbuf, *past_reloc;
7510 *rel = gotrel[j].rel[object_64bit];
/* Record the operand types the reloc permits: 32-bit mode always
   means imm32/disp32; 64-bit mode uses the per-entry types64.  */
7514 if (flag_code != CODE_64BIT)
7516 types->bitfield.imm32 = 1;
7517 types->bitfield.disp32 = 1;
7520 *types = gotrel[j].types64;
/* Any GOT-relative token (everything but @SIZE at index 0) forces
   creation of the _GLOBAL_OFFSET_TABLE_ symbol.  */
7523 if (j != 0 && GOT_symbol == NULL)
7524 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
7526 /* The length of the first part of our input line. */
7527 first = cp - input_line_pointer;
7529 /* The second part goes from after the reloc token until
7530 (and including) an end_of_line char or comma. */
7531 past_reloc = cp + 1 + len;
7533 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
7535 second = cp + 1 - past_reloc;
7537 /* Allocate and copy string. The trailing NUL shouldn't
7538 be necessary, but be safe. */
7539 tmpbuf = (char *) xmalloc (first + second + 2);
7540 memcpy (tmpbuf, input_line_pointer, first);
7541 if (second != 0 && *past_reloc != ' ')
7542 /* Replace the relocation token with ' ', so that
7543 errors like foo@GOTOFF1 will be detected. */
7544 tmpbuf[first++] = ' ';
7546 /* Increment length by 1 if the relocation token is
7551 memcpy (tmpbuf + first, past_reloc, second);
7552 tmpbuf[first + second] = '\0';
7556 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7557 gotrel[j].str, 1 << (5 + object_64bit));
7562 /* Might be a symbol version string. Don't as_bad here. */
7571 /* Parse operands of the form
7572 <symbol>@SECREL32+<nnn>
7574 If we find one, set up the correct relocation in RELOC and copy the
7575 input string, minus the `@SECREL32' into a malloc'd buffer for
7576 parsing by the calling routine. Return this buffer, and if ADJUST
7577 is non-null set it to the length of the string we removed from the
7578 input line. Otherwise return NULL.
7580 This function is copied from the ELF version above adjusted for PE targets. */
7583 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
7584 int *adjust ATTRIBUTE_UNUSED,
7585 i386_operand_type *types)
/* PE only knows one reloc token: @SECREL32 (same reloc for both sizes).  */
7591 const enum bfd_reloc_code_real rel[2];
7592 const i386_operand_type types64;
7596 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
7597 BFD_RELOC_32_SECREL },
7598 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
/* Locate the '@' introducing the token, bounded by end-of-line/comma.  */
7604 for (cp = input_line_pointer; *cp != '@'; cp++)
7605 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
7608 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
7610 int len = gotrel[j].len;
7612 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
7614 if (gotrel[j].rel[object_64bit] != 0)
7617 char *tmpbuf, *past_reloc;
7619 *rel = gotrel[j].rel[object_64bit];
7625 if (flag_code != CODE_64BIT)
7627 types->bitfield.imm32 = 1;
7628 types->bitfield.disp32 = 1;
7631 *types = gotrel[j].types64;
7634 /* The length of the first part of our input line. */
7635 first = cp - input_line_pointer;
7637 /* The second part goes from after the reloc token until
7638 (and including) an end_of_line char or comma. */
7639 past_reloc = cp + 1 + len;
7641 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
7643 second = cp + 1 - past_reloc;
7645 /* Allocate and copy string. The trailing NUL shouldn't
7646 be necessary, but be safe. */
7647 tmpbuf = (char *) xmalloc (first + second + 2);
7648 memcpy (tmpbuf, input_line_pointer, first);
7649 if (second != 0 && *past_reloc != ' ')
7650 /* Replace the relocation token with ' ', so that
7651 errors like foo@SECLREL321 will be detected. */
7652 tmpbuf[first++] = ' ';
7653 memcpy (tmpbuf + first, past_reloc, second);
7654 tmpbuf[first + second] = '\0';
7658 as_bad (_("@%s reloc is not supported with %d-bit output format"),
7659 gotrel[j].str, 1 << (5 + object_64bit));
7664 /* Might be a symbol version string. Don't as_bad here. */
/* Parse the expression of a data directive (.long etc.), recognizing
   @GOTOFF-style suffixes via lex_got.  Returns the reloc to apply.
   NOTE(review): interior lines are elided in this extract.  */
7670 bfd_reloc_code_real_type
7671 x86_cons (expressionS *exp, int size)
7673 bfd_reloc_code_real_type got_reloc = NO_RELOC;
/* Temporarily negate intel_syntax — presumably a flag used elsewhere to
   mark "inside x86_cons"; restored below.  */
7675 intel_syntax = -intel_syntax;
7678 if (size == 4 || (object_64bit && size == 8))
7680 /* Handle @GOTOFF and the like in an expression. */
7682 char *gotfree_input_line;
7685 save = input_line_pointer;
7686 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
7687 if (gotfree_input_line)
7688 input_line_pointer = gotfree_input_line;
7692 if (gotfree_input_line)
7694 /* expression () has merrily parsed up to the end of line,
7695 or a comma - in the wrong buffer. Transfer how far
7696 input_line_pointer has moved to the right buffer. */
7697 input_line_pointer = (save
7698 + (input_line_pointer - gotfree_input_line)
7700 free (gotfree_input_line);
/* After stripping a reloc token only a symbolic expression makes
   sense; everything else is reported as invalid.  */
7701 if (exp->X_op == O_constant
7702 || exp->X_op == O_absent
7703 || exp->X_op == O_illegal
7704 || exp->X_op == O_register
7705 || exp->X_op == O_big)
7707 char c = *input_line_pointer;
7708 *input_line_pointer = 0;
7709 as_bad (_("missing or invalid expression `%s'"), save);
7710 *input_line_pointer = c;
7717 intel_syntax = -intel_syntax;
7720 i386_intel_simplify (exp);
/* NOTE(review): body almost entirely elided in this extract; appears to
   emit signed data (used by .sleb-style/.dc directives), with special
   handling in 64-bit mode — confirm against the full source.  */
7726 signed_cons (int size)
7728 if (flag_code == CODE_64BIT)
/* Handler for the PE `.secrel32' directive: emit one or more 32-bit
   section-relative values, comma separated.  Symbolic operands are
   tagged O_secrel so x86_cons_fix_new picks BFD_RELOC_32_SECREL.  */
7736 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
7743 if (exp.X_op == O_symbol)
7744 exp.X_op = O_secrel;
7746 emit_expr (&exp, 4);
7748 while (*input_line_pointer++ == ',');
/* Back up over the character that ended the list, then require EOL.  */
7750 input_line_pointer--;
7751 demand_empty_rest_of_line ();
7755 /* Handle Vector operations. */
/* Parse trailing AVX-512 vector decorations on an operand: broadcast
   ({1to8} etc.), write mask ({%k1}) and zeroing ({z}).  Results go into
   i.broadcast / i.mask.  NOTE(review): interior lines (returns, error
   labels) are elided in this extract.  */
7758 check_VecOperations (char *op_string, char *op_end)
7760 const reg_entry *mask;
7765 && (op_end == NULL || op_string < op_end))
7768 if (*op_string == '{')
7772 /* Check broadcasts. */
7773 if (strncmp (op_string, "1to", 3) == 0)
7778 goto duplicated_vec_op;
7781 if (*op_string == '8')
7782 bcst_type = BROADCAST_1TO8;
7783 else if (*op_string == '4')
7784 bcst_type = BROADCAST_1TO4;
7785 else if (*op_string == '2')
7786 bcst_type = BROADCAST_1TO2;
7787 else if (*op_string == '1'
7788 && *(op_string+1) == '6')
7790 bcst_type = BROADCAST_1TO16;
7795 as_bad (_("Unsupported broadcast: `%s'"), saved);
7800 broadcast_op.type = bcst_type;
7801 broadcast_op.operand = this_operand;
7802 i.broadcast = &broadcast_op;
7804 /* Check masking operation. */
7805 else if ((mask = parse_register (op_string, &end_op)) != NULL)
7807 /* k0 can't be used for write mask. */
7808 if (mask->reg_num == 0)
7810 as_bad (_("`%s' can't be used for write mask"),
7817 mask_op.mask = mask;
7818 mask_op.zeroing = 0;
7819 mask_op.operand = this_operand;
7825 goto duplicated_vec_op;
7827 i.mask->mask = mask;
7829 /* Only "{z}" is allowed here. No need to check
7830 zeroing mask explicitly. */
7831 if (i.mask->operand != this_operand)
7833 as_bad (_("invalid write mask `%s'"), saved);
7840 /* Check zeroing-flag for masking operation. */
7841 else if (*op_string == 'z')
7845 mask_op.mask = NULL;
7846 mask_op.zeroing = 1;
7847 mask_op.operand = this_operand;
7852 if (i.mask->zeroing)
/* Duplicate {z} on the same operand is an error.  */
7855 as_bad (_("duplicated `%s'"), saved);
7859 i.mask->zeroing = 1;
7861 /* Only "{%k}" is allowed here. No need to check mask
7862 register explicitly. */
7863 if (i.mask->operand != this_operand)
7865 as_bad (_("invalid zeroing-masking `%s'"),
7874 goto unknown_vec_op;
/* Every decoration must be closed by '}'.  */
7876 if (*op_string != '}')
7878 as_bad (_("missing `}' in `%s'"), saved);
7885 /* We don't know this one. */
7886 as_bad (_("unknown vector operation: `%s'"), saved);
/* Parse an immediate operand (text after the '$' in AT&T syntax) into
   i.op[this_operand].imms, handling @GOT-style reloc tokens and AVX-512
   vector decorations; delegates validation to i386_finalize_immediate.
   NOTE(review): interior lines are elided in this extract.  */
7894 i386_immediate (char *imm_start)
7896 char *save_input_line_pointer;
7897 char *gotfree_input_line;
7900 i386_operand_type types;
7902 operand_type_set (&types, ~0);
7904 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
7906 as_bad (_("at most %d immediate operands are allowed"),
7907 MAX_IMMEDIATE_OPERANDS);
7911 exp = &im_expressions[i.imm_operands++];
7912 i.op[this_operand].imms = exp;
7914 if (is_space_char (*imm_start))
/* Point the scrubber at our operand text; restored below.  */
7917 save_input_line_pointer = input_line_pointer;
7918 input_line_pointer = imm_start;
7920 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7921 if (gotfree_input_line)
7922 input_line_pointer = gotfree_input_line;
7924 exp_seg = expression (exp);
7928 /* Handle vector operations. */
7929 if (*input_line_pointer == '{')
7931 input_line_pointer = check_VecOperations (input_line_pointer,
7933 if (input_line_pointer == NULL)
7937 if (*input_line_pointer)
7938 as_bad (_("junk `%s' after expression"), input_line_pointer);
7940 input_line_pointer = save_input_line_pointer;
7941 if (gotfree_input_line)
7943 free (gotfree_input_line);
/* A bare constant or register after a reloc token is meaningless —
   force it into the error path of the finalizer.  */
7945 if (exp->X_op == O_constant || exp->X_op == O_register)
7946 exp->X_op = O_illegal;
7949 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
/* Validate a parsed immediate and set the imm* type bits on
   i.types[this_operand].  NOTE(review): interior lines (returns) are
   elided in this extract.  */
7953 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7954 i386_operand_type types, const char *imm_start)
7956 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
7959 as_bad (_("missing or invalid immediate expression `%s'"),
7963 else if (exp->X_op == O_constant)
7965 /* Size it properly later. */
7966 i.types[this_operand].bitfield.imm64 = 1;
7967 /* If not 64bit, sign extend val. */
7968 if (flag_code != CODE_64BIT
7969 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
7971 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
7973 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7974 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
7975 && exp_seg != absolute_section
7976 && exp_seg != text_section
7977 && exp_seg != data_section
7978 && exp_seg != bss_section
7979 && exp_seg != undefined_section
7980 && !bfd_is_com_section (exp_seg))
7982 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
/* In AT&T syntax a register can't appear in an immediate; Intel syntax
   reaches here legitimately (e.g. "mov eax, offset").  */
7986 else if (!intel_syntax && exp_seg == reg_section)
7989 as_bad (_("illegal immediate register operand %s"), imm_start);
7994 /* This is an address. The size of the address will be
7995 determined later, depending on destination register,
7996 suffix, or the default for the section. */
7997 i.types[this_operand].bitfield.imm8 = 1;
7998 i.types[this_operand].bitfield.imm16 = 1;
7999 i.types[this_operand].bitfield.imm32 = 1;
8000 i.types[this_operand].bitfield.imm32s = 1;
8001 i.types[this_operand].bitfield.imm64 = 1;
/* Intersect with the sizes the reloc token (if any) allows.  */
8002 i.types[this_operand] = operand_type_and (i.types[this_operand],
/* Parse the scale factor of a base/index memory operand; must be 1, 2,
   4 or 8, stored as log2 in i.log2_scale_factor.  NOTE(review): the
   switch cases' labels and return lines are elided in this extract.  */
8010 i386_scale (char *scale)
8013 char *save = input_line_pointer;
8015 input_line_pointer = scale;
8016 val = get_absolute_expression ();
8021 i.log2_scale_factor = 0;
8024 i.log2_scale_factor = 1;
8027 i.log2_scale_factor = 2;
8030 i.log2_scale_factor = 3;
/* Any other value: report the offending text and bail out.  */
8034 char sep = *input_line_pointer;
8036 *input_line_pointer = '\0';
8037 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
8039 *input_line_pointer = sep;
8040 input_line_pointer = save;
/* A scale without an index register is meaningless; warn and drop it.  */
8044 if (i.log2_scale_factor != 0 && i.index_reg == 0)
8046 as_warn (_("scale factor of %d without an index register"),
8047 1 << i.log2_scale_factor);
8048 i.log2_scale_factor = 0;
8050 scale = input_line_pointer;
8051 input_line_pointer = save;
/* Parse a displacement operand (the DISP_START..DISP_END text) into
   i.op[this_operand].disps, first computing which Disp sizes are
   plausible from the insn kind, mode, and prefixes.  NOTE(review):
   interior lines are elided in this extract.  */
8056 i386_displacement (char *disp_start, char *disp_end)
8060 char *save_input_line_pointer;
8061 char *gotfree_input_line;
8063 i386_operand_type bigdisp, types = anydisp;
8066 if (i.disp_operands == MAX_MEMORY_OPERANDS)
8068 as_bad (_("at most %d displacement operands are allowed"),
8069 MAX_MEMORY_OPERANDS);
/* Memory-operand displacement: width follows address size.  */
8073 operand_type_set (&bigdisp, 0);
8074 if ((i.types[this_operand].bitfield.jumpabsolute)
8075 || (!current_templates->start->opcode_modifier.jump
8076 && !current_templates->start->opcode_modifier.jumpdword))
8078 bigdisp.bitfield.disp32 = 1;
8079 override = (i.prefix[ADDR_PREFIX] != 0);
8080 if (flag_code == CODE_64BIT)
8084 bigdisp.bitfield.disp32s = 1;
8085 bigdisp.bitfield.disp64 = 1;
8088 else if ((flag_code == CODE_16BIT) ^ override)
8090 bigdisp.bitfield.disp32 = 0;
8091 bigdisp.bitfield.disp16 = 1;
8096 /* For PC-relative branches, the width of the displacement
8097 is dependent upon data size, not address size. */
8098 override = (i.prefix[DATA_PREFIX] != 0);
8099 if (flag_code == CODE_64BIT)
8101 if (override || i.suffix == WORD_MNEM_SUFFIX)
8102 bigdisp.bitfield.disp16 = 1;
8105 bigdisp.bitfield.disp32 = 1;
8106 bigdisp.bitfield.disp32s = 1;
8112 override = (i.suffix == (flag_code != CODE_16BIT
8114 : LONG_MNEM_SUFFIX));
8115 bigdisp.bitfield.disp32 = 1;
8116 if ((flag_code == CODE_16BIT) ^ override)
8118 bigdisp.bitfield.disp32 = 0;
8119 bigdisp.bitfield.disp16 = 1;
8123 i.types[this_operand] = operand_type_or (i.types[this_operand],
8126 exp = &disp_expressions[i.disp_operands];
8127 i.op[this_operand].disps = exp;
8129 save_input_line_pointer = input_line_pointer;
8130 input_line_pointer = disp_start;
8131 END_STRING_AND_SAVE (disp_end);
8133 #ifndef GCC_ASM_O_HACK
8134 #define GCC_ASM_O_HACK 0
8137 END_STRING_AND_SAVE (disp_end + 1);
8138 if (i.types[this_operand].bitfield.baseIndex
8139 && displacement_string_end[-1] == '+')
8141 /* This hack is to avoid a warning when using the "o"
8142 constraint within gcc asm statements.
8145 #define _set_tssldt_desc(n,addr,limit,type) \
8146 __asm__ __volatile__ ( \
8148 "movw %w1,2+%0\n\t" \
8150 "movb %b1,4+%0\n\t" \
8151 "movb %4,5+%0\n\t" \
8152 "movb $0,6+%0\n\t" \
8153 "movb %h1,7+%0\n\t" \
8155 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
8157 This works great except that the output assembler ends
8158 up looking a bit weird if it turns out that there is
8159 no offset. You end up producing code that looks like:
8172 So here we provide the missing zero. */
8174 *displacement_string_end = '0';
8177 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
8178 if (gotfree_input_line)
8179 input_line_pointer = gotfree_input_line;
8181 exp_seg = expression (exp);
8184 if (*input_line_pointer)
8185 as_bad (_("junk `%s' after expression"), input_line_pointer);
8187 RESTORE_END_STRING (disp_end + 1);
8189 input_line_pointer = save_input_line_pointer;
8190 if (gotfree_input_line)
8192 free (gotfree_input_line);
/* As in i386_immediate: a constant/register after a reloc token makes
   no sense; route it to the finalizer's error path.  */
8194 if (exp->X_op == O_constant || exp->X_op == O_register)
8195 exp->X_op = O_illegal;
8198 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
8200 RESTORE_END_STRING (disp_end);
/* Validate a parsed displacement, rewrite GOT-relative relocations as
   symbol-minus-GOT subtractions, range-check 64-bit constants, and trim
   the Disp type bits.  NOTE(review): interior lines are elided in this
   extract.  */
8206 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
8207 i386_operand_type types, const char *disp_start)
8209 i386_operand_type bigdisp;
8212 /* We do this to make sure that the section symbol is in
8213 the symbol table. We will ultimately change the relocation
8214 to be relative to the beginning of the section. */
8215 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
8216 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
8217 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8219 if (exp->X_op != O_symbol)
8222 if (S_IS_LOCAL (exp->X_add_symbol)
8223 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
8224 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
8225 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
/* Re-express as (symbol - _GLOBAL_OFFSET_TABLE_) with a plain reloc.  */
8226 exp->X_op = O_subtract;
8227 exp->X_op_symbol = GOT_symbol;
8228 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
8229 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
8230 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8231 i.reloc[this_operand] = BFD_RELOC_64;
8233 i.reloc[this_operand] = BFD_RELOC_32;
8236 else if (exp->X_op == O_absent
8237 || exp->X_op == O_illegal
8238 || exp->X_op == O_big)
8241 as_bad (_("missing or invalid displacement expression `%s'"),
8246 else if (flag_code == CODE_64BIT
8247 && !i.prefix[ADDR_PREFIX]
8248 && exp->X_op == O_constant)
8250 /* Since displacement is signed extended to 64bit, don't allow
8251 disp32 and turn off disp32s if they are out of range. */
8252 i.types[this_operand].bitfield.disp32 = 0;
8253 if (!fits_in_signed_long (exp->X_add_number))
8255 i.types[this_operand].bitfield.disp32s = 0;
8256 if (i.types[this_operand].bitfield.baseindex)
8258 as_bad (_("0x%lx out range of signed 32bit displacement"),
8259 (long) exp->X_add_number);
8265 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8266 else if (exp->X_op != O_constant
8267 && OUTPUT_FLAVOR == bfd_target_aout_flavour
8268 && exp_seg != absolute_section
8269 && exp_seg != text_section
8270 && exp_seg != data_section
8271 && exp_seg != bss_section
8272 && exp_seg != undefined_section
8273 && !bfd_is_com_section (exp_seg))
8275 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
8280 /* Check if this is a displacement only operand. */
8281 bigdisp = i.types[this_operand];
8282 bigdisp.bitfield.disp8 = 0;
8283 bigdisp.bitfield.disp16 = 0;
8284 bigdisp.bitfield.disp32 = 0;
8285 bigdisp.bitfield.disp32s = 0;
8286 bigdisp.bitfield.disp64 = 0;
/* If nothing but disp bits was set, this operand is displacement-only;
   intersect with the types the reloc token permits.  */
8287 if (operand_type_all_zero (&bigdisp))
8288 i.types[this_operand] = operand_type_and (i.types[this_operand],
8294 /* Make sure the memory operand we've been dealt is valid.
8295 Return 1 on success, 0 on a failure. */
8298 i386_index_check (const char *operand_string)
8300 const char *kind = "base/index";
8301 enum flag_code addr_mode;
8303 if (i.prefix[ADDR_PREFIX])
8304 addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
8307 addr_mode = flag_code;
8309 #if INFER_ADDR_PREFIX
8310 if (i.mem_operands == 0)
8312 /* Infer address prefix from the first memory operand. */
8313 const reg_entry *addr_reg = i.base_reg;
8315 if (addr_reg == NULL)
8316 addr_reg = i.index_reg;
8320 if (addr_reg->reg_num == RegEip
8321 || addr_reg->reg_num == RegEiz
8322 || addr_reg->reg_type.bitfield.reg32)
8323 addr_mode = CODE_32BIT;
8324 else if (flag_code != CODE_64BIT
8325 && addr_reg->reg_type.bitfield.reg16)
8326 addr_mode = CODE_16BIT;
8328 if (addr_mode != flag_code)
8330 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
8332 /* Change the size of any displacement too. At most one
8333 of Disp16 or Disp32 is set.
8334 FIXME. There doesn't seem to be any real need for
8335 separate Disp16 and Disp32 flags. The same goes for
8336 Imm16 and Imm32. Removing them would probably clean
8337 up the code quite a lot. */
8338 if (flag_code != CODE_64BIT
8339 && (i.types[this_operand].bitfield.disp16
8340 || i.types[this_operand].bitfield.disp32))
8341 i.types[this_operand]
8342 = operand_type_xor (i.types[this_operand], disp16_32);
8349 if (current_templates->start->opcode_modifier.isstring
8350 && !current_templates->start->opcode_modifier.immext
8351 && (current_templates->end[-1].opcode_modifier.isstring
8354 /* Memory operands of string insns are special in that they only allow
8355 a single register (rDI, rSI, or rBX) as their memory address. */
8356 const reg_entry *expected_reg;
8357 static const char *di_si[][2] =
8363 static const char *bx[] = { "ebx", "bx", "rbx" };
8365 kind = "string address";
8367 if (current_templates->start->opcode_modifier.w)
8369 i386_operand_type type = current_templates->end[-1].operand_types[0];
8371 if (!type.bitfield.baseindex
8372 || ((!i.mem_operands != !intel_syntax)
8373 && current_templates->end[-1].operand_types[1]
8374 .bitfield.baseindex))
8375 type = current_templates->end[-1].operand_types[1];
8376 expected_reg = hash_find (reg_hash,
8377 di_si[addr_mode][type.bitfield.esseg]);
8381 expected_reg = hash_find (reg_hash, bx[addr_mode]);
8383 if (i.base_reg != expected_reg
8385 || operand_type_check (i.types[this_operand], disp))
8387 /* The second memory operand must have the same size as
8391 && !((addr_mode == CODE_64BIT
8392 && i.base_reg->reg_type.bitfield.reg64)
8393 || (addr_mode == CODE_32BIT
8394 ? i.base_reg->reg_type.bitfield.reg32
8395 : i.base_reg->reg_type.bitfield.reg16)))
8398 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
8400 intel_syntax ? '[' : '(',
8402 expected_reg->reg_name,
8403 intel_syntax ? ']' : ')');
8410 as_bad (_("`%s' is not a valid %s expression"),
8411 operand_string, kind);
8416 if (addr_mode != CODE_16BIT)
8418 /* 32-bit/64-bit checks. */
8420 && (addr_mode == CODE_64BIT
8421 ? !i.base_reg->reg_type.bitfield.reg64
8422 : !i.base_reg->reg_type.bitfield.reg32)
8424 || (i.base_reg->reg_num
8425 != (addr_mode == CODE_64BIT ? RegRip : RegEip))))
8427 && !i.index_reg->reg_type.bitfield.regxmm
8428 && !i.index_reg->reg_type.bitfield.regymm
8429 && !i.index_reg->reg_type.bitfield.regzmm
8430 && ((addr_mode == CODE_64BIT
8431 ? !(i.index_reg->reg_type.bitfield.reg64
8432 || i.index_reg->reg_num == RegRiz)
8433 : !(i.index_reg->reg_type.bitfield.reg32
8434 || i.index_reg->reg_num == RegEiz))
8435 || !i.index_reg->reg_type.bitfield.baseindex)))
8440 /* 16-bit checks. */
8442 && (!i.base_reg->reg_type.bitfield.reg16
8443 || !i.base_reg->reg_type.bitfield.baseindex))
8445 && (!i.index_reg->reg_type.bitfield.reg16
8446 || !i.index_reg->reg_type.bitfield.baseindex
8448 && i.base_reg->reg_num < 6
8449 && i.index_reg->reg_num >= 6
8450 && i.log2_scale_factor == 0))))
8457 /* Handle vector immediates. */
/* Parse an EVEX rounding-control / suppress-all-exceptions pseudo
   operand such as {rn-sae}.  On a match it records the RC type in the
   static rc_op, points i.rounding at it, and synthesizes a zero imm8
   operand.  (Listing is non-contiguous; the return statements and the
   brace structure are not shown.)  */
8460 RC_SAE_immediate (const char *imm_start)
8462 unsigned int match_found, j;
8463 const char *pstr = imm_start;
/* Try each known RC/SAE name in turn.  */
8471 for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
8473 if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
8477 rc_op.type = RC_NamesTable[j].type;
8478 rc_op.operand = this_operand;
8479 i.rounding = &rc_op;
/* Only one RC/SAE specifier is allowed per instruction.  */
8483 as_bad (_("duplicated `%s'"), imm_start);
8486 pstr += RC_NamesTable[j].len;
/* The specifier must be properly closed by '}' and followed by
   nothing else.  */
8496 as_bad (_("Missing '}': '%s'"), imm_start);
8499 /* RC/SAE immediate string should contain nothing more. */;
8502 as_bad (_("Junk after '}': '%s'"), imm_start);
/* Internally the RC/SAE operand is represented as an imm8 of zero.  */
8506 exp = &im_expressions[i.imm_operands++];
8507 i.op[this_operand].imms = exp;
8509 exp->X_op = O_constant;
8510 exp->X_add_number = 0;
8511 exp->X_add_symbol = (symbolS *) 0;
8512 exp->X_op_symbol = (symbolS *) 0;
8514 i.types[this_operand].bitfield.imm8 = 1;
8518 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
/* AT&T-syntax operand parser: classifies the operand as an absolute
   jump target, register (with optional segment override / vector
   operation), immediate, RC/SAE pseudo-immediate, or memory reference,
   and fills in i.types/i.op/i.seg/i.base_reg/i.index_reg accordingly.
   NOTE(review): embedded line numbers skip; braces, returns and a few
   statements are missing from this listing.  */
8522 i386_att_operand (char *operand_string)
8526 char *op_string = operand_string;
8528 if (is_space_char (*op_string))
8531 /* We check for an absolute prefix (differentiating,
8532 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
8533 if (*op_string == ABSOLUTE_PREFIX)
8536 if (is_space_char (*op_string))
8538 i.types[this_operand].bitfield.jumpabsolute = 1;
8541 /* Check if operand is a register. */
8542 if ((r = parse_register (op_string, &end_op)) != NULL)
8544 i386_operand_type temp;
8546 /* Check for a segment override by searching for ':' after a
8547 segment register. */
8549 if (is_space_char (*op_string))
8551 if (*op_string == ':'
8552 && (r->reg_type.bitfield.sreg2
8553 || r->reg_type.bitfield.sreg3))
/* Record which segment register overrides this memory operand.  */
8558 i.seg[i.mem_operands] = &es;
8561 i.seg[i.mem_operands] = &cs;
8564 i.seg[i.mem_operands] = &ss;
8567 i.seg[i.mem_operands] = &ds;
8570 i.seg[i.mem_operands] = &fs;
8573 i.seg[i.mem_operands] = &gs;
8577 /* Skip the ':' and whitespace. */
8579 if (is_space_char (*op_string))
/* After a segment override only a memory reference may follow.  */
8582 if (!is_digit_char (*op_string)
8583 && !is_identifier_char (*op_string)
8584 && *op_string != '('
8585 && *op_string != ABSOLUTE_PREFIX)
8587 as_bad (_("bad memory operand `%s'"), op_string);
8590 /* Handle case of %es:*foo. */
8591 if (*op_string == ABSOLUTE_PREFIX)
8594 if (is_space_char (*op_string))
8596 i.types[this_operand].bitfield.jumpabsolute = 1;
8598 goto do_memory_reference;
8601 /* Handle vector operations. */
/* A '{' after a register introduces an AVX-512 masking/broadcast
   specification, e.g. {%k1}{z}.  */
8602 if (*op_string == '{')
8604 op_string = check_VecOperations (op_string, NULL);
8605 if (op_string == NULL)
8611 as_bad (_("junk `%s' after register"), op_string);
/* Plain register operand: fold its type in, minus BaseIndex which
   only applies when the register addresses memory.  */
8615 temp.bitfield.baseindex = 0;
8616 i.types[this_operand] = operand_type_or (i.types[this_operand],
8618 i.types[this_operand].bitfield.unspecified = 0;
8619 i.op[this_operand].regs = r;
8622 else if (*op_string == REGISTER_PREFIX)
8624 as_bad (_("bad register name `%s'"), op_string);
8627 else if (*op_string == IMMEDIATE_PREFIX)
8630 if (i.types[this_operand].bitfield.jumpabsolute)
8632 as_bad (_("immediate operand illegal with absolute jump"));
8635 if (!i386_immediate (op_string))
8638 else if (RC_SAE_immediate (operand_string))
8640 /* If it is a RC or SAE immediate, do nothing. */
8643 else if (is_digit_char (*op_string)
8644 || is_identifier_char (*op_string)
8645 || *op_string == '(')
8647 /* This is a memory reference of some sort. */
8650 /* Start and end of displacement string expression (if found). */
8651 char *displacement_string_start;
8652 char *displacement_string_end;
8655 do_memory_reference:
/* Only string insns may take more than one memory operand.  */
8656 if ((i.mem_operands == 1
8657 && !current_templates->start->opcode_modifier.isstring)
8658 || i.mem_operands == 2)
8660 as_bad (_("too many memory references for `%s'"),
8661 current_templates->start->name);
8665 /* Check for base index form. We detect the base index form by
8666 looking for an ')' at the end of the operand, searching
8667 for the '(' matching it, and finding a REGISTER_PREFIX or ','
8669 base_string = op_string + strlen (op_string);
8671 /* Handle vector operations. */
8672 vop_start = strchr (op_string, '{');
8673 if (vop_start && vop_start < base_string)
8675 if (check_VecOperations (vop_start, base_string) == NULL)
8677 base_string = vop_start;
8681 if (is_space_char (*base_string))
8684 /* If we only have a displacement, set-up for it to be parsed later. */
8685 displacement_string_start = op_string;
8686 displacement_string_end = base_string + 1;
8688 if (*base_string == ')')
8691 unsigned int parens_balanced = 1;
8692 /* We've already checked that the number of left & right ()'s are
8693 equal, so this loop will not be infinite. */
/* Scan backwards to the matching '(' of the trailing ')'.  */
8697 if (*base_string == ')')
8699 if (*base_string == '(')
8702 while (parens_balanced);
8704 temp_string = base_string;
8706 /* Skip past '(' and whitespace. */
8708 if (is_space_char (*base_string))
8711 if (*base_string == ','
8712 || ((i.base_reg = parse_register (base_string, &end_op))
/* Found base-index form; displacement, if any, precedes the '('.  */
8715 displacement_string_end = temp_string;
8717 i.types[this_operand].bitfield.baseindex = 1;
8721 base_string = end_op;
8722 if (is_space_char (*base_string))
8726 /* There may be an index reg or scale factor here. */
8727 if (*base_string == ',')
8730 if (is_space_char (*base_string))
8733 if ((i.index_reg = parse_register (base_string, &end_op))
8736 base_string = end_op;
8737 if (is_space_char (*base_string))
8739 if (*base_string == ',')
8742 if (is_space_char (*base_string))
8745 else if (*base_string != ')')
8747 as_bad (_("expecting `,' or `)' "
8748 "after index register in `%s'"),
8753 else if (*base_string == REGISTER_PREFIX)
8755 end_op = strchr (base_string, ',');
8758 as_bad (_("bad register name `%s'"), base_string);
8762 /* Check for scale factor. */
8763 if (*base_string != ')')
8765 char *end_scale = i386_scale (base_string);
8770 base_string = end_scale;
8771 if (is_space_char (*base_string))
8773 if (*base_string != ')')
8775 as_bad (_("expecting `)' "
8776 "after scale factor in `%s'"),
8781 else if (!i.index_reg)
8783 as_bad (_("expecting index register or scale factor "
8784 "after `,'; got '%c'"),
8789 else if (*base_string != ')')
8791 as_bad (_("expecting `,' or `)' "
8792 "after base register in `%s'"),
8797 else if (*base_string == REGISTER_PREFIX)
8799 end_op = strchr (base_string, ',');
8802 as_bad (_("bad register name `%s'"), base_string);
8807 /* If there's an expression beginning the operand, parse it,
8808 assuming displacement_string_start and
8809 displacement_string_end are meaningful. */
8810 if (displacement_string_start != displacement_string_end)
8812 if (!i386_displacement (displacement_string_start,
8813 displacement_string_end))
8817 /* Special case for (%dx) while doing input/output op. */
/* NOTE(review): `®16_inoutportreg' below looks like mojibake for
   `&reg16_inoutportreg' -- fix the file's encoding.  */
8819 && operand_type_equal (&i.base_reg->reg_type,
8820 ®16_inoutportreg)
8822 && i.log2_scale_factor == 0
8823 && i.seg[i.mem_operands] == 0
8824 && !operand_type_check (i.types[this_operand], disp))
8826 i.types[this_operand] = inoutportreg;
/* Final validation of the assembled base/index/displacement.  */
8830 if (i386_index_check (operand_string) == 0)
8832 i.types[this_operand].bitfield.mem = 1;
8837 /* It's not a memory operand; argh! */
8838 as_bad (_("invalid char %s beginning operand %d `%s'"),
8839 output_invalid (*op_string),
8844 return 1; /* Normal return. */
8847 /* Calculate the maximum variable size (i.e., excluding fr_fix)
8848 that an rs_machine_dependent frag may reach. */
/* Called by gas core code to size relaxation worst cases; the frag
   must already be rs_machine_dependent.  */
8851 i386_frag_max_var (fragS *frag)
8853 /* The only relaxable frags are for jumps.
8854 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
8855 gas_assert (frag->fr_type == rs_machine_dependent);
8856 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
8859 /* md_estimate_size_before_relax()
8861 Called just before relax() for rs_machine_dependent frags. The x86
8862 assembler uses these frags to handle variable size jump
8865 Any symbol that is now undefined will not become defined.
8866 Return the correct fr_subtype in the frag.
8867 Return the initial "guess for variable size of frag" to caller.
8868 The guess is actually the growth beyond the fixed part. Whatever
8869 we do to grow the fixed or variable part contributes to our
/* NOTE(review): line numbers skip in this listing; braces and some
   statements (e.g. the fix_new reloc-type arguments) are not shown.  */
8873 md_estimate_size_before_relax (fragS *fragP, segT segment)
8875 /* We've already got fragP->fr_subtype right; all we have to do is
8876 check for un-relaxable symbols. On an ELF system, we can't relax
8877 an externally visible symbol, because it may be overridden by a
8879 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
8880 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8882 && (S_IS_EXTERNAL (fragP->fr_symbol)
8883 || S_IS_WEAK (fragP->fr_symbol)
8884 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
8885 & BSF_GNU_INDIRECT_FUNCTION))))
8887 #if defined (OBJ_COFF) && defined (TE_PE)
8888 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
8889 && S_IS_WEAK (fragP->fr_symbol))
8893 /* Symbol is undefined in this segment, or we need to keep a
8894 reloc so that weak symbols can be overridden. */
/* CODE16 frags take 2-byte displacements, others 4-byte.  */
8895 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
8896 enum bfd_reloc_code_real reloc_type;
8897 unsigned char *opcode;
/* fr_var carries an explicit reloc type when one was requested.  */
8900 if (fragP->fr_var != NO_RELOC)
8901 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
8903 reloc_type = BFD_RELOC_16_PCREL;
8905 reloc_type = BFD_RELOC_32_PCREL;
8907 old_fr_fix = fragP->fr_fix;
8908 opcode = (unsigned char *) fragP->fr_opcode;
8910 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
8913 /* Make jmp (0xeb) a (d)word displacement jump. */
8915 fragP->fr_fix += size;
8916 fix_new (fragP, old_fr_fix, size,
8918 fragP->fr_offset, 1,
/* Conditional jump on a pre-386 CPU (COND_JUMP86): it has no
   two-byte Jcc form, so synthesize "jNcc short; jmp rel".  */
8924 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
8926 /* Negate the condition, and branch past an
8927 unconditional jump. */
8930 /* Insert an unconditional jump. */
8932 /* We added two extra opcode bytes, and have a two byte
8934 fragP->fr_fix += 2 + 2;
8935 fix_new (fragP, old_fr_fix + 2, 2,
8937 fragP->fr_offset, 1,
/* When promotion is disabled, keep the 1-byte displacement and let
   a signed 8-bit fixup catch out-of-range targets.  */
8944 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
8949 fixP = fix_new (fragP, old_fr_fix, 1,
8951 fragP->fr_offset, 1,
8953 fixP->fx_signed = 1;
8957 /* This changes the byte-displacement jump 0x7N
8958 to the (d)word-displacement jump 0x0f,0x8N. */
8959 opcode[1] = opcode[0] + 0x10;
8960 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8961 /* We've added an opcode byte. */
8962 fragP->fr_fix += 1 + size;
8963 fix_new (fragP, old_fr_fix + 1, size,
8965 fragP->fr_offset, 1,
8970 BAD_CASE (fragP->fr_subtype);
/* Growth is whatever we just added to the fixed part.  */
8974 return fragP->fr_fix - old_fr_fix;
8977 /* Guess size depending on current relax state. Initially the relax
8978 state will correspond to a short jump and we return 1, because
8979 the variable part of the frag (the branch offset) is one byte
8980 long. However, we can relax a section more than once and in that
8981 case we must either set fr_subtype back to the unrelaxed state,
8982 or return the value for the appropriate branch. */
8983 return md_relax_table[fragP->fr_subtype].rlx_length;
8986 /* Called after relax() is finished.
8988 In: Address of frag.
8989 fr_type == rs_machine_dependent.
8990 fr_subtype is what the address relaxed to.
8992 Out: Any fixSs and constants are set up.
8993 Caller will turn frag into a ".space 0". */
8996 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
8999 unsigned char *opcode;
9000 unsigned char *where_to_put_displacement = NULL;
9001 offsetT target_address;
9002 offsetT opcode_address;
/* Total bytes added beyond the one-byte short form.  */
9003 unsigned int extension = 0;
9004 offsetT displacement_from_opcode_start;
9006 opcode = (unsigned char *) fragP->fr_opcode;
9008 /* Address we want to reach in file space. */
9009 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
9011 /* Address opcode resides at in file space. */
9012 opcode_address = fragP->fr_address + fragP->fr_fix;
9014 /* Displacement from opcode start to fill into instruction. */
9015 displacement_from_opcode_start = target_address - opcode_address;
9017 if ((fragP->fr_subtype & BIG) == 0)
9019 /* Don't have to change opcode. */
9020 extension = 1; /* 1 opcode + 1 displacement */
9021 where_to_put_displacement = &opcode[1];
/* Target is out of byte range and the jump must grow; warn when the
   user asked for no conditional-jump promotion.  */
9025 if (no_cond_jump_promotion
9026 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
9027 as_warn_where (fragP->fr_file, fragP->fr_line,
9028 _("long jump required"));
9030 switch (fragP->fr_subtype)
9032 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
9033 extension = 4; /* 1 opcode + 4 displacement */
9035 where_to_put_displacement = &opcode[1];
9038 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
9039 extension = 2; /* 1 opcode + 2 displacement */
9041 where_to_put_displacement = &opcode[1];
9044 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
9045 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
9046 extension = 5; /* 2 opcode + 4 displacement */
/* Rewrite short Jcc 0x7N into long Jcc 0x0f 0x8N.  */
9047 opcode[1] = opcode[0] + 0x10;
9048 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
9049 where_to_put_displacement = &opcode[2];
9052 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
9053 extension = 3; /* 2 opcode + 2 displacement */
9054 opcode[1] = opcode[0] + 0x10;
9055 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
9056 where_to_put_displacement = &opcode[2];
9059 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
9064 where_to_put_displacement = &opcode[3];
9068 BAD_CASE (fragP->fr_subtype);
9073 /* If size is less than four we are sure that the operand fits,
9074 but if it's 4, then it could be that the displacement is larger
/* Signed-32-bit overflow check done in unsigned arithmetic: bias by
   2^31 and compare against 2^32 - 1 to avoid signed-overflow UB.  */
9076 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
9078 && ((addressT) (displacement_from_opcode_start - extension
9079 + ((addressT) 1 << 31))
9080 > (((addressT) 2 << 31) - 1)))
9082 as_bad_where (fragP->fr_file, fragP->fr_line,
9083 _("jump target out of range"));
9084 /* Make us emit 0. */
9085 displacement_from_opcode_start = extension;
9087 /* Now put displacement after opcode. */
9088 md_number_to_chars ((char *) where_to_put_displacement,
9089 (valueT) (displacement_from_opcode_start - extension),
9090 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
9091 fragP->fr_fix += extension;
9094 /* Apply a fixup (fixP) to segment data, once it has been determined
9095 by our caller that we have all the info we need to fix it up.
9097 Parameter valP is the pointer to the value of the bits.
9099 On the 386, immediates, displacements, and data pointers are all in
9100 the same (little-endian) format, so we don't need to care about which
/* NOTE(review): line numbers skip; braces and several statements are
   missing from this listing.  */
9104 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
9106 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
9107 valueT value = *valP;
9109 #if !defined (TE_Mach)
/* Canonicalize absolute reloc types into their PC-relative
   counterparts (the enclosing condition is not visible here).  */
9112 switch (fixP->fx_r_type)
9118 fixP->fx_r_type = BFD_RELOC_64_PCREL;
9121 case BFD_RELOC_X86_64_32S:
9122 fixP->fx_r_type = BFD_RELOC_32_PCREL;
9125 fixP->fx_r_type = BFD_RELOC_16_PCREL;
9128 fixP->fx_r_type = BFD_RELOC_8_PCREL;
9133 if (fixP->fx_addsy != NULL
9134 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
9135 || fixP->fx_r_type == BFD_RELOC_64_PCREL
9136 || fixP->fx_r_type == BFD_RELOC_16_PCREL
9137 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
9138 && !use_rela_relocations)
9140 /* This is a hack. There should be a better way to handle this.
9141 This covers for the fact that bfd_install_relocation will
9142 subtract the current location (for partial_inplace, PC relative
9143 relocations); see more below. */
9147 || OUTPUT_FLAVOR == bfd_target_coff_flavour
9150 value += fixP->fx_where + fixP->fx_frag->fr_address;
9152 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9155 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
9158 || (symbol_section_p (fixP->fx_addsy)
9159 && sym_seg != absolute_section))
9160 && !generic_force_reloc (fixP))
9162 /* Yes, we add the values in twice. This is because
9163 bfd_install_relocation subtracts them out again. I think
9164 bfd_install_relocation is broken, but I don't dare change
9166 value += fixP->fx_where + fixP->fx_frag->fr_address;
9170 #if defined (OBJ_COFF) && defined (TE_PE)
9171 /* For some reason, the PE format does not store a
9172 section address offset for a PC relative symbol. */
9173 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
9174 || S_IS_WEAK (fixP->fx_addsy))
9175 value += md_pcrel_from (fixP);
9178 #if defined (OBJ_COFF) && defined (TE_PE)
9179 if (fixP->fx_addsy != NULL
9180 && S_IS_WEAK (fixP->fx_addsy)
9181 /* PR 16858: Do not modify weak function references. */
9182 && ! fixP->fx_pcrel)
9184 #if !defined (TE_PEP)
9185 /* For x86 PE weak function symbols are neither PC-relative
9186 nor do they set S_IS_FUNCTION. So the only reliable way
9187 to detect them is to check the flags of their containing
9189 if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
9190 && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
9194 value -= S_GET_VALUE (fixP->fx_addsy);
9198 /* Fix a few things - the dynamic linker expects certain values here,
9199 and we must not disappoint it. */
9200 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9201 if (IS_ELF && fixP->fx_addsy)
9202 switch (fixP->fx_r_type)
9204 case BFD_RELOC_386_PLT32:
9205 case BFD_RELOC_X86_64_PLT32:
9206 /* Make the jump instruction point to the address of the operand. At
9207 runtime we merely add the offset to the actual PLT entry. */
/* The TLS relocs below carry no addend; the linker resolves them.  */
9211 case BFD_RELOC_386_TLS_GD:
9212 case BFD_RELOC_386_TLS_LDM:
9213 case BFD_RELOC_386_TLS_IE_32:
9214 case BFD_RELOC_386_TLS_IE:
9215 case BFD_RELOC_386_TLS_GOTIE:
9216 case BFD_RELOC_386_TLS_GOTDESC:
9217 case BFD_RELOC_X86_64_TLSGD:
9218 case BFD_RELOC_X86_64_TLSLD:
9219 case BFD_RELOC_X86_64_GOTTPOFF:
9220 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9221 value = 0; /* Fully resolved at runtime. No addend. */
9223 case BFD_RELOC_386_TLS_LE:
9224 case BFD_RELOC_386_TLS_LDO_32:
9225 case BFD_RELOC_386_TLS_LE_32:
9226 case BFD_RELOC_X86_64_DTPOFF32:
9227 case BFD_RELOC_X86_64_DTPOFF64:
9228 case BFD_RELOC_X86_64_TPOFF32:
9229 case BFD_RELOC_X86_64_TPOFF64:
9230 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9233 case BFD_RELOC_386_TLS_DESC_CALL:
9234 case BFD_RELOC_X86_64_TLSDESC_CALL:
9235 value = 0; /* Fully resolved at runtime. No addend. */
9236 S_SET_THREAD_LOCAL (fixP->fx_addsy);
9240 case BFD_RELOC_386_GOT32:
9241 case BFD_RELOC_X86_64_GOT32:
9242 value = 0; /* Fully resolved at runtime. No addend. */
9245 case BFD_RELOC_VTABLE_INHERIT:
9246 case BFD_RELOC_VTABLE_ENTRY:
9253 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
9255 #endif /* !defined (TE_Mach) */
9257 /* Are we finished with this relocation now? */
9258 if (fixP->fx_addsy == NULL)
9260 #if defined (OBJ_COFF) && defined (TE_PE)
9261 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
9264 /* Remember value for tc_gen_reloc. */
9265 fixP->fx_addnumber = value;
9266 /* Clear out the frag for now. */
9270 else if (use_rela_relocations)
/* RELA targets carry the addend in the reloc, not in the section
   contents; suppress overflow checking on the in-place bytes.  */
9272 fixP->fx_no_overflow = 1;
9273 /* Remember value for tc_gen_reloc. */
9274 fixP->fx_addnumber = value;
/* Finally patch the (little-endian) bytes in the frag.  */
9278 md_number_to_chars (p, value, fixP->fx_size);
/* Convert an ASCII floating-point literal into target bytes at *litP;
   delegates entirely to the generic IEEE converter.  */
9282 md_atof (int type, char *litP, int *sizeP)
9284 /* This outputs the LITTLENUMs in REVERSE order;
9285 in accord with the bigendian 386. */
9286 return ieee_md_atof (type, litP, sizeP, FALSE);
/* Scratch buffer for output_invalid; sized for "'c'" or "(0xNN)".  */
9289 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
/* Render character C for an error message: printable characters one
   way, others as a hex escape.  Returns a pointer to a static buffer,
   so the result is only valid until the next call (not reentrant).  */
9292 output_invalid (int c)
9295 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
9298 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
9299 "(0x%x)", (unsigned char) c);
9300 return output_invalid_buf;
9303 /* REG_STRING starts *before* REGISTER_PREFIX. */
/* Look REG_STRING up in the register table, applying per-CPU-arch
   availability checks.  On success *end_op points past the register
   name; returns NULL for anything that is not a usable register.
   NOTE(review): line numbers skip; some statements are not shown.  */
9305 static const reg_entry *
9306 parse_real_register (char *reg_string, char **end_op)
9308 char *s = reg_string;
9310 char reg_name_given[MAX_REG_NAME_SIZE + 1];
9313 /* Skip possible REGISTER_PREFIX and possible whitespace. */
9314 if (*s == REGISTER_PREFIX)
9317 if (is_space_char (*s))
/* Copy the canonicalized name (via register_chars[]) into the local
   buffer, bounding it at MAX_REG_NAME_SIZE.  */
9321 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
9323 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
9324 return (const reg_entry *) NULL;
9328 /* For naked regs, make sure that we are not dealing with an identifier.
9329 This prevents confusing an identifier like `eax_var' with register
9331 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
9332 return (const reg_entry *) NULL;
9336 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
9338 /* Handle floating point regs, allowing spaces in the (i) part. */
9339 if (r == i386_regtab /* %st is first entry of table */)
9341 if (is_space_char (*s))
9346 if (is_space_char (*s))
9348 if (*s >= '0' && *s <= '7')
9352 if (is_space_char (*s))
/* Bare "%st" is an alias for "st(0)".  */
9357 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
9362 /* We have "%st(" then garbage. */
9363 return (const reg_entry *) NULL;
9367 if (r == NULL || allow_pseudo_reg)
9370 if (operand_type_all_zero (&r->reg_type))
9371 return (const reg_entry *) NULL;
/* Registers introduced with the i386 are rejected for older CPUs.  */
9373 if ((r->reg_type.bitfield.reg32
9374 || r->reg_type.bitfield.sreg3
9375 || r->reg_type.bitfield.control
9376 || r->reg_type.bitfield.debug
9377 || r->reg_type.bitfield.test)
9378 && !cpu_arch_flags.bitfield.cpui386)
9379 return (const reg_entry *) NULL;
/* FP stack registers need some form of x87 support.  */
9381 if (r->reg_type.bitfield.floatreg
9382 && !cpu_arch_flags.bitfield.cpu8087
9383 && !cpu_arch_flags.bitfield.cpu287
9384 && !cpu_arch_flags.bitfield.cpu387)
9385 return (const reg_entry *) NULL;
9387 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
9388 return (const reg_entry *) NULL;
9390 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
9391 return (const reg_entry *) NULL;
9393 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
9394 return (const reg_entry *) NULL;
9396 if ((r->reg_type.bitfield.regzmm || r->reg_type.bitfield.regmask)
9397 && !cpu_arch_flags.bitfield.cpuavx512f)
9398 return (const reg_entry *) NULL;
9400 /* Don't allow fake index register unless allow_index_reg isn't 0. */
9401 if (!allow_index_reg
9402 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
9403 return (const reg_entry *) NULL;
9405 /* Upper 16 vector register is only available with VREX in 64bit
9407 if ((r->reg_flags & RegVRex))
9409 if (!cpu_arch_flags.bitfield.cpuvrex
9410 || flag_code != CODE_64BIT)
9411 return (const reg_entry *) NULL;
/* 64-bit-only registers are rejected outside 64-bit mode (control
   registers being a long-mode exception).  */
9416 if (((r->reg_flags & (RegRex64 | RegRex))
9417 || r->reg_type.bitfield.reg64)
9418 && (!cpu_arch_flags.bitfield.cpulm
9419 || !operand_type_equal (&r->reg_type, &control))
9420 && flag_code != CODE_64BIT)
9421 return (const reg_entry *) NULL;
/* The `flat' pseudo segment register is Intel-syntax only.  */
9423 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
9424 return (const reg_entry *) NULL;
9429 /* REG_STRING starts *before* REGISTER_PREFIX. */
/* Front end to parse_real_register: also resolves symbols placed in
   reg_section by `.equ'-style register aliases.  Returns the register
   entry or NULL; sets *end_op past the parsed text.  */
9431 static const reg_entry *
9432 parse_register (char *reg_string, char **end_op)
9436 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
9437 r = parse_real_register (reg_string, end_op);
/* No direct match: try a symbol whose value is an O_register
   expression (a user-defined register alias).  */
9442 char *save = input_line_pointer;
9446 input_line_pointer = reg_string;
9447 c = get_symbol_end ();
9448 symbolP = symbol_find (reg_string);
9449 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
9451 const expressionS *e = symbol_get_value_expression (symbolP);
9453 know (e->X_op == O_register);
9454 know (e->X_add_number >= 0
9455 && (valueT) e->X_add_number < i386_regtab_size);
9456 r = i386_regtab + e->X_add_number;
9457 if ((r->reg_flags & RegVRex))
9459 *end_op = input_line_pointer;
/* Restore the scanner state we clobbered above.  */
9461 *input_line_pointer = c;
9462 input_line_pointer = save;
/* md hook: let the expression parser treat NAME as a register when it
   parses as one; otherwise defer to the Intel-syntax name parser.
   On success the register is returned as an O_register expression in
   *E and *nextcharP preserves the delimiter we NUL out.  */
9468 i386_parse_name (char *name, expressionS *e, char *nextcharP)
9471 char *end = input_line_pointer;
9474 r = parse_register (name, &input_line_pointer);
9475 if (r && end <= input_line_pointer)
9477 *nextcharP = *input_line_pointer;
9478 *input_line_pointer = 0;
9479 e->X_op = O_register;
/* Encode the register as its index into i386_regtab.  */
9480 e->X_add_number = r - i386_regtab;
9483 input_line_pointer = end;
9485 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
/* md hook invoked by the generic expression parser for operand syntax
   it does not understand: a %-prefixed register, or (Intel syntax) a
   bracketed sub-expression.  */
9489 md_operand (expressionS *e)
9494 switch (*input_line_pointer)
9496 case REGISTER_PREFIX:
9497 r = parse_real_register (input_line_pointer, &end);
9500 e->X_op = O_register;
/* Encode the register as its index into i386_regtab.  */
9501 e->X_add_number = r - i386_regtab;
9502 input_line_pointer = end;
/* '[' handling is only reachable in Intel syntax.  */
9507 gas_assert (intel_syntax);
9508 end = input_line_pointer++;
9510 if (*input_line_pointer == ']')
9512 ++input_line_pointer;
/* Wrap the parsed sub-expression in an expression symbol.  */
9513 e->X_op_symbol = make_expr_symbol (e);
9514 e->X_add_symbol = NULL;
9515 e->X_add_number = 0;
/* Rewind on a malformed bracket expression.  */
9521 input_line_pointer = end;
/* Command-line option tables.  Short options differ per object format:
   ELF additionally accepts the SVR4-compatibility flags -k -V -Q: -s.  */
9528 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9529 const char *md_shortopts = "kVQ:sqn";
9531 const char *md_shortopts = "qn";
/* Long-option identifiers, allocated upward from OPTION_MD_BASE.  */
9534 #define OPTION_32 (OPTION_MD_BASE + 0)
9535 #define OPTION_64 (OPTION_MD_BASE + 1)
9536 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
9537 #define OPTION_MARCH (OPTION_MD_BASE + 3)
9538 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
9539 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
9540 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
9541 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
9542 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
9543 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
9544 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
9545 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
9546 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
9547 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
9548 #define OPTION_X32 (OPTION_MD_BASE + 14)
9549 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
9550 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
9551 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
9552 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
9553 #define OPTION_OMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
9554 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
/* Long-option table; availability of --64/--x32/--mbig-obj depends on
   the object formats this gas was configured for.  */
9556 struct option md_longopts[] =
9558 {"32", no_argument, NULL, OPTION_32},
9559 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9560 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9561 {"64", no_argument, NULL, OPTION_64},
9563 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9564 {"x32", no_argument, NULL, OPTION_X32},
9566 {"divide", no_argument, NULL, OPTION_DIVIDE},
9567 {"march", required_argument, NULL, OPTION_MARCH},
9568 {"mtune", required_argument, NULL, OPTION_MTUNE},
9569 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
9570 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
9571 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
9572 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
9573 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
9574 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
9575 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
9576 {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
9577 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
9578 {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
9579 {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
9580 {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
9581 # if defined (TE_PE) || defined (TE_PEP)
9582 {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
9584 {"momit-lock-prefix", required_argument, NULL, OPTION_OMIT_LOCK_PREFIX},
9585 {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG},
9586 {NULL, no_argument, NULL, 0}
9588 size_t md_longopts_size = sizeof (md_longopts);
/* Target hook: process one machine-specific command-line option.
   C is the option code (a character or an OPTION_* long-option value),
   ARG its argument string (NULL for no_argument options).  Invalid
   argument values are fatal via as_fatal.  NOTE(review): many lines of
   this function are elided in this listing; comments below describe
   only the visible code.  */
9591 md_parse_option (int c, char *arg)
/* -n: disable optimization of code alignment.  */
9599       optimize_align_code = 0;
9606 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9607       /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
9608 	 should be emitted or not.  FIXME: Not implemented.  */
9612       /* -V: SVR4 argument to print version ID.  */
9614 	print_version_id ();
9617       /* -k: Ignore for FreeBSD compatibility.  */
9622       /* -s: On i386 Solaris, this tells the native assembler to use
9623 	 .stab instead of .stab.excl.  We always use .stab anyhow.  */
9626 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9627      || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9630 	const char **list, **l;
/* --64: accept only if a 64-bit-capable BFD target was compiled in.  */
9632 	list = bfd_target_list ();
9633 	for (l = list; *l != NULL; l++)
9634 	  if (CONST_STRNEQ (*l, "elf64-x86-64")
9635 	      || strcmp (*l, "coff-x86-64") == 0
9636 	      || strcmp (*l, "pe-x86-64") == 0
9637 	      || strcmp (*l, "pei-x86-64") == 0
9638 	      || strcmp (*l, "mach-o-x86-64") == 0)
9640 	      default_arch = "x86_64";
9644 	  as_fatal (_("no compiled in support for x86_64"));
9650 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9654 	const char **list, **l;
/* --x32: ILP32 x86-64 is only available with ELF targets.  */
9656 	list = bfd_target_list ();
9657 	for (l = list; *l != NULL; l++)
9658 	  if (CONST_STRNEQ (*l, "elf32-x86-64"))
9660 	      default_arch = "x86_64:32";
9664 	  as_fatal (_("no compiled in support for 32bit x86_64"));
9668       as_fatal (_("32bit x86_64 is only supported for ELF"));
/* --32: plain 32-bit i386 code.  */
9673       default_arch = "i386";
9677 #ifdef SVR4_COMMENT_CHARS
/* --divide: rebuild the comment-character set without `/'.  */
9682 	n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
9684 	for (s = i386_comment_chars; *s != '\0'; s++)
9688 	i386_comment_chars = n;
/* -march=CPU[,+EXTENSION...]: work on a writable copy of ARG; NEXT
   walks the '+'-separated extension list.  */
9694       arch = xstrdup (arg);
9698 	as_fatal (_("invalid -march= option: `%s'"), arg);
9699       next = strchr (arch, '+');
9702 	  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
9704 	      if (strcmp (arch, cpu_arch [j].name) == 0)
/* Processor entry: must at least be i386-capable.  */
9707 		  if (! cpu_arch[j].flags.bitfield.cpui386)
9710 		  cpu_arch_name = cpu_arch[j].name;
9711 		  cpu_sub_arch_name = NULL;
9712 		  cpu_arch_flags = cpu_arch[j].flags;
9713 		  cpu_arch_isa = cpu_arch[j].type;
9714 		  cpu_arch_isa_flags = cpu_arch[j].flags;
/* Unless -mtune= was given explicitly, tune for the selected arch.  */
9715 		  if (!cpu_arch_tune_set)
9717 		      cpu_arch_tune = cpu_arch_isa;
9718 		      cpu_arch_tune_flags = cpu_arch_isa_flags;
/* Extension entries are stored with a leading '.' in the table.  */
9722 	      else if (*cpu_arch [j].name == '.'
9723 		       && strcmp (arch, cpu_arch [j].name + 1) == 0)
9725 		  /* ISA extension.  */
9726 		  i386_cpu_flags flags;
/* ".noXXX" entries are marked negated and clear bits instead.  */
9728 		  if (!cpu_arch[j].negated)
9729 		    flags = cpu_flags_or (cpu_arch_flags,
9732 		    flags = cpu_flags_and_not (cpu_arch_flags,
/* Only record the sub-arch name if the flag set actually changed.  */
9734 		  if (!cpu_flags_equal (&flags, &cpu_arch_flags))
9736 		      if (cpu_sub_arch_name)
9738 			  char *name = cpu_sub_arch_name;
9739 			  cpu_sub_arch_name = concat (name,
9741 						      (const char *) NULL);
9745 			cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
9746 		      cpu_arch_flags = flags;
9747 		      cpu_arch_isa_flags = flags;
9753 	  if (j >= ARRAY_SIZE (cpu_arch))
9754 	    as_fatal (_("invalid -march= option: `%s'"), arg);
9758       while (next != NULL );
/* -mtune=CPU: pick scheduling/encoding preferences only.  */
9763 	as_fatal (_("invalid -mtune= option: `%s'"), arg);
9764       for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
9766 	  if (strcmp (arg, cpu_arch [j].name) == 0)
9768 	      cpu_arch_tune_set = 1;
9769 	      cpu_arch_tune = cpu_arch [j].type;
9770 	      cpu_arch_tune_flags = cpu_arch[j].flags;
9774       if (j >= ARRAY_SIZE (cpu_arch))
9775 	as_fatal (_("invalid -mtune= option: `%s'"), arg);
9778     case OPTION_MMNEMONIC:
9779       if (strcasecmp (arg, "att") == 0)
9781       else if (strcasecmp (arg, "intel") == 0)
9784 	as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
9787     case OPTION_MSYNTAX:
9788       if (strcasecmp (arg, "att") == 0)
9790       else if (strcasecmp (arg, "intel") == 0)
9793 	as_fatal (_("invalid -msyntax= option: `%s'"), arg);
9796     case OPTION_MINDEX_REG:
9797       allow_index_reg = 1;
9800     case OPTION_MNAKED_REG:
9801       allow_naked_reg = 1;
9804     case OPTION_MOLD_GCC:
9808     case OPTION_MSSE2AVX:
/* -msse-check= / -moperand-check=: map keyword to check severity.  */
9812     case OPTION_MSSE_CHECK:
9813       if (strcasecmp (arg, "error") == 0)
9814 	sse_check = check_error;
9815       else if (strcasecmp (arg, "warning") == 0)
9816 	sse_check = check_warning;
9817       else if (strcasecmp (arg, "none") == 0)
9818 	sse_check = check_none;
9820 	as_fatal (_("invalid -msse-check= option: `%s'"), arg);
9823     case OPTION_MOPERAND_CHECK:
9824       if (strcasecmp (arg, "error") == 0)
9825 	operand_check = check_error;
9826       else if (strcasecmp (arg, "warning") == 0)
9827 	operand_check = check_warning;
9828       else if (strcasecmp (arg, "none") == 0)
9829 	operand_check = check_none;
9831 	as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
9834     case OPTION_MAVXSCALAR:
9835       if (strcasecmp (arg, "128") == 0)
9837       else if (strcasecmp (arg, "256") == 0)
9840 	as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
9843     case OPTION_MADD_BND_PREFIX:
/* -mevexlig=: vector length to encode for LIG EVEX scalar insns.  */
9847     case OPTION_MEVEXLIG:
9848       if (strcmp (arg, "128") == 0)
9850       else if (strcmp (arg, "256") == 0)
9852       else if (strcmp (arg, "512") == 0)
9855 	as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
/* -mevexrcig=: rounding-control value for SAE-only EVEX insns.  */
9858     case OPTION_MEVEXRCIG:
9859       if (strcmp (arg, "rne") == 0)
9861       else if (strcmp (arg, "rd") == 0)
9863       else if (strcmp (arg, "ru") == 0)
9865       else if (strcmp (arg, "rz") == 0)
9868 	as_fatal (_("invalid -mevexrcig= option: `%s'"), arg);
9871     case OPTION_MEVEXWIG:
9872       if (strcmp (arg, "0") == 0)
9874       else if (strcmp (arg, "1") == 0)
9877 	as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
9880 # if defined (TE_PE) || defined (TE_PEP)
9881     case OPTION_MBIG_OBJ:
9886     case OPTION_OMIT_LOCK_PREFIX:
9887       if (strcasecmp (arg, "yes") == 0)
9888 	omit_lock_prefix = 1;
9889       else if (strcasecmp (arg, "no") == 0)
9890 	omit_lock_prefix = 0;
9892 	as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg);
/* Template providing the maximum line width for --help arch lists
   (continuation lines elided in this listing).  */
9901 #define MESSAGE_TEMPLATE \
/* Print the cpu_arch[] table for --help, word-wrapped to the width of
   MESSAGE_TEMPLATE.  EXT non-zero lists ISA extensions (leading-'.'
   entries); otherwise processors are listed.  CHECK non-zero hides
   entries that are not valid i386 processors.  */
9905 show_arch (FILE *stream, int ext, int check)
9907   static char message[] = MESSAGE_TEMPLATE;
/* Output starts at column 27, after the option text on the first line.  */
9908   char *start = message + 27;
9910   int size = sizeof (MESSAGE_TEMPLATE);
9917   left = size - (start - message);
9918   for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
9920       /* Should it be skipped? */
9921       if (cpu_arch [j].skip)
9924       name = cpu_arch [j].name;
9925       len = cpu_arch [j].len;
9928 	  /* It is an extension. Skip if we aren't asked to show it. */
9939 	  /* It is a processor. Skip if we show only extension. */
9942       else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
9944 	  /* It is an impossible processor - skip. */
9948       /* Reserve 2 spaces for ", " or ",\0" */
9951       /* Check if there is any room. */
9959 	      p = mempcpy (p, name, len);
9963 	      /* Output the current message now and start a new one. */
9966 	      fprintf (stream, "%s\n", message);
9968 	      left = size - (start - message) - len - 2;
9970 	  gas_assert (left >= 0);
9972 	  p = mempcpy (p, name, len);
/* Flush the final, partially-filled line.  */
9977   fprintf (stream, "%s\n", message);
/* Target hook: print the x86-specific command-line options for --help.
   Option availability mirrors the #if guards in md_parse_option.  */
9981 md_show_usage (FILE *stream)
9983 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9984   fprintf (stream, _("\
9986   -V                      print assembler version number\n\
9989   fprintf (stream, _("\
9990   -n                      Do not optimize code alignment\n\
9991   -q                      quieten some warnings\n"));
9992 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9993   fprintf (stream, _("\
9996 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9997      || defined (TE_PE) || defined (TE_PEP))
9998   fprintf (stream, _("\
9999   --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
10001 #ifdef SVR4_COMMENT_CHARS
10002   fprintf (stream, _("\
10003   --divide                do not treat `/' as a comment character\n"));
10005   fprintf (stream, _("\
10006   --divide                ignored\n"));
10008   fprintf (stream, _("\
10009   -march=CPU[,+EXTENSION...]\n\
10010                           generate code for CPU and EXTENSION, CPU is one of:\n"));
/* show_arch(ext=0, check=1): valid processors only.  */
10011   show_arch (stream, 0, 1);
10012   fprintf (stream, _("\
10013                           EXTENSION is combination of:\n"));
/* show_arch(ext=1): extension names.  */
10014   show_arch (stream, 1, 0);
10015   fprintf (stream, _("\
10016   -mtune=CPU              optimize for CPU, CPU is one of:\n"));
10017   show_arch (stream, 0, 0);
10018   fprintf (stream, _("\
10019   -msse2avx               encode SSE instructions with VEX prefix\n"));
10020   fprintf (stream, _("\
10021   -msse-check=[none|error|warning]\n\
10022                           check SSE instructions\n"));
10023   fprintf (stream, _("\
10024   -moperand-check=[none|error|warning]\n\
10025                           check operand combinations for validity\n"));
10026   fprintf (stream, _("\
10027   -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
10029   fprintf (stream, _("\
10030   -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
10032   fprintf (stream, _("\
10033   -mevexwig=[0|1]         encode EVEX instructions with specific EVEX.W value\n\
10034                            for EVEX.W bit ignored instructions\n"));
10035   fprintf (stream, _("\
10036   -mevexrcig=[rne|rd|ru|rz]\n\
10037                           encode EVEX instructions with specific EVEX.RC value\n\
10038                            for SAE-only ignored instructions\n"));
10039   fprintf (stream, _("\
10040   -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
10041   fprintf (stream, _("\
10042   -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
10043   fprintf (stream, _("\
10044   -mindex-reg             support pseudo index registers\n"));
10045   fprintf (stream, _("\
10046   -mnaked-reg             don't require `%%' prefix for registers\n"));
10047   fprintf (stream, _("\
10048   -mold-gcc               support old (<= 2.8.1) versions of gcc\n"));
10049   fprintf (stream, _("\
10050   -madd-bnd-prefix        add BND prefix for all valid branches\n"));
10051 # if defined (TE_PE) || defined (TE_PEP)
10052   fprintf (stream, _("\
10053   -mbig-obj               generate big object files\n"));
10055   fprintf (stream, _("\
10056   -momit-lock-prefix=[no|yes]\n\
10057                           strip all lock prefixes\n"));
10060 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
10061      || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
10062      || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
10064 /* Pick the target format to use. */
/* Return the BFD target name string, chosen from default_arch (set by
   --32/--64/--x32 in md_parse_option) and OUTPUT_FLAVOR.  Also sets
   flag_code, x86_elf_abi, and relocation-style globals as side
   effects.  */
10067 i386_target_format (void)
10069   if (!strncmp (default_arch, "x86_64", 6))
10071       update_code_flag (CODE_64BIT, 1);
/* "x86_64" selects the LP64 ABI; "x86_64:32" (any suffix) the x32 ABI.  */
10072       if (default_arch[6] == '\0')
10073 	x86_elf_abi = X86_64_ABI;
10075 	x86_elf_abi = X86_64_X32_ABI;
10077   else if (!strcmp (default_arch, "i386"))
10078     update_code_flag (CODE_32BIT, 1);
10080     as_fatal (_("unknown architecture"));
/* Default ISA/tune flags to the generic 32- or 64-bit entry if unset.  */
10082   if (cpu_flags_all_zero (&cpu_arch_isa_flags))
10083     cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
10084   if (cpu_flags_all_zero (&cpu_arch_tune_flags))
10085     cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
10087   switch (OUTPUT_FLAVOR)
10089 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
10090     case bfd_target_aout_flavour:
10091       return AOUT_TARGET_FORMAT;
10093 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
10094 # if defined (TE_PE) || defined (TE_PEP)
10095     case bfd_target_coff_flavour:
10096       if (flag_code == CODE_64BIT)
/* -mbig-obj selects the big-object COFF variant.  */
10097 	return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
10100 # elif defined (TE_GO32)
10101     case bfd_target_coff_flavour:
10102       return "coff-go32";
10104     case bfd_target_coff_flavour:
10105       return "coff-i386";
10108 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
10109     case bfd_target_elf_flavour:
10111 	const char *format;
10113 	switch (x86_elf_abi)
10116 	    format = ELF_TARGET_FORMAT;
/* 64-bit ELF uses RELA relocations.  */
10119 	    use_rela_relocations = 1;
10121 	    format = ELF_TARGET_FORMAT64;
10123 	  case X86_64_X32_ABI:
10124 	    use_rela_relocations = 1;
/* x32: 64-bit-only relocation types cannot be represented.  */
10126 	    disallow_64bit_reloc = 1;
10127 	    format = ELF_TARGET_FORMAT32;
/* L1OM/K1OM co-processors use their own ELF formats, 64-bit only.  */
10130 	if (cpu_arch_isa == PROCESSOR_L1OM)
10132 	    if (x86_elf_abi != X86_64_ABI)
10133 	      as_fatal (_("Intel L1OM is 64bit only"));
10134 	    return ELF_TARGET_L1OM_FORMAT;
10136 	if (cpu_arch_isa == PROCESSOR_K1OM)
10138 	    if (x86_elf_abi != X86_64_ABI)
10139 	      as_fatal (_("Intel K1OM is 64bit only"));
10140 	    return ELF_TARGET_K1OM_FORMAT;
10146 #if defined (OBJ_MACH_O)
10147     case bfd_target_mach_o_flavour:
10148       if (flag_code == CODE_64BIT)
10150 	  use_rela_relocations = 1;
10152 	  return "mach-o-x86-64";
10155 	return "mach-o-i386";
10163 #endif /* OBJ_MAYBE_ more than one */
10165 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
/* Emit an ELF .note section recording the target cpu architecture name
   (note type NT_ARCH).  Saves and restores the current (sub)segment so
   callers are not left in .note.  */
10167 i386_elf_emit_arch_note (void)
10169   if (IS_ELF && cpu_arch_name != NULL)
10172       asection *seg = now_seg;
10173       subsegT subseg = now_subseg;
10174       Elf_Internal_Note i_note;
10175       Elf_External_Note e_note;
10176       asection *note_secp;
10179       /* Create the .note section.  */
10180       note_secp = subseg_new (".note", 0);
10181       bfd_set_section_flags (stdoutput,
10183 			     SEC_HAS_CONTENTS | SEC_READONLY);
10185       /* Process the arch string.  */
10186       len = strlen (cpu_arch_name);
/* namesz counts the terminating NUL of the name string.  */
10188       i_note.namesz = len + 1;
10190       i_note.type = NT_ARCH;
/* Write the note header fields (namesz, descsz, type), then the name.  */
10191       p = frag_more (sizeof (e_note.namesz));
10192       md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
10193       p = frag_more (sizeof (e_note.descsz));
10194       md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
10195       p = frag_more (sizeof (e_note.type));
10196       md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
10197       p = frag_more (len + 1);
10198       strcpy (p, cpu_arch_name);
/* Pad the note to a 4-byte boundary.  */
10200       frag_align (2, 0, 0);
10202       subseg_set (seg, subseg);
/* Target hook: called for symbols the assembler cannot otherwise
   resolve.  Lazily creates the _GLOBAL_OFFSET_TABLE_ symbol on first
   reference; complains if the name already exists in the table.  */
10208 md_undefined_symbol (char *name)
/* Cheap three-character prefilter before the full strcmp.  */
10210   if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
10211       && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
10212       && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
10213       && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
10217 	  if (symbol_find (name))
10218 	    as_bad (_("GOT already in symbol table"));
10219 	  GOT_symbol = symbol_new (name, undefined_section,
10220 				   (valueT) 0, &zero_address_frag);
10227 /* Round up a section size to the appropriate boundary. */
/* Target hook: for a.out output only, round SIZE up to the section's
   alignment; other formats return SIZE unchanged (return elided in
   this listing -- TODO confirm).  */
10230 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
10232 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
10233   if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
10235       /* For a.out, force the section size to be aligned.  If we don't do
10236 	 this, BFD will align it for us, but it will not write out the
10237 	 final bytes of the section.  This may be a bug in BFD, but it is
10238 	 easier to fix it here since that is how the other a.out targets
10242       align = bfd_get_section_alignment (stdoutput, segment);
/* Round up: add (2^align - 1) then mask off the low bits.  */
10243       size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
10250 /* On the i386, PC-relative offsets are relative to the start of the
10251    next instruction.  That is, the address of the offset, plus its
10252    size, since the offset is always the last part of the insn. */
/* Return the address PC-relative fixups are computed from.  */
10255 md_pcrel_from (fixS *fixP)
10257   return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
/* Handle the .bss pseudo-op: switch to the bss section at the
   subsegment given by the (absolute) expression on the line.  */
10263 s_bss (int ignore ATTRIBUTE_UNUSED)
10267 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* Let the ELF backend finalize the previous section first.  */
10269     obj_elf_section_change_hook ();
10271   temp = get_absolute_expression ();
10272   subseg_set (bss_section, (subsegT) temp);
10273   demand_empty_rest_of_line ();
/* Adjust a fixup whose subtrahend is the GOT symbol: such expressions
   denote GOT-relative references, so rewrite the reloc type and drop
   the subtracted symbol.  */
10279 i386_validate_fix (fixS *fixp)
10281   if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
10283       if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
10287 	  fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
10292 	    fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
10294 	    fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
/* The GOT symbol is now implied by the reloc type; remove it.  */
10296       fixp->fx_subsy = 0;
/* Target hook: translate a gas fixup into a BFD arelent for output.
   Maps fx_r_type (and, for generic fixups, fx_size/fx_pcrel) to a
   bfd_reloc_code_real_type, computes the addend, and looks up the
   howto.  NOTE(review): many lines are elided in this listing;
   comments describe only the visible code.  */
10301 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
10304   bfd_reloc_code_real_type code;
10306   switch (fixp->fx_r_type)
10308 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10309     case BFD_RELOC_SIZE32:
10310     case BFD_RELOC_SIZE64:
/* A size reloc against a defined local symbol needs no runtime reloc:
   fold the symbol size plus addend into the field right here.  */
10311       if (S_IS_DEFINED (fixp->fx_addsy)
10312 	  && !S_IS_EXTERNAL (fixp->fx_addsy))
10314 	  /* Resolve size relocation against local symbol to size of
10315 	     the symbol plus addend.  */
10316 	  valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
10317 	  if (fixp->fx_r_type == BFD_RELOC_SIZE32
10318 	      && !fits_in_unsigned_long (value))
10319 	    as_bad_where (fixp->fx_file, fixp->fx_line,
10320 			  _("symbol size computation overflow"));
10321 	  fixp->fx_addsy = NULL;
10322 	  fixp->fx_subsy = NULL;
10323 	  md_apply_fix (fixp, (valueT *) &value, NULL);
/* These reloc types pass through unchanged.  */
10328     case BFD_RELOC_X86_64_PLT32:
10329     case BFD_RELOC_X86_64_GOT32:
10330     case BFD_RELOC_X86_64_GOTPCREL:
10331     case BFD_RELOC_386_PLT32:
10332     case BFD_RELOC_386_GOT32:
10333     case BFD_RELOC_386_GOTOFF:
10334     case BFD_RELOC_386_GOTPC:
10335     case BFD_RELOC_386_TLS_GD:
10336     case BFD_RELOC_386_TLS_LDM:
10337     case BFD_RELOC_386_TLS_LDO_32:
10338     case BFD_RELOC_386_TLS_IE_32:
10339     case BFD_RELOC_386_TLS_IE:
10340     case BFD_RELOC_386_TLS_GOTIE:
10341     case BFD_RELOC_386_TLS_LE_32:
10342     case BFD_RELOC_386_TLS_LE:
10343     case BFD_RELOC_386_TLS_GOTDESC:
10344     case BFD_RELOC_386_TLS_DESC_CALL:
10345     case BFD_RELOC_X86_64_TLSGD:
10346     case BFD_RELOC_X86_64_TLSLD:
10347     case BFD_RELOC_X86_64_DTPOFF32:
10348     case BFD_RELOC_X86_64_DTPOFF64:
10349     case BFD_RELOC_X86_64_GOTTPOFF:
10350     case BFD_RELOC_X86_64_TPOFF32:
10351     case BFD_RELOC_X86_64_TPOFF64:
10352     case BFD_RELOC_X86_64_GOTOFF64:
10353     case BFD_RELOC_X86_64_GOTPC32:
10354     case BFD_RELOC_X86_64_GOT64:
10355     case BFD_RELOC_X86_64_GOTPCREL64:
10356     case BFD_RELOC_X86_64_GOTPC64:
10357     case BFD_RELOC_X86_64_GOTPLT64:
10358     case BFD_RELOC_X86_64_PLTOFF64:
10359     case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
10360     case BFD_RELOC_X86_64_TLSDESC_CALL:
10361     case BFD_RELOC_RVA:
10362     case BFD_RELOC_VTABLE_ENTRY:
10363     case BFD_RELOC_VTABLE_INHERIT:
10365     case BFD_RELOC_32_SECREL:
10367       code = fixp->fx_r_type;
10369     case BFD_RELOC_X86_64_32S:
10370       if (!fixp->fx_pcrel)
10372 	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
10373 	  code = fixp->fx_r_type;
/* Generic fixups: derive the reloc code from size and pcrel-ness.  */
10377       if (fixp->fx_pcrel)
10379 	  switch (fixp->fx_size)
10382 	      as_bad_where (fixp->fx_file, fixp->fx_line,
10383 			    _("can not do %d byte pc-relative relocation"),
/* Fall back to a 32-bit reloc so assembly can continue.  */
10385 	      code = BFD_RELOC_32_PCREL;
10387 	    case 1: code = BFD_RELOC_8_PCREL;  break;
10388 	    case 2: code = BFD_RELOC_16_PCREL; break;
10389 	    case 4: code = BFD_RELOC_32_PCREL; break;
10391 	    case 8: code = BFD_RELOC_64_PCREL; break;
10397 	  switch (fixp->fx_size)
10400 	      as_bad_where (fixp->fx_file, fixp->fx_line,
10401 			    _("can not do %d byte relocation"),
10403 	      code = BFD_RELOC_32;
10405 	    case 1: code = BFD_RELOC_8;  break;
10406 	    case 2: code = BFD_RELOC_16; break;
10407 	    case 4: code = BFD_RELOC_32; break;
10409 	    case 8: code = BFD_RELOC_64; break;
/* A plain 32-bit reference to the GOT symbol is really a GOTPC reloc.  */
10416   if ((code == BFD_RELOC_32
10417        || code == BFD_RELOC_32_PCREL
10418        || code == BFD_RELOC_X86_64_32S)
10420       && fixp->fx_addsy == GOT_symbol)
10423 	code = BFD_RELOC_386_GOTPC;
10425 	code = BFD_RELOC_X86_64_GOTPC32;
10427   if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
10429       && fixp->fx_addsy == GOT_symbol)
10431       code = BFD_RELOC_X86_64_GOTPC64;
/* Build the BFD relocation entry.  */
10434   rel = (arelent *) xmalloc (sizeof (arelent));
10435   rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
10436   *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
10438   rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
10440   if (!use_rela_relocations)
10442       /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
10443 	 vtable entry to be used in the relocation's section offset.  */
10444       if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
10445 	rel->address = fixp->fx_offset;
10446 #if defined (OBJ_COFF) && defined (TE_PE)
/* PE weak symbols: bias the addend -- see the PE weak-extern scheme.  */
10447       else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
10448 	rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
10453       /* Use the rela in 64bit mode.  */
/* x32: reject reloc types that need a 64-bit field.  */
10456       if (disallow_64bit_reloc)
10459 	  case BFD_RELOC_X86_64_DTPOFF64:
10460 	  case BFD_RELOC_X86_64_TPOFF64:
10461 	  case BFD_RELOC_64_PCREL:
10462 	  case BFD_RELOC_X86_64_GOTOFF64:
10463 	  case BFD_RELOC_X86_64_GOT64:
10464 	  case BFD_RELOC_X86_64_GOTPCREL64:
10465 	  case BFD_RELOC_X86_64_GOTPC64:
10466 	  case BFD_RELOC_X86_64_GOTPLT64:
10467 	  case BFD_RELOC_X86_64_PLTOFF64:
10468 	    as_bad_where (fixp->fx_file, fixp->fx_line,
10469 			  _("cannot represent relocation type %s in x32 mode"),
10470 			  bfd_get_reloc_code_name (code));
10476       if (!fixp->fx_pcrel)
10477 	rel->addend = fixp->fx_offset;
10481 	  case BFD_RELOC_X86_64_PLT32:
10482 	  case BFD_RELOC_X86_64_GOT32:
10483 	  case BFD_RELOC_X86_64_GOTPCREL:
10484 	  case BFD_RELOC_X86_64_TLSGD:
10485 	  case BFD_RELOC_X86_64_TLSLD:
10486 	  case BFD_RELOC_X86_64_GOTTPOFF:
10487 	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
10488 	  case BFD_RELOC_X86_64_TLSDESC_CALL:
/* PC-relative GOT/PLT/TLS forms: addend excludes the field size.  */
10489 	    rel->addend = fixp->fx_offset - fixp->fx_size;
10492 	    rel->addend = (section->vma
10494 			   + fixp->fx_addnumber
10495 			   + md_pcrel_from (fixp));
10500   rel->howto = bfd_reloc_type_lookup (stdoutput, code);
10501   if (rel->howto == NULL)
10503       as_bad_where (fixp->fx_file, fixp->fx_line,
10504 		    _("cannot represent relocation type %s"),
10505 		    bfd_get_reloc_code_name (code));
10506       /* Set howto to a garbage value so that we can keep going.  */
10507       rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
10508       gas_assert (rel->howto != NULL);
10514 #include "tc-i386-intel.c"
/* Parse a register name from the input stream for CFI directives and
   rewrite EXP into its DWARF2 register number (O_constant), or
   O_illegal on failure.  Temporarily permits naked and pseudo
   registers, and treats '.' as a register character, restoring the
   previous settings afterwards.  */
10517 tc_x86_parse_to_dw2regnum (expressionS *exp)
10519   int saved_naked_reg;
10520   char saved_register_dot;
10522   saved_naked_reg = allow_naked_reg;
10523   allow_naked_reg = 1;
10524   saved_register_dot = register_chars['.'];
10525   register_chars['.'] = '.';
10526   allow_pseudo_reg = 1;
10527   expression_and_evaluate (exp);
10528   allow_pseudo_reg = 0;
10529   register_chars['.'] = saved_register_dot;
10530   allow_naked_reg = saved_naked_reg;
10532   if (exp->X_op == O_register && exp->X_add_number >= 0)
10534       if ((addressT) exp->X_add_number < i386_regtab_size)
10536 	  exp->X_op = O_constant;
/* flag_code >> 1 indexes the 32- vs 64-bit DWARF register column.  */
10537 	  exp->X_add_number = i386_regtab[exp->X_add_number]
10538 			      .dw2_regnum[flag_code >> 1];
10541 	exp->X_op = O_illegal;
/* Emit the initial CFI instructions for a frame: CFA is esp/rsp plus
   the CIE data alignment, and the return address is at the CFA.  The
   stack-pointer DWARF register number is computed once per mode and
   cached in sp_regno[].  */
10546 tc_x86_frame_initial_instructions (void)
10548   static unsigned int sp_regno[2];
10550   if (!sp_regno[flag_code >> 1])
10552       char *saved_input = input_line_pointer;
10553       char sp[][4] = {"esp", "rsp"};
/* Reuse the register parser by feeding it "esp"/"rsp" as input.  */
10556       input_line_pointer = sp[flag_code >> 1];
10557       tc_x86_parse_to_dw2regnum (&exp);
10558       gas_assert (exp.X_op == O_constant);
10559       sp_regno[flag_code >> 1] = exp.X_add_number;
10560       input_line_pointer = saved_input;
10563   cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
10564   cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
/* Return the address size (in bytes) to use in DWARF2 debug info.
   For the x32 ABI this differs from the BFD bits-per-address default
   (the x32 return value is elided in this listing -- presumably 4).  */
10568 x86_dwarf2_addr_size (void)
10570 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
10571   if (x86_elf_abi == X86_64_X32_ABI)
10574   return bfd_arch_bits_per_address (stdoutput) / 8;
/* Map a .section type string to an ELF section type: in 64-bit mode,
   "unwind" selects SHT_X86_64_UNWIND.  */
10578 i386_elf_section_type (const char *str, size_t len)
10580   if (flag_code == CODE_64BIT
10581       && len == sizeof ("unwind") - 1
10582       && strncmp (str, "unwind", 6) == 0)
10583     return SHT_X86_64_UNWIND;
/* Solaris: mark the .eh_frame section as SHT_X86_64_UNWIND in 64-bit
   mode, as the Solaris linker expects.  */
10590 i386_solaris_fix_up_eh_frame (segT sec)
10592   if (flag_code == CODE_64BIT)
10593     elf_section_type (sec) = SHT_X86_64_UNWIND;
/* PE: emit a DWARF2 offset to SYMBOL as a SIZE-byte section-relative
   (secrel) expression rather than an absolute address.  */
10599 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
10603   exp.X_op = O_secrel;
10604   exp.X_add_symbol = symbol;
10605   exp.X_add_number = 0;
10606   emit_expr (&exp, size);
10610 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10611 /* For ELF on x86-64, add support for SHF_X86_64_LARGE.  */
/* Handle the 'l' flag letter in .section directives: valid only in
   64-bit mode, where it selects SHF_X86_64_LARGE.  On error, set
   *PTR_MSG to a diagnostic listing the accepted letters.  */
10614 x86_64_section_letter (int letter, char **ptr_msg)
10616   if (flag_code == CODE_64BIT)
10619 	return SHF_X86_64_LARGE;
10621       *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
10624     *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
/* Handle the "large" keyword in .section directives (64-bit mode
   only), mapping it to SHF_X86_64_LARGE.  */
10629 x86_64_section_word (char *str, size_t len)
10631   if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
10632     return SHF_X86_64_LARGE;
/* Handle the .largecomm pseudo-op.  In 32-bit mode it degrades to a
   plain .comm with a warning.  In 64-bit mode it parses the common
   symbol with the large-common section and a lazily created .lbss
   section substituted for the normal ones, restoring them after.  */
10638 handle_large_common (int small ATTRIBUTE_UNUSED)
10640   if (flag_code != CODE_64BIT)
10642       s_comm_internal (0, elf_common_parse);
10643       as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
10647       static segT lbss_section;
10648       asection *saved_com_section_ptr = elf_com_section_ptr;
10649       asection *saved_bss_section = bss_section;
10651       if (lbss_section == NULL)
10653 	  flagword applicable;
10654 	  segT seg = now_seg;
10655 	  subsegT subseg = now_subseg;
10657 	  /* The .lbss section is for local .largecomm symbols.  */
10658 	  lbss_section = subseg_new (".lbss", 0);
10659 	  applicable = bfd_applicable_section_flags (stdoutput);
10660 	  bfd_set_section_flags (stdoutput, lbss_section,
10661 				 applicable & SEC_ALLOC);
10662 	  seg_info (lbss_section)->bss = 1;
/* subseg_new switched to .lbss; return to the caller's segment.  */
10664 	  subseg_set (seg, subseg);
/* Temporarily swap in the large-common section and .lbss.  */
10667       elf_com_section_ptr = &_bfd_elf_large_com_section;
10668       bss_section = lbss_section;
10670       s_comm_internal (0, elf_common_parse);
10672       elf_com_section_ptr = saved_com_section_ptr;
10673       bss_section = saved_bss_section;
10676 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */