1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
5 Free Software Foundation, Inc.
7 This file is part of GAS, the GNU Assembler.
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option) any later version.
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
32 #include "safe-ctype.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
48 #define DEFAULT_ARCH "i386"
53 #define INLINE __inline__
59 /* Prefixes will be emitted in the order defined below.
WAIT_PREFIX must be the first prefix since FWAIT is really an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
69 #define HLE_PREFIX REP_PREFIX
71 #define REX_PREFIX 6 /* must come last. */
72 #define MAX_PREFIXES 7 /* max prefixes per opcode */
74 /* we define the syntax here (modulo base,index,scale syntax) */
75 #define REGISTER_PREFIX '%'
76 #define IMMEDIATE_PREFIX '$'
77 #define ABSOLUTE_PREFIX '*'
79 /* these are the instruction mnemonic suffixes in AT&T syntax or
80 memory operand size in Intel syntax. */
81 #define WORD_MNEM_SUFFIX 'w'
82 #define BYTE_MNEM_SUFFIX 'b'
83 #define SHORT_MNEM_SUFFIX 's'
84 #define LONG_MNEM_SUFFIX 'l'
85 #define QWORD_MNEM_SUFFIX 'q'
86 #define XMMWORD_MNEM_SUFFIX 'x'
87 #define YMMWORD_MNEM_SUFFIX 'y'
/* Intel Syntax.  Use a non-ascii letter since it never appears
   in inline asm.  */
90 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
92 #define END_OF_INSN '\0'
/* 'templates' is for grouping together 'template' structures for opcodes
   of the same name.  This is only used for storing the insns in the grand
   ole hash table of insns.
   The templates themselves start at START and range up to (but not including)
   END.  */
103 const insn_template *start;
104 const insn_template *end;
108 /* 386 operand encoding bytes: see 386 book for details of this. */
111 unsigned int regmem; /* codes register or memory operand */
112 unsigned int reg; /* codes register operand (or extended opcode) */
113 unsigned int mode; /* how to interpret regmem & reg */
117 /* x86-64 extension prefix. */
118 typedef int rex_byte;
120 /* 386 opcode byte to code indirect addressing. */
129 /* x86 arch names, types and features */
132 const char *name; /* arch name */
133 unsigned int len; /* arch string length */
134 enum processor_type type; /* arch type */
135 i386_cpu_flags flags; /* cpu feature flags */
136 unsigned int skip; /* show_arch should skip this. */
137 unsigned int negated; /* turn off indicated flags. */
141 static void update_code_flag (int, int);
142 static void set_code_flag (int);
143 static void set_16bit_gcc_code_flag (int);
144 static void set_intel_syntax (int);
145 static void set_intel_mnemonic (int);
146 static void set_allow_index_reg (int);
147 static void set_sse_check (int);
148 static void set_cpu_arch (int);
150 static void pe_directive_secrel (int);
152 static void signed_cons (int);
153 static char *output_invalid (int c);
154 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
156 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
158 static int i386_att_operand (char *);
159 static int i386_intel_operand (char *, int);
160 static int i386_intel_simplify (expressionS *);
161 static int i386_intel_parse_name (const char *, expressionS *);
162 static const reg_entry *parse_register (char *, char **);
163 static char *parse_insn (char *, char *);
164 static char *parse_operands (char *, const char *);
165 static void swap_operands (void);
166 static void swap_2_operands (int, int);
167 static void optimize_imm (void);
168 static void optimize_disp (void);
169 static const insn_template *match_template (void);
170 static int check_string (void);
171 static int process_suffix (void);
172 static int check_byte_reg (void);
173 static int check_long_reg (void);
174 static int check_qword_reg (void);
175 static int check_word_reg (void);
176 static int finalize_imm (void);
177 static int process_operands (void);
178 static const seg_entry *build_modrm_byte (void);
179 static void output_insn (void);
180 static void output_imm (fragS *, offsetT);
181 static void output_disp (fragS *, offsetT);
183 static void s_bss (int);
185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186 static void handle_large_common (int small ATTRIBUTE_UNUSED);
189 static const char *default_arch = DEFAULT_ARCH;
194 /* VEX prefix is either 2 byte or 3 byte. */
195 unsigned char bytes[3];
197 /* Destination or source register specifier. */
198 const reg_entry *register_specifier;
201 /* 'md_assemble ()' gathers together information and puts it into a
208 const reg_entry *regs;
213 operand_size_mismatch,
214 operand_type_mismatch,
215 register_type_mismatch,
216 number_of_operands_mismatch,
217 invalid_instruction_suffix,
220 unsupported_with_intel_mnemonic,
223 invalid_vsib_address,
224 unsupported_vector_index_register
229 /* TM holds the template for the insn were currently assembling. */
232 /* SUFFIX holds the instruction size suffix for byte, word, dword
233 or qword, if given. */
236 /* OPERANDS gives the number of given operands. */
237 unsigned int operands;
239 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
240 of given register, displacement, memory operands and immediate
242 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
244 /* TYPES [i] is the type (see above #defines) which tells us how to
245 use OP[i] for the corresponding operand. */
246 i386_operand_type types[MAX_OPERANDS];
248 /* Displacement expression, immediate expression, or register for each
250 union i386_op op[MAX_OPERANDS];
252 /* Flags for operands. */
253 unsigned int flags[MAX_OPERANDS];
254 #define Operand_PCrel 1
256 /* Relocation type for operand */
257 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
259 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
260 the base index byte below. */
261 const reg_entry *base_reg;
262 const reg_entry *index_reg;
263 unsigned int log2_scale_factor;
265 /* SEG gives the seg_entries of this insn. They are zero unless
266 explicit segment overrides are given. */
267 const seg_entry *seg[2];
269 /* PREFIX holds all the given prefix opcodes (usually null).
270 PREFIXES is the number of prefix opcodes. */
271 unsigned int prefixes;
272 unsigned char prefix[MAX_PREFIXES];
274 /* RM and SIB are the modrm byte and the sib byte where the
275 addressing modes of this insn are encoded. */
281 /* Swap operand in encoding. */
282 unsigned int swap_operand;
284 /* Prefer 8bit or 32bit displacement in encoding. */
287 disp_encoding_default = 0,
292 /* Have HLE prefix. */
293 unsigned int have_hle;
296 enum i386_error error;
299 typedef struct _i386_insn i386_insn;
301 /* List of chars besides those in app.c:symbol_chars that can start an
302 operand. Used to prevent the scrubber eating vital white-space. */
303 const char extra_symbol_chars[] = "*%-(["
312 #if (defined (TE_I386AIX) \
313 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
314 && !defined (TE_GNU) \
315 && !defined (TE_LINUX) \
316 && !defined (TE_NACL) \
317 && !defined (TE_NETWARE) \
318 && !defined (TE_FreeBSD) \
319 && !defined (TE_DragonFly) \
320 && !defined (TE_NetBSD)))
321 /* This array holds the chars that always start a comment. If the
322 pre-processor is disabled, these aren't very useful. The option
323 --divide will remove '/' from this list. */
324 const char *i386_comment_chars = "#/";
325 #define SVR4_COMMENT_CHARS 1
326 #define PREFIX_SEPARATOR '\\'
329 const char *i386_comment_chars = "#";
330 #define PREFIX_SEPARATOR '/'
333 /* This array holds the chars that only start a comment at the beginning of
334 a line. If the line seems to have the form '# 123 filename'
335 .line and .file directives will appear in the pre-processed output.
336 Note that input_file.c hand checks for '#' at the beginning of the
337 first line of the input file. This is because the compiler outputs
338 #NO_APP at the beginning of its output.
339 Also note that comments started like this one will always work if
340 '/' isn't otherwise defined. */
341 const char line_comment_chars[] = "#/";
343 const char line_separator_chars[] = ";";
345 /* Chars that can be used to separate mant from exp in floating point
347 const char EXP_CHARS[] = "eE";
349 /* Chars that mean this number is a floating point constant
352 const char FLT_CHARS[] = "fFdDxX";
/* Tables for lexical analysis.  Each table is a 256-entry membership
   map, indexed by character, filled in at startup (initialization is
   not visible in this chunk).  A non-zero entry means the character
   belongs to the corresponding lexical class.  */
static char mnemonic_chars[256];
static char register_chars[256];
static char operand_chars[256];
static char identifier_chars[256];
static char digit_chars[256];

/* Lexical macros.  Each predicate tests whether its character argument
   belongs to the corresponding class.  The parameter is parenthesized
   (CERT PRE01-C) so that a compound argument such as `c - 1' or
   `a ? b : c' is evaluated as a whole before the (unsigned char) cast;
   previously the cast bound only to the first token, which could
   produce a wrong or out-of-range table index.  Note the argument is
   still evaluated once only, so side-effecting arguments are safe.  */
#define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) (x)])
#define is_operand_char(x) (operand_chars[(unsigned char) (x)])
#define is_register_char(x) (register_chars[(unsigned char) (x)])
#define is_space_char(x) ((x) == ' ')
#define is_identifier_char(x) (identifier_chars[(unsigned char) (x)])
#define is_digit_char(x) (digit_chars[(unsigned char) (x)])
369 /* All non-digit non-letter characters that may occur in an operand. */
370 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
/* md_assemble() always leaves the strings it's passed unaltered.  To
   effect this we maintain a stack of saved characters that we've smashed
   with '\0's (indicating end of strings for various sub-fields of the
   assembler instruction).  */
static char save_stack[32];
/* Top-of-stack pointer into save_stack.  NOTE(review): the code that
   points this at save_stack before first use is not visible in this
   chunk -- confirm it is initialized before the macros below run.  */
static char *save_stack_p;
/* Terminate the string at S in place, pushing the overwritten
   character so it can be put back later.  Nesting depth is bounded by
   save_stack (32 entries); no overflow check is performed here.  */
#define END_STRING_AND_SAVE(s) \
	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
/* Undo the most recent END_STRING_AND_SAVE, restoring the character
   saved for S.  Calls must pair with END_STRING_AND_SAVE in strict
   LIFO order, since the save slots live on a shared stack.  */
#define RESTORE_END_STRING(s) \
	do { *(s) = *--save_stack_p; } while (0)
383 /* The instruction we're assembling. */
386 /* Possible templates for current insn. */
387 static const templates *current_templates;
389 /* Per instruction expressionS buffers: max displacements & immediates. */
390 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
391 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
393 /* Current operand we are working on. */
394 static int this_operand = -1;
396 /* We support four different modes. FLAG_CODE variable is used to distinguish
404 static enum flag_code flag_code;
405 static unsigned int object_64bit;
406 static unsigned int disallow_64bit_reloc;
407 static int use_rela_relocations = 0;
409 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
410 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
411 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
413 /* The ELF ABI to use. */
421 static enum x86_elf_abi x86_elf_abi = I386_ABI;
424 /* The names used to print error messages. */
425 static const char *flag_code_names[] =
432 /* 1 for intel syntax,
434 static int intel_syntax = 0;
436 /* 1 for intel mnemonic,
437 0 if att mnemonic. */
438 static int intel_mnemonic = !SYSV386_COMPAT;
440 /* 1 if support old (<= 2.8.1) versions of gcc. */
441 static int old_gcc = OLDGCC_COMPAT;
443 /* 1 if pseudo registers are permitted. */
444 static int allow_pseudo_reg = 0;
446 /* 1 if register prefix % not required. */
447 static int allow_naked_reg = 0;
/* 1 if pseudo index register, eiz/riz, is allowed.  */
450 static int allow_index_reg = 0;
460 /* Register prefix used for error message. */
461 static const char *register_prefix = "%";
463 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
464 leave, push, and pop instructions so that gcc has the same stack
465 frame as in 32 bit mode. */
466 static char stackop_size = '\0';
468 /* Non-zero to optimize code alignment. */
469 int optimize_align_code = 1;
471 /* Non-zero to quieten some warnings. */
472 static int quiet_warnings = 0;
475 static const char *cpu_arch_name = NULL;
476 static char *cpu_sub_arch_name = NULL;
478 /* CPU feature flags. */
479 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
481 /* If we have selected a cpu we are generating instructions for. */
482 static int cpu_arch_tune_set = 0;
484 /* Cpu we are generating instructions for. */
485 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
487 /* CPU feature flags of cpu we are generating instructions for. */
488 static i386_cpu_flags cpu_arch_tune_flags;
490 /* CPU instruction set architecture used. */
491 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
493 /* CPU feature flags of instruction set architecture used. */
494 i386_cpu_flags cpu_arch_isa_flags;
496 /* If set, conditional jumps are not automatically promoted to handle
497 larger than a byte offset. */
498 static unsigned int no_cond_jump_promotion = 0;
500 /* Encode SSE instructions with VEX prefix. */
501 static unsigned int sse2avx;
503 /* Encode scalar AVX instructions with specific vector length. */
510 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
511 static symbolS *GOT_symbol;
513 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
514 unsigned int x86_dwarf2_return_column;
516 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
517 int x86_cie_data_alignment;
519 /* Interface to relax_segment.
520 There are 3 major relax states for 386 jump insns because the
521 different types of jumps add different sizes to frags when we're
522 figuring out what sort of jump to choose to reach a given label. */
525 #define UNCOND_JUMP 0
527 #define COND_JUMP86 2
532 #define SMALL16 (SMALL | CODE16)
534 #define BIG16 (BIG | CODE16)
538 #define INLINE __inline__
544 #define ENCODE_RELAX_STATE(type, size) \
545 ((relax_substateT) (((type) << 2) | (size)))
546 #define TYPE_FROM_RELAX_STATE(s) \
548 #define DISP_SIZE_FROM_RELAX_STATE(s) \
549 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
551 /* This table is used by relax_frag to promote short jumps to long
552 ones where necessary. SMALL (short) jumps may be promoted to BIG
553 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
554 don't allow a short jump in a 32 bit code segment to be promoted to
555 a 16 bit offset jump because it's slower (requires data size
556 prefix), and doesn't work, unless the destination is in the bottom
557 64k of the code segment (The top 16 bits of eip are zeroed). */
559 const relax_typeS md_relax_table[] =
562 1) most positive reach of this state,
563 2) most negative reach of this state,
564 3) how many bytes this mode will have in the variable part of the frag
565 4) which index into the table to try if we can't fit into this one. */
567 /* UNCOND_JUMP states. */
568 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
570 /* dword jmp adds 4 bytes to frag:
571 0 extra opcode bytes, 4 displacement bytes. */
/* word jmp adds 2 bytes to frag:
574 0 extra opcode bytes, 2 displacement bytes. */
577 /* COND_JUMP states. */
578 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
580 /* dword conditionals adds 5 bytes to frag:
581 1 extra opcode byte, 4 displacement bytes. */
583 /* word conditionals add 3 bytes to frag:
584 1 extra opcode byte, 2 displacement bytes. */
587 /* COND_JUMP86 states. */
588 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
589 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
590 /* dword conditionals adds 5 bytes to frag:
591 1 extra opcode byte, 4 displacement bytes. */
593 /* word conditionals add 4 bytes to frag:
594 1 displacement byte and a 3 byte long branch insn. */
598 static const arch_entry cpu_arch[] =
600 /* Do not replace the first two entries - i386_target_format()
601 relies on them being there in this order. */
602 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
603 CPU_GENERIC32_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
605 CPU_GENERIC64_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
607 CPU_NONE_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
609 CPU_I186_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
611 CPU_I286_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
613 CPU_I386_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
615 CPU_I486_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
617 CPU_I586_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
619 CPU_I686_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
621 CPU_I586_FLAGS, 0, 0 },
622 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
623 CPU_PENTIUMPRO_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
625 CPU_P2_FLAGS, 0, 0 },
626 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
627 CPU_P3_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
629 CPU_P4_FLAGS, 0, 0 },
630 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
631 CPU_CORE_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
633 CPU_NOCONA_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
635 CPU_CORE_FLAGS, 1, 0 },
636 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
637 CPU_CORE_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
639 CPU_CORE2_FLAGS, 1, 0 },
640 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
641 CPU_CORE2_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
643 CPU_COREI7_FLAGS, 0, 0 },
644 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
645 CPU_L1OM_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
647 CPU_K1OM_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
649 CPU_K6_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
651 CPU_K6_2_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
653 CPU_ATHLON_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
655 CPU_K8_FLAGS, 1, 0 },
656 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
657 CPU_K8_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
659 CPU_K8_FLAGS, 0, 0 },
660 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
661 CPU_AMDFAM10_FLAGS, 0, 0 },
662 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
663 CPU_BDVER1_FLAGS, 0, 0 },
664 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
665 CPU_BDVER2_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
667 CPU_8087_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
669 CPU_287_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
671 CPU_387_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
673 CPU_ANY87_FLAGS, 0, 1 },
674 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
675 CPU_MMX_FLAGS, 0, 0 },
676 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
677 CPU_3DNOWA_FLAGS, 0, 1 },
678 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
679 CPU_SSE_FLAGS, 0, 0 },
680 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
681 CPU_SSE2_FLAGS, 0, 0 },
682 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
683 CPU_SSE3_FLAGS, 0, 0 },
684 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
685 CPU_SSSE3_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
687 CPU_SSE4_1_FLAGS, 0, 0 },
688 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
689 CPU_SSE4_2_FLAGS, 0, 0 },
690 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
691 CPU_SSE4_2_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
693 CPU_ANY_SSE_FLAGS, 0, 1 },
694 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
695 CPU_AVX_FLAGS, 0, 0 },
696 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
697 CPU_AVX2_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
699 CPU_ANY_AVX_FLAGS, 0, 1 },
700 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
701 CPU_VMX_FLAGS, 0, 0 },
702 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
703 CPU_VMFUNC_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
705 CPU_SMX_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
707 CPU_XSAVE_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
709 CPU_XSAVEOPT_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
711 CPU_AES_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
713 CPU_PCLMUL_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
715 CPU_PCLMUL_FLAGS, 1, 0 },
716 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
717 CPU_FSGSBASE_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
719 CPU_RDRND_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
721 CPU_F16C_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
723 CPU_BMI2_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
725 CPU_FMA_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
727 CPU_FMA4_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
729 CPU_XOP_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
731 CPU_LWP_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
733 CPU_MOVBE_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
735 CPU_EPT_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
737 CPU_LZCNT_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
739 CPU_HLE_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
741 CPU_RTM_FLAGS, 0, 0 },
742 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
743 CPU_INVPCID_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
745 CPU_CLFLUSH_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
747 CPU_NOP_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
749 CPU_SYSCALL_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
751 CPU_RDTSCP_FLAGS, 0, 0 },
752 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
753 CPU_3DNOW_FLAGS, 0, 0 },
754 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
755 CPU_3DNOWA_FLAGS, 0, 0 },
756 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
757 CPU_PADLOCK_FLAGS, 0, 0 },
758 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
759 CPU_SVME_FLAGS, 1, 0 },
760 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
761 CPU_SVME_FLAGS, 0, 0 },
762 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
763 CPU_SSE4A_FLAGS, 0, 0 },
764 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
765 CPU_ABM_FLAGS, 0, 0 },
766 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
767 CPU_BMI_FLAGS, 0, 0 },
768 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
769 CPU_TBM_FLAGS, 0, 0 },
770 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
771 CPU_ADX_FLAGS, 0, 0 },
772 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
773 CPU_RDSEED_FLAGS, 0, 0 },
774 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
775 CPU_PRFCHW_FLAGS, 0, 0 },
779 /* Like s_lcomm_internal in gas/read.c but the alignment string
780 is allowed to be optional. */
783 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
790 && *input_line_pointer == ',')
792 align = parse_align (needs_align - 1);
794 if (align == (addressT) -1)
809 bss_alloc (symbolP, size, align);
814 pe_lcomm (int needs_align)
816 s_comm_internal (needs_align * 2, pe_lcomm_internal);
820 const pseudo_typeS md_pseudo_table[] =
822 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
823 {"align", s_align_bytes, 0},
825 {"align", s_align_ptwo, 0},
827 {"arch", set_cpu_arch, 0},
831 {"lcomm", pe_lcomm, 1},
833 {"ffloat", float_cons, 'f'},
834 {"dfloat", float_cons, 'd'},
835 {"tfloat", float_cons, 'x'},
837 {"slong", signed_cons, 4},
838 {"noopt", s_ignore, 0},
839 {"optim", s_ignore, 0},
840 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
841 {"code16", set_code_flag, CODE_16BIT},
842 {"code32", set_code_flag, CODE_32BIT},
843 {"code64", set_code_flag, CODE_64BIT},
844 {"intel_syntax", set_intel_syntax, 1},
845 {"att_syntax", set_intel_syntax, 0},
846 {"intel_mnemonic", set_intel_mnemonic, 1},
847 {"att_mnemonic", set_intel_mnemonic, 0},
848 {"allow_index_reg", set_allow_index_reg, 1},
849 {"disallow_index_reg", set_allow_index_reg, 0},
850 {"sse_check", set_sse_check, 0},
851 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
852 {"largecomm", handle_large_common, 0},
854 {"file", (void (*) (int)) dwarf2_directive_file, 0},
855 {"loc", dwarf2_directive_loc, 0},
856 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
859 {"secrel32", pe_directive_secrel, 0},
864 /* For interface with expression (). */
865 extern char *input_line_pointer;
867 /* Hash table for instruction mnemonic lookup. */
868 static struct hash_control *op_hash;
870 /* Hash table for register lookup. */
871 static struct hash_control *reg_hash;
874 i386_align_code (fragS *fragP, int count)
876 /* Various efficient no-op patterns for aligning code labels.
877 Note: Don't try to assemble the instructions in the comments.
878 0L and 0w are not legal. */
879 static const char f32_1[] =
881 static const char f32_2[] =
882 {0x66,0x90}; /* xchg %ax,%ax */
883 static const char f32_3[] =
884 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
885 static const char f32_4[] =
886 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
887 static const char f32_5[] =
889 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
890 static const char f32_6[] =
891 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
892 static const char f32_7[] =
893 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
894 static const char f32_8[] =
896 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
897 static const char f32_9[] =
898 {0x89,0xf6, /* movl %esi,%esi */
899 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
900 static const char f32_10[] =
901 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
902 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
903 static const char f32_11[] =
904 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
905 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
906 static const char f32_12[] =
907 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
908 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
909 static const char f32_13[] =
910 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
911 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
912 static const char f32_14[] =
913 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
914 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
915 static const char f16_3[] =
916 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
917 static const char f16_4[] =
918 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
919 static const char f16_5[] =
921 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
922 static const char f16_6[] =
923 {0x89,0xf6, /* mov %si,%si */
924 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
925 static const char f16_7[] =
926 {0x8d,0x74,0x00, /* lea 0(%si),%si */
927 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
928 static const char f16_8[] =
929 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
930 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
931 static const char jump_31[] =
932 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
933 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
934 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
935 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
936 static const char *const f32_patt[] = {
937 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
938 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
940 static const char *const f16_patt[] = {
941 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
944 static const char alt_3[] =
946 /* nopl 0(%[re]ax) */
947 static const char alt_4[] =
948 {0x0f,0x1f,0x40,0x00};
949 /* nopl 0(%[re]ax,%[re]ax,1) */
950 static const char alt_5[] =
951 {0x0f,0x1f,0x44,0x00,0x00};
952 /* nopw 0(%[re]ax,%[re]ax,1) */
953 static const char alt_6[] =
954 {0x66,0x0f,0x1f,0x44,0x00,0x00};
955 /* nopl 0L(%[re]ax) */
956 static const char alt_7[] =
957 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
958 /* nopl 0L(%[re]ax,%[re]ax,1) */
959 static const char alt_8[] =
960 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
961 /* nopw 0L(%[re]ax,%[re]ax,1) */
962 static const char alt_9[] =
963 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
964 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
965 static const char alt_10[] =
966 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
968 nopw %cs:0L(%[re]ax,%[re]ax,1) */
969 static const char alt_long_11[] =
971 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
974 nopw %cs:0L(%[re]ax,%[re]ax,1) */
975 static const char alt_long_12[] =
978 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
982 nopw %cs:0L(%[re]ax,%[re]ax,1) */
983 static const char alt_long_13[] =
987 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
992 nopw %cs:0L(%[re]ax,%[re]ax,1) */
993 static const char alt_long_14[] =
998 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1004 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1005 static const char alt_long_15[] =
1011 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1012 /* nopl 0(%[re]ax,%[re]ax,1)
1013 nopw 0(%[re]ax,%[re]ax,1) */
1014 static const char alt_short_11[] =
1015 {0x0f,0x1f,0x44,0x00,0x00,
1016 0x66,0x0f,0x1f,0x44,0x00,0x00};
1017 /* nopw 0(%[re]ax,%[re]ax,1)
1018 nopw 0(%[re]ax,%[re]ax,1) */
1019 static const char alt_short_12[] =
1020 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1021 0x66,0x0f,0x1f,0x44,0x00,0x00};
1022 /* nopw 0(%[re]ax,%[re]ax,1)
1024 static const char alt_short_13[] =
1025 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1026 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1029 static const char alt_short_14[] =
1030 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1031 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1033 nopl 0L(%[re]ax,%[re]ax,1) */
1034 static const char alt_short_15[] =
1035 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1036 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1037 static const char *const alt_short_patt[] = {
1038 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1039 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1040 alt_short_14, alt_short_15
1042 static const char *const alt_long_patt[] = {
1043 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1044 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1045 alt_long_14, alt_long_15
1048 /* Only align for at least a positive non-zero boundary. */
1049 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1052 /* We need to decide which NOP sequence to use for 32bit and
1053 64bit. When -mtune= is used:
1055 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1056 PROCESSOR_GENERIC32, f32_patt will be used.
1057 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1058 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1059 PROCESSOR_GENERIC64, alt_long_patt will be used.
1060 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1061 PROCESSOR_AMDFAM10, and PROCESSOR_BD, alt_short_patt
1064 When -mtune= isn't used, alt_long_patt will be used if
1065 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1068 When -march= or .arch is used, we can't use anything beyond
1069 cpu_arch_isa_flags. */
1071 if (flag_code == CODE_16BIT)
1075 memcpy (fragP->fr_literal + fragP->fr_fix,
1077 /* Adjust jump offset. */
1078 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1081 memcpy (fragP->fr_literal + fragP->fr_fix,
1082 f16_patt[count - 1], count);
1086 const char *const *patt = NULL;
1088 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1090 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1091 switch (cpu_arch_tune)
1093 case PROCESSOR_UNKNOWN:
1094 /* We use cpu_arch_isa_flags to check if we SHOULD
1095 optimize with nops. */
1096 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1097 patt = alt_long_patt;
1101 case PROCESSOR_PENTIUM4:
1102 case PROCESSOR_NOCONA:
1103 case PROCESSOR_CORE:
1104 case PROCESSOR_CORE2:
1105 case PROCESSOR_COREI7:
1106 case PROCESSOR_L1OM:
1107 case PROCESSOR_K1OM:
1108 case PROCESSOR_GENERIC64:
1109 patt = alt_long_patt;
1112 case PROCESSOR_ATHLON:
1114 case PROCESSOR_AMDFAM10:
1116 patt = alt_short_patt;
1118 case PROCESSOR_I386:
1119 case PROCESSOR_I486:
1120 case PROCESSOR_PENTIUM:
1121 case PROCESSOR_PENTIUMPRO:
1122 case PROCESSOR_GENERIC32:
1129 switch (fragP->tc_frag_data.tune)
1131 case PROCESSOR_UNKNOWN:
1132 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1133 PROCESSOR_UNKNOWN. */
1137 case PROCESSOR_I386:
1138 case PROCESSOR_I486:
1139 case PROCESSOR_PENTIUM:
1141 case PROCESSOR_ATHLON:
1143 case PROCESSOR_AMDFAM10:
1145 case PROCESSOR_GENERIC32:
1146 /* We use cpu_arch_isa_flags to check if we CAN optimize
1148 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1149 patt = alt_short_patt;
1153 case PROCESSOR_PENTIUMPRO:
1154 case PROCESSOR_PENTIUM4:
1155 case PROCESSOR_NOCONA:
1156 case PROCESSOR_CORE:
1157 case PROCESSOR_CORE2:
1158 case PROCESSOR_COREI7:
1159 case PROCESSOR_L1OM:
1160 case PROCESSOR_K1OM:
1161 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1162 patt = alt_long_patt;
1166 case PROCESSOR_GENERIC64:
1167 patt = alt_long_patt;
1172 if (patt == f32_patt)
1174 /* If the padding is less than 15 bytes, we use the normal
1175 ones. Otherwise, we use a jump instruction and adjust
1179 /* For 64bit, the limit is 3 bytes. */
1180 if (flag_code == CODE_64BIT
1181 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1186 memcpy (fragP->fr_literal + fragP->fr_fix,
1187 patt[count - 1], count);
1190 memcpy (fragP->fr_literal + fragP->fr_fix,
1192 /* Adjust jump offset. */
1193 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1198 /* Maximum length of an instruction is 15 byte. If the
1199 padding is greater than 15 bytes and we don't use jump,
1200 we have to break it into smaller pieces. */
1201 int padding = count;
1202 while (padding > 15)
1205 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1210 memcpy (fragP->fr_literal + fragP->fr_fix,
1211 patt [padding - 1], padding);
1214 fragP->fr_var = count;
1218 operand_type_all_zero (const union i386_operand_type *x)
1220 switch (ARRAY_SIZE(x->array))
1229 return !x->array[0];
1236 operand_type_set (union i386_operand_type *x, unsigned int v)
1238 switch (ARRAY_SIZE(x->array))
1253 operand_type_equal (const union i386_operand_type *x,
1254 const union i386_operand_type *y)
1256 switch (ARRAY_SIZE(x->array))
1259 if (x->array[2] != y->array[2])
1262 if (x->array[1] != y->array[1])
1265 return x->array[0] == y->array[0];
1273 cpu_flags_all_zero (const union i386_cpu_flags *x)
1275 switch (ARRAY_SIZE(x->array))
1284 return !x->array[0];
1291 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1293 switch (ARRAY_SIZE(x->array))
1308 cpu_flags_equal (const union i386_cpu_flags *x,
1309 const union i386_cpu_flags *y)
1311 switch (ARRAY_SIZE(x->array))
1314 if (x->array[2] != y->array[2])
1317 if (x->array[1] != y->array[1])
1320 return x->array[0] == y->array[0];
1328 cpu_flags_check_cpu64 (i386_cpu_flags f)
1330 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1331 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1334 static INLINE i386_cpu_flags
1335 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1337 switch (ARRAY_SIZE (x.array))
1340 x.array [2] &= y.array [2];
1342 x.array [1] &= y.array [1];
1344 x.array [0] &= y.array [0];
1352 static INLINE i386_cpu_flags
1353 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1355 switch (ARRAY_SIZE (x.array))
1358 x.array [2] |= y.array [2];
1360 x.array [1] |= y.array [1];
1362 x.array [0] |= y.array [0];
1370 static INLINE i386_cpu_flags
1371 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1373 switch (ARRAY_SIZE (x.array))
1376 x.array [2] &= ~y.array [2];
1378 x.array [1] &= ~y.array [1];
1380 x.array [0] &= ~y.array [0];
1388 #define CPU_FLAGS_ARCH_MATCH 0x1
1389 #define CPU_FLAGS_64BIT_MATCH 0x2
1390 #define CPU_FLAGS_AES_MATCH 0x4
1391 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1392 #define CPU_FLAGS_AVX_MATCH 0x10
1394 #define CPU_FLAGS_32BIT_MATCH \
1395 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1396 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1397 #define CPU_FLAGS_PERFECT_MATCH \
1398 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1400 /* Return CPU flags match bits. */
1403 cpu_flags_match (const insn_template *t)
1405 i386_cpu_flags x = t->cpu_flags;
1406 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1408 x.bitfield.cpu64 = 0;
1409 x.bitfield.cpuno64 = 0;
1411 if (cpu_flags_all_zero (&x))
1413 /* This instruction is available on all archs. */
1414 match |= CPU_FLAGS_32BIT_MATCH;
1418 /* This instruction is available only on some archs. */
1419 i386_cpu_flags cpu = cpu_arch_flags;
1421 cpu.bitfield.cpu64 = 0;
1422 cpu.bitfield.cpuno64 = 0;
1423 cpu = cpu_flags_and (x, cpu);
1424 if (!cpu_flags_all_zero (&cpu))
1426 if (x.bitfield.cpuavx)
1428 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1429 if (cpu.bitfield.cpuavx)
1431 /* Check SSE2AVX. */
1432 if (!t->opcode_modifier.sse2avx|| sse2avx)
1434 match |= (CPU_FLAGS_ARCH_MATCH
1435 | CPU_FLAGS_AVX_MATCH);
1437 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1438 match |= CPU_FLAGS_AES_MATCH;
1440 if (!x.bitfield.cpupclmul
1441 || cpu.bitfield.cpupclmul)
1442 match |= CPU_FLAGS_PCLMUL_MATCH;
1446 match |= CPU_FLAGS_ARCH_MATCH;
1449 match |= CPU_FLAGS_32BIT_MATCH;
1455 static INLINE i386_operand_type
1456 operand_type_and (i386_operand_type x, i386_operand_type y)
1458 switch (ARRAY_SIZE (x.array))
1461 x.array [2] &= y.array [2];
1463 x.array [1] &= y.array [1];
1465 x.array [0] &= y.array [0];
1473 static INLINE i386_operand_type
1474 operand_type_or (i386_operand_type x, i386_operand_type y)
1476 switch (ARRAY_SIZE (x.array))
1479 x.array [2] |= y.array [2];
1481 x.array [1] |= y.array [1];
1483 x.array [0] |= y.array [0];
1491 static INLINE i386_operand_type
1492 operand_type_xor (i386_operand_type x, i386_operand_type y)
1494 switch (ARRAY_SIZE (x.array))
1497 x.array [2] ^= y.array [2];
1499 x.array [1] ^= y.array [1];
1501 x.array [0] ^= y.array [0];
1509 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1510 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1511 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1512 static const i386_operand_type inoutportreg
1513 = OPERAND_TYPE_INOUTPORTREG;
1514 static const i386_operand_type reg16_inoutportreg
1515 = OPERAND_TYPE_REG16_INOUTPORTREG;
1516 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1517 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1518 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1519 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1520 static const i386_operand_type anydisp
1521 = OPERAND_TYPE_ANYDISP;
1522 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1523 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1524 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1525 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1526 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1527 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1528 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1529 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1530 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1531 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1532 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1533 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1544 operand_type_check (i386_operand_type t, enum operand_type c)
1549 return (t.bitfield.reg8
1552 || t.bitfield.reg64);
1555 return (t.bitfield.imm8
1559 || t.bitfield.imm32s
1560 || t.bitfield.imm64);
1563 return (t.bitfield.disp8
1564 || t.bitfield.disp16
1565 || t.bitfield.disp32
1566 || t.bitfield.disp32s
1567 || t.bitfield.disp64);
1570 return (t.bitfield.disp8
1571 || t.bitfield.disp16
1572 || t.bitfield.disp32
1573 || t.bitfield.disp32s
1574 || t.bitfield.disp64
1575 || t.bitfield.baseindex);
1584 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1585 operand J for instruction template T. */
1588 match_reg_size (const insn_template *t, unsigned int j)
1590 return !((i.types[j].bitfield.byte
1591 && !t->operand_types[j].bitfield.byte)
1592 || (i.types[j].bitfield.word
1593 && !t->operand_types[j].bitfield.word)
1594 || (i.types[j].bitfield.dword
1595 && !t->operand_types[j].bitfield.dword)
1596 || (i.types[j].bitfield.qword
1597 && !t->operand_types[j].bitfield.qword));
1600 /* Return 1 if there is no conflict in any size on operand J for
1601 instruction template T. */
1604 match_mem_size (const insn_template *t, unsigned int j)
1606 return (match_reg_size (t, j)
1607 && !((i.types[j].bitfield.unspecified
1608 && !t->operand_types[j].bitfield.unspecified)
1609 || (i.types[j].bitfield.fword
1610 && !t->operand_types[j].bitfield.fword)
1611 || (i.types[j].bitfield.tbyte
1612 && !t->operand_types[j].bitfield.tbyte)
1613 || (i.types[j].bitfield.xmmword
1614 && !t->operand_types[j].bitfield.xmmword)
1615 || (i.types[j].bitfield.ymmword
1616 && !t->operand_types[j].bitfield.ymmword)));
1619 /* Return 1 if there is no size conflict on any operands for
1620 instruction template T. */
1623 operand_size_match (const insn_template *t)
1628 /* Don't check jump instructions. */
1629 if (t->opcode_modifier.jump
1630 || t->opcode_modifier.jumpbyte
1631 || t->opcode_modifier.jumpdword
1632 || t->opcode_modifier.jumpintersegment)
1635 /* Check memory and accumulator operand size. */
1636 for (j = 0; j < i.operands; j++)
1638 if (t->operand_types[j].bitfield.anysize)
1641 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1647 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1656 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1659 i.error = operand_size_mismatch;
1663 /* Check reverse. */
1664 gas_assert (i.operands == 2);
1667 for (j = 0; j < 2; j++)
1669 if (t->operand_types[j].bitfield.acc
1670 && !match_reg_size (t, j ? 0 : 1))
1673 if (i.types[j].bitfield.mem
1674 && !match_mem_size (t, j ? 0 : 1))
1682 operand_type_match (i386_operand_type overlap,
1683 i386_operand_type given)
1685 i386_operand_type temp = overlap;
1687 temp.bitfield.jumpabsolute = 0;
1688 temp.bitfield.unspecified = 0;
1689 temp.bitfield.byte = 0;
1690 temp.bitfield.word = 0;
1691 temp.bitfield.dword = 0;
1692 temp.bitfield.fword = 0;
1693 temp.bitfield.qword = 0;
1694 temp.bitfield.tbyte = 0;
1695 temp.bitfield.xmmword = 0;
1696 temp.bitfield.ymmword = 0;
1697 if (operand_type_all_zero (&temp))
1700 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1701 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1705 i.error = operand_type_mismatch;
1709 /* If given types g0 and g1 are registers they must be of the same type
1710 unless the expected operand type register overlap is null.
1711 Note that Acc in a template matches every size of reg. */
1714 operand_type_register_match (i386_operand_type m0,
1715 i386_operand_type g0,
1716 i386_operand_type t0,
1717 i386_operand_type m1,
1718 i386_operand_type g1,
1719 i386_operand_type t1)
1721 if (!operand_type_check (g0, reg))
1724 if (!operand_type_check (g1, reg))
1727 if (g0.bitfield.reg8 == g1.bitfield.reg8
1728 && g0.bitfield.reg16 == g1.bitfield.reg16
1729 && g0.bitfield.reg32 == g1.bitfield.reg32
1730 && g0.bitfield.reg64 == g1.bitfield.reg64)
1733 if (m0.bitfield.acc)
1735 t0.bitfield.reg8 = 1;
1736 t0.bitfield.reg16 = 1;
1737 t0.bitfield.reg32 = 1;
1738 t0.bitfield.reg64 = 1;
1741 if (m1.bitfield.acc)
1743 t1.bitfield.reg8 = 1;
1744 t1.bitfield.reg16 = 1;
1745 t1.bitfield.reg32 = 1;
1746 t1.bitfield.reg64 = 1;
1749 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1750 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1751 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1752 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1755 i.error = register_type_mismatch;
1760 static INLINE unsigned int
1761 mode_from_disp_size (i386_operand_type t)
1763 if (t.bitfield.disp8)
1765 else if (t.bitfield.disp16
1766 || t.bitfield.disp32
1767 || t.bitfield.disp32s)
1774 fits_in_signed_byte (offsetT num)
1776 return (num >= -128) && (num <= 127);
1780 fits_in_unsigned_byte (offsetT num)
1782 return (num & 0xff) == num;
1786 fits_in_unsigned_word (offsetT num)
1788 return (num & 0xffff) == num;
1792 fits_in_signed_word (offsetT num)
1794 return (-32768 <= num) && (num <= 32767);
1798 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1803 return (!(((offsetT) -1 << 31) & num)
1804 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1806 } /* fits_in_signed_long() */
1809 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1814 return (num & (((offsetT) 2 << 31) - 1)) == num;
1816 } /* fits_in_unsigned_long() */
1819 fits_in_imm4 (offsetT num)
1821 return (num & 0xf) == num;
1824 static i386_operand_type
1825 smallest_imm_type (offsetT num)
1827 i386_operand_type t;
1829 operand_type_set (&t, 0);
1830 t.bitfield.imm64 = 1;
1832 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1834 /* This code is disabled on the 486 because all the Imm1 forms
1835 in the opcode table are slower on the i486. They're the
1836 versions with the implicitly specified single-position
1837 displacement, which has another syntax if you really want to
1839 t.bitfield.imm1 = 1;
1840 t.bitfield.imm8 = 1;
1841 t.bitfield.imm8s = 1;
1842 t.bitfield.imm16 = 1;
1843 t.bitfield.imm32 = 1;
1844 t.bitfield.imm32s = 1;
1846 else if (fits_in_signed_byte (num))
1848 t.bitfield.imm8 = 1;
1849 t.bitfield.imm8s = 1;
1850 t.bitfield.imm16 = 1;
1851 t.bitfield.imm32 = 1;
1852 t.bitfield.imm32s = 1;
1854 else if (fits_in_unsigned_byte (num))
1856 t.bitfield.imm8 = 1;
1857 t.bitfield.imm16 = 1;
1858 t.bitfield.imm32 = 1;
1859 t.bitfield.imm32s = 1;
1861 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1863 t.bitfield.imm16 = 1;
1864 t.bitfield.imm32 = 1;
1865 t.bitfield.imm32s = 1;
1867 else if (fits_in_signed_long (num))
1869 t.bitfield.imm32 = 1;
1870 t.bitfield.imm32s = 1;
1872 else if (fits_in_unsigned_long (num))
1873 t.bitfield.imm32 = 1;
1879 offset_in_range (offsetT val, int size)
1885 case 1: mask = ((addressT) 1 << 8) - 1; break;
1886 case 2: mask = ((addressT) 1 << 16) - 1; break;
1887 case 4: mask = ((addressT) 2 << 31) - 1; break;
1889 case 8: mask = ((addressT) 2 << 63) - 1; break;
1895 /* If BFD64, sign extend val for 32bit address mode. */
1896 if (flag_code != CODE_64BIT
1897 || i.prefix[ADDR_PREFIX])
1898 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1899 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1902 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1904 char buf1[40], buf2[40];
1906 sprint_value (buf1, val);
1907 sprint_value (buf2, val & mask);
1908 as_warn (_("%s shortened to %s"), buf1, buf2);
1922 a. PREFIX_EXIST if attempting to add a prefix where one from the
1923 same class already exists.
1924 b. PREFIX_LOCK if lock prefix is added.
1925 c. PREFIX_REP if rep/repne prefix is added.
1926 d. PREFIX_OTHER if other prefix is added.
1929 static enum PREFIX_GROUP
1930 add_prefix (unsigned int prefix)
1932 enum PREFIX_GROUP ret = PREFIX_OTHER;
1935 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1936 && flag_code == CODE_64BIT)
1938 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1939 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1940 && (prefix & (REX_R | REX_X | REX_B))))
1951 case CS_PREFIX_OPCODE:
1952 case DS_PREFIX_OPCODE:
1953 case ES_PREFIX_OPCODE:
1954 case FS_PREFIX_OPCODE:
1955 case GS_PREFIX_OPCODE:
1956 case SS_PREFIX_OPCODE:
1960 case REPNE_PREFIX_OPCODE:
1961 case REPE_PREFIX_OPCODE:
1966 case LOCK_PREFIX_OPCODE:
1975 case ADDR_PREFIX_OPCODE:
1979 case DATA_PREFIX_OPCODE:
1983 if (i.prefix[q] != 0)
1991 i.prefix[q] |= prefix;
1994 as_bad (_("same type of prefix used twice"));
2000 update_code_flag (int value, int check)
2002 PRINTF_LIKE ((*as_error));
2004 flag_code = (enum flag_code) value;
2005 if (flag_code == CODE_64BIT)
2007 cpu_arch_flags.bitfield.cpu64 = 1;
2008 cpu_arch_flags.bitfield.cpuno64 = 0;
2012 cpu_arch_flags.bitfield.cpu64 = 0;
2013 cpu_arch_flags.bitfield.cpuno64 = 1;
2015 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
2018 as_error = as_fatal;
2021 (*as_error) (_("64bit mode not supported on `%s'."),
2022 cpu_arch_name ? cpu_arch_name : default_arch);
2024 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2027 as_error = as_fatal;
2030 (*as_error) (_("32bit mode not supported on `%s'."),
2031 cpu_arch_name ? cpu_arch_name : default_arch);
2033 stackop_size = '\0';
2037 set_code_flag (int value)
2039 update_code_flag (value, 0);
2043 set_16bit_gcc_code_flag (int new_code_flag)
2045 flag_code = (enum flag_code) new_code_flag;
2046 if (flag_code != CODE_16BIT)
2048 cpu_arch_flags.bitfield.cpu64 = 0;
2049 cpu_arch_flags.bitfield.cpuno64 = 1;
2050 stackop_size = LONG_MNEM_SUFFIX;
2054 set_intel_syntax (int syntax_flag)
2056 /* Find out if register prefixing is specified. */
2057 int ask_naked_reg = 0;
2060 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2062 char *string = input_line_pointer;
2063 int e = get_symbol_end ();
2065 if (strcmp (string, "prefix") == 0)
2067 else if (strcmp (string, "noprefix") == 0)
2070 as_bad (_("bad argument to syntax directive."));
2071 *input_line_pointer = e;
2073 demand_empty_rest_of_line ();
2075 intel_syntax = syntax_flag;
2077 if (ask_naked_reg == 0)
2078 allow_naked_reg = (intel_syntax
2079 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2081 allow_naked_reg = (ask_naked_reg < 0);
2083 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2085 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2086 identifier_chars['$'] = intel_syntax ? '$' : 0;
2087 register_prefix = allow_naked_reg ? "" : "%";
2091 set_intel_mnemonic (int mnemonic_flag)
2093 intel_mnemonic = mnemonic_flag;
2097 set_allow_index_reg (int flag)
2099 allow_index_reg = flag;
2103 set_sse_check (int dummy ATTRIBUTE_UNUSED)
2107 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2109 char *string = input_line_pointer;
2110 int e = get_symbol_end ();
2112 if (strcmp (string, "none") == 0)
2113 sse_check = sse_check_none;
2114 else if (strcmp (string, "warning") == 0)
2115 sse_check = sse_check_warning;
2116 else if (strcmp (string, "error") == 0)
2117 sse_check = sse_check_error;
2119 as_bad (_("bad argument to sse_check directive."));
2120 *input_line_pointer = e;
2123 as_bad (_("missing argument for sse_check directive"));
2125 demand_empty_rest_of_line ();
2129 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2130 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2132 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2133 static const char *arch;
2135 /* Intel LIOM is only supported on ELF. */
2141 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2142 use default_arch. */
2143 arch = cpu_arch_name;
2145 arch = default_arch;
2148 /* If we are targeting Intel L1OM, we must enable it. */
2149 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2150 || new_flag.bitfield.cpul1om)
2153 /* If we are targeting Intel K1OM, we must enable it. */
2154 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2155 || new_flag.bitfield.cpuk1om)
2158 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2163 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2167 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2169 char *string = input_line_pointer;
2170 int e = get_symbol_end ();
2172 i386_cpu_flags flags;
2174 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2176 if (strcmp (string, cpu_arch[j].name) == 0)
2178 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2182 cpu_arch_name = cpu_arch[j].name;
2183 cpu_sub_arch_name = NULL;
2184 cpu_arch_flags = cpu_arch[j].flags;
2185 if (flag_code == CODE_64BIT)
2187 cpu_arch_flags.bitfield.cpu64 = 1;
2188 cpu_arch_flags.bitfield.cpuno64 = 0;
2192 cpu_arch_flags.bitfield.cpu64 = 0;
2193 cpu_arch_flags.bitfield.cpuno64 = 1;
2195 cpu_arch_isa = cpu_arch[j].type;
2196 cpu_arch_isa_flags = cpu_arch[j].flags;
2197 if (!cpu_arch_tune_set)
2199 cpu_arch_tune = cpu_arch_isa;
2200 cpu_arch_tune_flags = cpu_arch_isa_flags;
2205 if (!cpu_arch[j].negated)
2206 flags = cpu_flags_or (cpu_arch_flags,
2209 flags = cpu_flags_and_not (cpu_arch_flags,
2211 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2213 if (cpu_sub_arch_name)
2215 char *name = cpu_sub_arch_name;
2216 cpu_sub_arch_name = concat (name,
2218 (const char *) NULL);
2222 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2223 cpu_arch_flags = flags;
2224 cpu_arch_isa_flags = flags;
2226 *input_line_pointer = e;
2227 demand_empty_rest_of_line ();
2231 if (j >= ARRAY_SIZE (cpu_arch))
2232 as_bad (_("no such architecture: `%s'"), string);
2234 *input_line_pointer = e;
2237 as_bad (_("missing cpu architecture"));
2239 no_cond_jump_promotion = 0;
2240 if (*input_line_pointer == ','
2241 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2243 char *string = ++input_line_pointer;
2244 int e = get_symbol_end ();
2246 if (strcmp (string, "nojumps") == 0)
2247 no_cond_jump_promotion = 1;
2248 else if (strcmp (string, "jumps") == 0)
2251 as_bad (_("no such architecture modifier: `%s'"), string);
2253 *input_line_pointer = e;
2256 demand_empty_rest_of_line ();
2259 enum bfd_architecture
2262 if (cpu_arch_isa == PROCESSOR_L1OM)
2264 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2265 || flag_code != CODE_64BIT)
2266 as_fatal (_("Intel L1OM is 64bit ELF only"));
2267 return bfd_arch_l1om;
2269 else if (cpu_arch_isa == PROCESSOR_K1OM)
2271 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2272 || flag_code != CODE_64BIT)
2273 as_fatal (_("Intel K1OM is 64bit ELF only"));
2274 return bfd_arch_k1om;
2277 return bfd_arch_i386;
2283 if (!strncmp (default_arch, "x86_64", 6))
2285 if (cpu_arch_isa == PROCESSOR_L1OM)
2287 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2288 || default_arch[6] != '\0')
2289 as_fatal (_("Intel L1OM is 64bit ELF only"));
2290 return bfd_mach_l1om;
2292 else if (cpu_arch_isa == PROCESSOR_K1OM)
2294 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2295 || default_arch[6] != '\0')
2296 as_fatal (_("Intel K1OM is 64bit ELF only"));
2297 return bfd_mach_k1om;
2299 else if (default_arch[6] == '\0')
2300 return bfd_mach_x86_64;
2302 return bfd_mach_x64_32;
2304 else if (!strcmp (default_arch, "i386"))
2305 return bfd_mach_i386_i386;
2307 as_fatal (_("unknown architecture"));
2313 const char *hash_err;
2315 /* Initialize op_hash hash table. */
2316 op_hash = hash_new ();
2319 const insn_template *optab;
2320 templates *core_optab;
2322 /* Setup for loop. */
2324 core_optab = (templates *) xmalloc (sizeof (templates));
2325 core_optab->start = optab;
2330 if (optab->name == NULL
2331 || strcmp (optab->name, (optab - 1)->name) != 0)
2333 /* different name --> ship out current template list;
2334 add to hash table; & begin anew. */
2335 core_optab->end = optab;
2336 hash_err = hash_insert (op_hash,
2338 (void *) core_optab);
2341 as_fatal (_("internal Error: Can't hash %s: %s"),
2345 if (optab->name == NULL)
2347 core_optab = (templates *) xmalloc (sizeof (templates));
2348 core_optab->start = optab;
2353 /* Initialize reg_hash hash table. */
2354 reg_hash = hash_new ();
2356 const reg_entry *regtab;
2357 unsigned int regtab_size = i386_regtab_size;
2359 for (regtab = i386_regtab; regtab_size--; regtab++)
2361 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2363 as_fatal (_("internal Error: Can't hash %s: %s"),
2369 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2374 for (c = 0; c < 256; c++)
2379 mnemonic_chars[c] = c;
2380 register_chars[c] = c;
2381 operand_chars[c] = c;
2383 else if (ISLOWER (c))
2385 mnemonic_chars[c] = c;
2386 register_chars[c] = c;
2387 operand_chars[c] = c;
2389 else if (ISUPPER (c))
2391 mnemonic_chars[c] = TOLOWER (c);
2392 register_chars[c] = mnemonic_chars[c];
2393 operand_chars[c] = c;
2396 if (ISALPHA (c) || ISDIGIT (c))
2397 identifier_chars[c] = c;
2400 identifier_chars[c] = c;
2401 operand_chars[c] = c;
2406 identifier_chars['@'] = '@';
2409 identifier_chars['?'] = '?';
2410 operand_chars['?'] = '?';
2412 digit_chars['-'] = '-';
2413 mnemonic_chars['_'] = '_';
2414 mnemonic_chars['-'] = '-';
2415 mnemonic_chars['.'] = '.';
2416 identifier_chars['_'] = '_';
2417 identifier_chars['.'] = '.';
2419 for (p = operand_special_chars; *p != '\0'; p++)
2420 operand_chars[(unsigned char) *p] = *p;
2423 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2426 record_alignment (text_section, 2);
2427 record_alignment (data_section, 2);
2428 record_alignment (bss_section, 2);
2432 if (flag_code == CODE_64BIT)
2434 #if defined (OBJ_COFF) && defined (TE_PE)
2435 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2438 x86_dwarf2_return_column = 16;
2440 x86_cie_data_alignment = -8;
2444 x86_dwarf2_return_column = 8;
2445 x86_cie_data_alignment = -4;
2450 i386_print_statistics (FILE *file)
2452 hash_print_statistics (file, "i386 opcode", op_hash);
2453 hash_print_statistics (file, "i386 register", reg_hash);
2458 /* Debugging routines for md_assemble. */
2459 static void pte (insn_template *);
2460 static void pt (i386_operand_type);
2461 static void pe (expressionS *);
2462 static void ps (symbolS *);
2465 pi (char *line, i386_insn *x)
2469 fprintf (stdout, "%s: template ", line);
2471 fprintf (stdout, " address: base %s index %s scale %x\n",
2472 x->base_reg ? x->base_reg->reg_name : "none",
2473 x->index_reg ? x->index_reg->reg_name : "none",
2474 x->log2_scale_factor);
2475 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2476 x->rm.mode, x->rm.reg, x->rm.regmem);
2477 fprintf (stdout, " sib: base %x index %x scale %x\n",
2478 x->sib.base, x->sib.index, x->sib.scale);
2479 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2480 (x->rex & REX_W) != 0,
2481 (x->rex & REX_R) != 0,
2482 (x->rex & REX_X) != 0,
2483 (x->rex & REX_B) != 0);
2484 for (j = 0; j < x->operands; j++)
2486 fprintf (stdout, " #%d: ", j + 1);
2488 fprintf (stdout, "\n");
2489 if (x->types[j].bitfield.reg8
2490 || x->types[j].bitfield.reg16
2491 || x->types[j].bitfield.reg32
2492 || x->types[j].bitfield.reg64
2493 || x->types[j].bitfield.regmmx
2494 || x->types[j].bitfield.regxmm
2495 || x->types[j].bitfield.regymm
2496 || x->types[j].bitfield.sreg2
2497 || x->types[j].bitfield.sreg3
2498 || x->types[j].bitfield.control
2499 || x->types[j].bitfield.debug
2500 || x->types[j].bitfield.test)
2501 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2502 if (operand_type_check (x->types[j], imm))
2504 if (operand_type_check (x->types[j], disp))
2505 pe (x->op[j].disps);
2510 pte (insn_template *t)
2513 fprintf (stdout, " %d operands ", t->operands);
2514 fprintf (stdout, "opcode %x ", t->base_opcode);
2515 if (t->extension_opcode != None)
2516 fprintf (stdout, "ext %x ", t->extension_opcode);
2517 if (t->opcode_modifier.d)
2518 fprintf (stdout, "D");
2519 if (t->opcode_modifier.w)
2520 fprintf (stdout, "W");
2521 fprintf (stdout, "\n");
2522 for (j = 0; j < t->operands; j++)
2524 fprintf (stdout, " #%d type ", j + 1);
2525 pt (t->operand_types[j]);
2526 fprintf (stdout, "\n");
2533 fprintf (stdout, " operation %d\n", e->X_op);
2534 fprintf (stdout, " add_number %ld (%lx)\n",
2535 (long) e->X_add_number, (long) e->X_add_number);
2536 if (e->X_add_symbol)
2538 fprintf (stdout, " add_symbol ");
2539 ps (e->X_add_symbol);
2540 fprintf (stdout, "\n");
2544 fprintf (stdout, " op_symbol ");
2545 ps (e->X_op_symbol);
2546 fprintf (stdout, "\n");
2553 fprintf (stdout, "%s type %s%s",
2555 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2556 segment_name (S_GET_SEGMENT (s)));
2559 static struct type_name
2561 i386_operand_type mask;
2564 const type_names[] =
2566 { OPERAND_TYPE_REG8, "r8" },
2567 { OPERAND_TYPE_REG16, "r16" },
2568 { OPERAND_TYPE_REG32, "r32" },
2569 { OPERAND_TYPE_REG64, "r64" },
2570 { OPERAND_TYPE_IMM8, "i8" },
2571 { OPERAND_TYPE_IMM8, "i8s" },
2572 { OPERAND_TYPE_IMM16, "i16" },
2573 { OPERAND_TYPE_IMM32, "i32" },
2574 { OPERAND_TYPE_IMM32S, "i32s" },
2575 { OPERAND_TYPE_IMM64, "i64" },
2576 { OPERAND_TYPE_IMM1, "i1" },
2577 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2578 { OPERAND_TYPE_DISP8, "d8" },
2579 { OPERAND_TYPE_DISP16, "d16" },
2580 { OPERAND_TYPE_DISP32, "d32" },
2581 { OPERAND_TYPE_DISP32S, "d32s" },
2582 { OPERAND_TYPE_DISP64, "d64" },
2583 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2584 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2585 { OPERAND_TYPE_CONTROL, "control reg" },
2586 { OPERAND_TYPE_TEST, "test reg" },
2587 { OPERAND_TYPE_DEBUG, "debug reg" },
2588 { OPERAND_TYPE_FLOATREG, "FReg" },
2589 { OPERAND_TYPE_FLOATACC, "FAcc" },
2590 { OPERAND_TYPE_SREG2, "SReg2" },
2591 { OPERAND_TYPE_SREG3, "SReg3" },
2592 { OPERAND_TYPE_ACC, "Acc" },
2593 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2594 { OPERAND_TYPE_REGMMX, "rMMX" },
2595 { OPERAND_TYPE_REGXMM, "rXMM" },
2596 { OPERAND_TYPE_REGYMM, "rYMM" },
2597 { OPERAND_TYPE_ESSEG, "es" },
2601 pt (i386_operand_type t)
2604 i386_operand_type a;
2606 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2608 a = operand_type_and (t, type_names[j].mask);
2609 if (!operand_type_all_zero (&a))
2610 fprintf (stdout, "%s, ", type_names[j].name);
2615 #endif /* DEBUG386 */
/* Map a fixup of SIZE bytes, with signedness SIGN and pc-relativity
   PCREL, plus an optional explicit relocation operator OTHER, to the
   BFD relocation code that should be emitted.  Emits as_bad
   diagnostics when the requested relocation does not fit the field.
   NOTE(review): this excerpt is missing some original source lines
   (the embedded line numbering jumps), so the visible code is kept
   byte-identical rather than restructured.  */
2617 static bfd_reloc_code_real_type
2618 reloc (unsigned int size,
2621 bfd_reloc_code_real_type other)
2623 if (other != NO_RELOC)
2625 reloc_howto_type *rel;
/* An explicit relocation operator was given: widen 32-bit x86-64
   relocation codes to their 64-bit counterparts where needed.  */
2630 case BFD_RELOC_X86_64_GOT32:
2631 return BFD_RELOC_X86_64_GOT64;
2633 case BFD_RELOC_X86_64_PLTOFF64:
2634 return BFD_RELOC_X86_64_PLTOFF64;
2636 case BFD_RELOC_X86_64_GOTPC32:
2637 other = BFD_RELOC_X86_64_GOTPC64;
2639 case BFD_RELOC_X86_64_GOTPCREL:
2640 other = BFD_RELOC_X86_64_GOTPCREL64;
2642 case BFD_RELOC_X86_64_TPOFF32:
2643 other = BFD_RELOC_X86_64_TPOFF64;
2645 case BFD_RELOC_X86_64_DTPOFF32:
2646 other = BFD_RELOC_X86_64_DTPOFF64;
2652 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2653 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
/* Validate the explicit relocation against the field: the BFD howto
   must exist and agree in size, pc-relativity and signedness.  */
2656 rel = bfd_reloc_type_lookup (stdoutput, other);
2658 as_bad (_("unknown relocation (%u)"), other);
2659 else if (size != bfd_get_reloc_size (rel))
2660 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2661 bfd_get_reloc_size (rel),
2663 else if (pcrel && !rel->pc_relative)
2664 as_bad (_("non-pc-relative relocation for pc-relative field"));
2665 else if ((rel->complain_on_overflow == complain_overflow_signed
2667 || (rel->complain_on_overflow == complain_overflow_unsigned
2669 as_bad (_("relocated field and relocation type differ in signedness"));
/* No (usable) explicit relocation: pick a generic one purely from
   SIZE, PCREL and SIGN.  */
2678 as_bad (_("there are no unsigned pc-relative relocations"));
2681 case 1: return BFD_RELOC_8_PCREL;
2682 case 2: return BFD_RELOC_16_PCREL;
2683 case 4: return BFD_RELOC_32_PCREL;
2684 case 8: return BFD_RELOC_64_PCREL;
2686 as_bad (_("cannot do %u byte pc-relative relocation"), size);
/* Non-pc-relative: a signed 4-byte field uses the sign-extending
   X86_64_32S form; otherwise plain absolute relocations.  */
2693 case 4: return BFD_RELOC_X86_64_32S;
2698 case 1: return BFD_RELOC_8;
2699 case 2: return BFD_RELOC_16;
2700 case 4: return BFD_RELOC_32;
2701 case 8: return BFD_RELOC_64;
2703 as_bad (_("cannot do %s %u byte relocation"),
2704 sign > 0 ? "signed" : "unsigned", size);
2710 /* Here we decide which fixups can be adjusted to make them relative to
2711 the beginning of the section instead of the symbol. Basically we need
2712 to make sure that the dynamic relocations are done correctly, so in
2713 some cases we force the original symbol to be used. */
/* Return nonzero if the fixup FIXP may be adjusted to be relative to
   the start of its section instead of to its symbol.  Returns zero
   (forcing the original symbol to be kept) for relocation types whose
   dynamic-linking semantics depend on the exact symbol.
   NOTE(review): some original lines are absent from this excerpt; the
   visible code is kept byte-identical.  */
2716 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2718 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2722 /* Don't adjust pc-relative references to merge sections in 64-bit
2724 if (use_rela_relocations
2725 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2729 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2730 and changed later by validate_fix. */
2731 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2732 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2735 /* adjust_reloc_syms doesn't know about the GOT.  Every GOT-, PLT-,
   TLS- and vtable-related relocation must keep its symbol.  */
2736 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2737 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2738 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2739 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2740 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2741 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2742 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2743 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2744 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2745 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2746 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2747 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2748 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2749 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2750 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2751 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2752 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2753 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2754 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2755 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2756 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2757 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2758 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2759 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2760 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2761 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2762 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2763 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
/* Classify MNEMONIC for Intel-syntax operand handling of x87 insns.
   Visible return values: 0 = not a math/FP op (including fxsave and
   fxrstor), 2 = integer FP op, 3 = FP control op (fldcw, fldenv,
   fnstcw, frstor, fsave, fstcw/fstenv, ...).  Other return values
   for plain FP ops are established outside this excerpt — confirm
   against the full source.  */
2770 intel_float_operand (const char *mnemonic)
2772 /* Note that the value returned is meaningful only for opcodes with (memory)
2773 operands, hence the code here is free to improperly handle opcodes that
2774 have no operands (for better performance and smaller code). */
/* All x87 mnemonics start with 'f'; anything else is not math.  */
2776 if (mnemonic[0] != 'f')
2777 return 0; /* non-math */
/* Dispatch on the second character of the mnemonic.  */
2779 switch (mnemonic[1])
2781 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2782 the fs segment override prefix not currently handled because no
2783 call path can make opcodes without operands get here */
2785 return 2 /* integer op */;
2787 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2788 return 3; /* fldcw/fldenv */
2791 if (mnemonic[2] != 'o' /* fnop */)
2792 return 3; /* non-waiting control op */
2795 if (mnemonic[2] == 's')
2796 return 3; /* frstor/frstpm */
2799 if (mnemonic[2] == 'a')
2800 return 3; /* fsave */
2801 if (mnemonic[2] == 't')
2803 switch (mnemonic[3])
2805 case 'c': /* fstcw */
2806 case 'd': /* fstdw */
2807 case 'e': /* fstenv */
2808 case 's': /* fsts[gw] */
2814 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2815 return 0; /* fxsave/fxrstor are not really math ops */
2822 /* Build the VEX prefix. */
/* Encode the VEX prefix bytes for the matched template T into
   i.vex.bytes, choosing the 2-byte (C5) form when possible and the
   3-byte (C4 / 8F for XOP) form otherwise.
   NOTE(review): some original lines are absent from this excerpt; the
   visible code is kept byte-identical.  */
2825 build_vex_prefix (const insn_template *t)
2827 unsigned int register_specifier;
2828 unsigned int implied_prefix;
2829 unsigned int vector_length;
2831 /* Check register specifier.  VEX.vvvv holds the ones' complement of
   the register number (with the REX-extension bit folded in).  */
2832 if (i.vex.register_specifier)
2834 register_specifier = i.vex.register_specifier->reg_num;
2835 if ((i.vex.register_specifier->reg_flags & RegRex))
2836 register_specifier += 8;
2837 register_specifier = ~register_specifier & 0xf;
/* No vvvv operand: the field must be all ones.  */
2840 register_specifier = 0xf;
2842 /* Use 2-byte VEX prefix by swapping destination and source
2845 && i.operands == i.reg_operands
2846 && i.tm.opcode_modifier.vexopcode == VEX0F
2847 && i.tm.opcode_modifier.s
2850 unsigned int xchg = i.operands - 1;
2851 union i386_op temp_op;
2852 i386_operand_type temp_type;
/* Swap the first and last operands (types, then op values).  */
2854 temp_type = i.types[xchg];
2855 i.types[xchg] = i.types[0];
2856 i.types[0] = temp_type;
2857 temp_op = i.op[xchg];
2858 i.op[xchg] = i.op[0];
/* Register-to-register form expected here.  */
2861 gas_assert (i.rm.mode == 3);
2865 i.rm.regmem = i.rm.reg;
2868 /* Use the next insn. */
/* Determine VEX.L: scalar insns obey the -mavxscalar setting,
   otherwise L is set for 256-bit templates.  */
2872 if (i.tm.opcode_modifier.vex == VEXScalar)
2873 vector_length = avxscalar;
2875 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
/* Derive VEX.pp from the legacy prefix byte embedded in the opcode. */
2877 switch ((i.tm.base_opcode >> 8) & 0xff)
2882 case DATA_PREFIX_OPCODE:
2885 case REPE_PREFIX_OPCODE:
2888 case REPNE_PREFIX_OPCODE:
2895 /* Use 2-byte VEX prefix if possible: only for the 0F map, with
   VEX.W clear and no REX.W/X/B bits needed.  */
2896 if (i.tm.opcode_modifier.vexopcode == VEX0F
2897 && i.tm.opcode_modifier.vexw != VEXW1
2898 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2900 /* 2-byte VEX prefix. */
2904 i.vex.bytes[0] = 0xc5;
2906 /* Check the REX.R bit.  VEX.R is the inverted REX.R.  */
2907 r = (i.rex & REX_R) ? 0 : 1;
2908 i.vex.bytes[1] = (r << 7
2909 | register_specifier << 3
2910 | vector_length << 2
2915 /* 3-byte VEX prefix.  Select the escape byte and map (m-mmmm)
   from the template's opcode map.  */
2920 switch (i.tm.opcode_modifier.vexopcode)
2924 i.vex.bytes[0] = 0xc4;
2928 i.vex.bytes[0] = 0xc4;
2932 i.vex.bytes[0] = 0xc4;
2936 i.vex.bytes[0] = 0x8f;
2940 i.vex.bytes[0] = 0x8f;
2944 i.vex.bytes[0] = 0x8f;
2950 /* The high 3 bits of the second VEX byte are 1's compliment
2951 of RXB bits from REX. */
2952 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2954 /* Check the REX.W bit; an explicit VEXW modifier overrides it.  */
2955 w = (i.rex & REX_W) ? 1 : 0;
2956 if (i.tm.opcode_modifier.vexw)
2961 if (i.tm.opcode_modifier.vexw == VEXW1)
2965 i.vex.bytes[2] = (w << 7
2966 | register_specifier << 3
2967 | vector_length << 2
/* Turn a template's extension opcode into a faked trailing 8-bit
   immediate operand, and strip the fixed register operands that some
   SSE3 insns encode in the immediate position.
   NOTE(review): some original lines are absent from this excerpt; the
   visible code is kept byte-identical.  */
2973 process_immext (void)
2977 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2979 /* SSE3 Instructions have the fixed operands with an opcode
2980 suffix which is coded in the same place as an 8-bit immediate
2981 field would be. Here we check those operands and remove them
/* Each fixed operand must name the register matching its position
   (e.g. %xmm0 first); otherwise diagnose.  */
2985 for (x = 0; x < i.operands; x++)
2986 if (i.op[x].regs->reg_num != x)
2987 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2988 register_prefix, i.op[x].regs->reg_name, x + 1,
2994 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2995 which is coded in the same place as an 8-bit immediate field
2996 would be. Here we fake an 8-bit immediate operand from the
2997 opcode suffix stored in tm.extension_opcode.
2999 AVX instructions also use this encoding, for some of
3000 3 argument instructions. */
3002 gas_assert (i.imm_operands == 0
3004 || (i.tm.opcode_modifier.vex
3005 && i.operands <= 4)));
/* Append the faked imm8 and clear the extension opcode so it is not
   applied twice.  */
3007 exp = &im_expressions[i.imm_operands++];
3008 i.op[i.operands].imms = exp;
3009 i.types[i.operands] = imm8;
3011 exp->X_op = O_constant;
3012 exp->X_add_number = i.tm.extension_opcode;
3013 i.tm.extension_opcode = None;
/* NOTE(review): the function header is absent from this excerpt; from
   the call site in md_assemble this appears to be check_hle(), which
   validates an xacquire/xrelease (HLE) prefix against the matched
   template — confirm against the full source.  Dispatch on how the
   template tolerates HLE prefixes.  */
3020 switch (i.tm.opcode_modifier.hleprefixok)
/* Template does not allow HLE at all: reject whichever prefix was
   seen.  */
3025 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3026 as_bad (_("invalid instruction `%s' after `xacquire'"),
3029 as_bad (_("invalid instruction `%s' after `xrelease'"),
/* HLE requires an accompanying lock prefix.  */
3033 if (i.prefix[LOCK_PREFIX])
3035 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3036 as_bad (_("missing `lock' with `xacquire'"));
3038 as_bad (_("missing `lock' with `xrelease'"));
3042 case HLEPrefixRelease:
/* Template only permits xrelease (e.g. plain stores).  */
3043 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3045 as_bad (_("instruction `%s' after `xacquire' not allowed"),
/* xrelease additionally requires a memory destination operand.  */
3049 if (i.mem_operands == 0
3050 || !operand_type_check (i.types[i.operands - 1], anymem))
3052 as_bad (_("memory destination needed for instruction `%s'"
3053 " after `xrelease'"), i.tm.name);
3060 /* This is the guts of the machine-dependent assembler. LINE points to a
3061 machine dependent instruction. This function is supposed to emit
3062 the frags/bytes it assembles to. */
/* Assemble one machine-dependent instruction from LINE: parse the
   mnemonic and operands, match a template, apply prefix/suffix/operand
   processing, and emit the encoded bytes via output_insn.
   NOTE(review): some original lines are absent from this excerpt; the
   visible code is kept byte-identical.  */
3065 md_assemble (char *line)
3068 char mnemonic[MAX_MNEM_SIZE];
3069 const insn_template *t;
3071 /* Initialize globals.  The per-insn state lives in the file-scope
   struct `i'; reset it completely for each instruction.  */
3072 memset (&i, '\0', sizeof (i));
3073 for (j = 0; j < MAX_OPERANDS; j++)
3074 i.reloc[j] = NO_RELOC;
3075 memset (disp_expressions, '\0', sizeof (disp_expressions));
3076 memset (im_expressions, '\0', sizeof (im_expressions));
3077 save_stack_p = save_stack;
3079 /* First parse an instruction mnemonic & call i386_operand for the operands.
3080 We assume that the scrubber has arranged it so that line[0] is the valid
3081 start of a (possibly prefixed) mnemonic. */
3083 line = parse_insn (line, mnemonic);
3087 line = parse_operands (line, mnemonic);
3092 /* Now we've parsed the mnemonic into a set of templates, and have the
3093 operands at hand. */
3095 /* All intel opcodes have reversed operands except for "bound" and
3096 "enter". We also don't reverse intersegment "jmp" and "call"
3097 instructions with 2 immediate operands so that the immediate segment
3098 precedes the offset, as it does when in AT&T mode. */
3101 && (strcmp (mnemonic, "bound") != 0)
3102 && (strcmp (mnemonic, "invlpga") != 0)
3103 && !(operand_type_check (i.types[0], imm)
3104 && operand_type_check (i.types[1], imm)))
3107 /* The order of the immediates should be reversed
3108 for 2 immediates extrq and insertq instructions */
3109 if (i.imm_operands == 2
3110 && (strcmp (mnemonic, "extrq") == 0
3111 || strcmp (mnemonic, "insertq") == 0))
3112 swap_2_operands (0, 1);
3117 /* Don't optimize displacement for movabs since it only takes 64bit
3120 && i.disp_encoding != disp_encoding_32bit
3121 && (flag_code != CODE_64BIT
3122 || strcmp (mnemonic, "movabs") != 0))
3125 /* Next, we find a template that matches the given insn,
3126 making sure the overlap of the given operands types is consistent
3127 with the template operand types. */
3129 if (!(t = match_template ()))
/* Optionally warn or error when legacy SSE insns are used (the
   -msse-check= option); AVX-marked templates are exempt.  */
3132 if (sse_check != sse_check_none
3133 && !i.tm.opcode_modifier.noavx
3134 && (i.tm.cpu_flags.bitfield.cpusse
3135 || i.tm.cpu_flags.bitfield.cpusse2
3136 || i.tm.cpu_flags.bitfield.cpusse3
3137 || i.tm.cpu_flags.bitfield.cpussse3
3138 || i.tm.cpu_flags.bitfield.cpusse4_1
3139 || i.tm.cpu_flags.bitfield.cpusse4_2))
3141 (sse_check == sse_check_warning
3143 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3146 /* Zap movzx and movsx suffix. The suffix has been set from
3147 "word ptr" or "byte ptr" on the source operand in Intel syntax
3148 or extracted from mnemonic in AT&T syntax. But we'll use
3149 the destination register to choose the suffix for encoding. */
3150 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3152 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3153 there is no suffix, the default will be byte extension. */
3154 if (i.reg_operands != 2
3157 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
/* FWAIT-class insns need an explicit fwait opcode byte first.  */
3162 if (i.tm.opcode_modifier.fwait)
3163 if (!add_prefix (FWAIT_OPCODE))
3166 /* Check for lock without a lockable instruction. Destination operand
3167 must be memory unless it is xchg (0x86). */
3168 if (i.prefix[LOCK_PREFIX]
3169 && (!i.tm.opcode_modifier.islockable
3170 || i.mem_operands == 0
3171 || (i.tm.base_opcode != 0x86
3172 && !operand_type_check (i.types[i.operands - 1], anymem))))
3174 as_bad (_("expecting lockable instruction after `lock'"));
3178 /* Check if HLE prefix is OK. */
3179 if (i.have_hle && !check_hle ())
3182 /* Check string instruction segment overrides. */
3183 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3185 if (!check_string ())
3187 i.disp_operands = 0;
3190 if (!process_suffix ())
3193 /* Update operand types. */
3194 for (j = 0; j < i.operands; j++)
3195 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3197 /* Make still unresolved immediate matches conform to size of immediate
3198 given in i.suffix. */
3199 if (!finalize_imm ())
3202 if (i.types[0].bitfield.imm1)
3203 i.imm_operands = 0; /* kludge for shift insns. */
3205 /* We only need to check those implicit registers for instructions
3206 with 3 operands or less. */
3207 if (i.operands <= 3)
3208 for (j = 0; j < i.operands; j++)
3209 if (i.types[j].bitfield.inoutportreg
3210 || i.types[j].bitfield.shiftcount
3211 || i.types[j].bitfield.acc
3212 || i.types[j].bitfield.floatacc)
3215 /* ImmExt should be processed after SSE2AVX. */
3216 if (!i.tm.opcode_modifier.sse2avx
3217 && i.tm.opcode_modifier.immext)
3220 /* For insns with operands there are more diddles to do to the opcode. */
3223 if (!process_operands ())
3226 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3228 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3229 as_warn (_("translating to `%sp'"), i.tm.name);
/* Emit a VEX prefix when the matched template requires one.  */
3232 if (i.tm.opcode_modifier.vex)
3233 build_vex_prefix (t);
3235 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3236 instructions may define INT_OPCODE as well, so avoid this corner
3237 case for those instructions that use MODRM. */
3238 if (i.tm.base_opcode == INT_OPCODE
3239 && !i.tm.opcode_modifier.modrm
3240 && i.op[0].imms->X_add_number == 3)
3242 i.tm.base_opcode = INT3_OPCODE;
3246 if ((i.tm.opcode_modifier.jump
3247 || i.tm.opcode_modifier.jumpbyte
3248 || i.tm.opcode_modifier.jumpdword)
3249 && i.op[0].disps->X_op == O_constant)
3251 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3252 the absolute address given by the constant. Since ix86 jumps and
3253 calls are pc relative, we need to generate a reloc. */
3254 i.op[0].disps->X_add_symbol = &abs_symbol;
3255 i.op[0].disps->X_op = O_symbol;
3258 if (i.tm.opcode_modifier.rex64)
3261 /* For 8 bit registers we need an empty rex prefix. Also if the
3262 instruction already has a prefix, we need to convert old
3263 registers to new ones. */
3265 if ((i.types[0].bitfield.reg8
3266 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3267 || (i.types[1].bitfield.reg8
3268 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3269 || ((i.types[0].bitfield.reg8
3270 || i.types[1].bitfield.reg8)
3275 i.rex |= REX_OPCODE;
3276 for (x = 0; x < 2; x++)
3278 /* Look for 8 bit operand that uses old registers. */
3279 if (i.types[x].bitfield.reg8
3280 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3282 /* In case it is "hi" register, give up. */
3283 if (i.op[x].regs->reg_num > 3)
3284 as_bad (_("can't encode register '%s%s' in an "
3285 "instruction requiring REX prefix."),
3286 register_prefix, i.op[x].regs->reg_name);
3288 /* Otherwise it is equivalent to the extended register.
3289 Since the encoding doesn't change this is merely
3290 cosmetic cleanup for debug output. */
3292 i.op[x].regs = i.op[x].regs + 8;
/* Emit any accumulated REX bits as an actual REX prefix byte.  */
3298 add_prefix (REX_OPCODE | i.rex);
3300 /* We are ready to output the insn. */
/* Parse the (possibly prefixed) mnemonic at LINE into MNEMONIC, set
   current_templates to the candidate templates, handle prefixes,
   pseudo-suffixes (.s/.d8/.d32), branch hints, and architecture
   checks.  Returns the updated line pointer (NULL on error, per the
   callers — confirm against the full source).
   NOTE(review): some original lines are absent from this excerpt; the
   visible code is kept byte-identical.  */
3305 parse_insn (char *line, char *mnemonic)
3308 char *token_start = l;
3311 const insn_template *t;
3314 /* Non-zero if we found a prefix only acceptable with string insns. */
3315 const char *expecting_string_instruction = NULL;
/* Copy mnemonic characters, stopping at the first non-mnemonic
   character; bail out if the buffer would overflow.  */
3320 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3325 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3327 as_bad (_("no such instruction: `%s'"), token_start);
3332 if (!is_space_char (*l)
3333 && *l != END_OF_INSN
3335 || (*l != PREFIX_SEPARATOR
3338 as_bad (_("invalid character %s in mnemonic"),
3339 output_invalid (*l));
3342 if (token_start == l)
3344 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3345 as_bad (_("expecting prefix; got nothing"));
3347 as_bad (_("expecting mnemonic; got nothing"));
3351 /* Look up instruction (or prefix) via hash table. */
3352 current_templates = (const templates *) hash_find (op_hash, mnemonic);
/* If what we found is a prefix (and more input follows), record it
   and loop back for the real mnemonic.  */
3354 if (*l != END_OF_INSN
3355 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3356 && current_templates
3357 && current_templates->start->opcode_modifier.isprefix)
3359 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3361 as_bad ((flag_code != CODE_64BIT
3362 ? _("`%s' is only supported in 64-bit mode")
3363 : _("`%s' is not supported in 64-bit mode")),
3364 current_templates->start->name);
3367 /* If we are in 16-bit mode, do not allow addr16 or data16.
3368 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3369 if ((current_templates->start->opcode_modifier.size16
3370 || current_templates->start->opcode_modifier.size32)
3371 && flag_code != CODE_64BIT
3372 && (current_templates->start->opcode_modifier.size32
3373 ^ (flag_code == CODE_16BIT)))
3375 as_bad (_("redundant %s prefix"),
3376 current_templates->start->name);
3379 /* Add prefix, checking for repeated prefixes. */
3380 switch (add_prefix (current_templates->start->base_opcode))
3385 if (current_templates->start->cpu_flags.bitfield.cpuhle)
/* rep/repne only make sense before string insns; remember the
   prefix name so we can complain later if need be.  */
3388 expecting_string_instruction = current_templates->start->name;
3393 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3400 if (!current_templates)
3402 /* Check if we should swap operand or force 32bit displacement in
3404 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3406 else if (mnem_p - 3 == dot_p
3409 i.disp_encoding = disp_encoding_8bit;
3410 else if (mnem_p - 4 == dot_p
3414 i.disp_encoding = disp_encoding_32bit;
/* Retry the lookup with the pseudo-suffix stripped.  */
3419 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3422 if (!current_templates)
3425 /* See if we can get a match by trimming off a suffix. */
3428 case WORD_MNEM_SUFFIX:
3429 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3430 i.suffix = SHORT_MNEM_SUFFIX;
3432 case BYTE_MNEM_SUFFIX:
3433 case QWORD_MNEM_SUFFIX:
3434 i.suffix = mnem_p[-1];
3436 current_templates = (const templates *) hash_find (op_hash,
3439 case SHORT_MNEM_SUFFIX:
3440 case LONG_MNEM_SUFFIX:
3443 i.suffix = mnem_p[-1];
3445 current_templates = (const templates *) hash_find (op_hash,
/* Intel-syntax FP ops pick their suffix from the operand class.  */
3454 if (intel_float_operand (mnemonic) == 1)
3455 i.suffix = SHORT_MNEM_SUFFIX;
3457 i.suffix = LONG_MNEM_SUFFIX;
3459 current_templates = (const templates *) hash_find (op_hash,
3464 if (!current_templates)
3466 as_bad (_("no such instruction: `%s'"), token_start);
3471 if (current_templates->start->opcode_modifier.jump
3472 || current_templates->start->opcode_modifier.jumpbyte)
3474 /* Check for a branch hint. We allow ",pt" and ",pn" for
3475 predict taken and predict not taken respectively.
3476 I'm not sure that branch hints actually do anything on loop
3477 and jcxz insns (JumpByte) for current Pentium4 chips. They
3478 may work in the future and it doesn't hurt to accept them
3480 if (l[0] == ',' && l[1] == 'p')
3484 if (!add_prefix (DS_PREFIX_OPCODE))
3488 else if (l[2] == 'n')
3490 if (!add_prefix (CS_PREFIX_OPCODE))
3496 /* Any other comma loses. */
3499 as_bad (_("invalid character %s in mnemonic"),
3500 output_invalid (*l));
3504 /* Check if instruction is supported on specified architecture. */
3506 for (t = current_templates->start; t < current_templates->end; ++t)
3508 supported |= cpu_flags_match (t);
3509 if (supported == CPU_FLAGS_PERFECT_MATCH)
3513 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3515 as_bad (flag_code == CODE_64BIT
3516 ? _("`%s' is not supported in 64-bit mode")
3517 : _("`%s' is only supported in 64-bit mode"),
3518 current_templates->start->name);
3521 if (supported != CPU_FLAGS_PERFECT_MATCH)
3523 as_bad (_("`%s' is not supported on `%s%s'"),
3524 current_templates->start->name,
3525 cpu_arch_name ? cpu_arch_name : default_arch,
3526 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3531 if (!cpu_arch_flags.bitfield.cpui386
3532 && (flag_code != CODE_16BIT))
3534 as_warn (_("use .code16 to ensure correct addressing mode"));
3537 /* Check for rep/repne without a string (or other allowed) instruction. */
3538 if (expecting_string_instruction)
3540 static templates override;
/* Narrow current_templates to the sub-range that permits a rep
   prefix; error out when none does.  */
3542 for (t = current_templates->start; t < current_templates->end; ++t)
3543 if (t->opcode_modifier.repprefixok)
3545 if (t >= current_templates->end)
3547 as_bad (_("expecting string instruction after `%s'"),
3548 expecting_string_instruction);
3551 for (override.start = t; t < current_templates->end; ++t)
3552 if (!t->opcode_modifier.repprefixok)
3555 current_templates = &override;
/* Parse the comma-separated operand list at L for MNEMONIC, invoking
   i386_intel_operand or i386_att_operand per operand and recording
   results in `i'.  Tracks parenthesis/bracket balance so commas
   inside memory operands do not split operands.
   NOTE(review): some original lines are absent from this excerpt; the
   visible code is kept byte-identical.  */
3562 parse_operands (char *l, const char *mnemonic)
3566 /* 1 if operand is pending after ','. */
3567 unsigned int expecting_operand = 0;
3569 /* Non-zero if operand parens not balanced. */
3570 unsigned int paren_not_balanced;
3572 while (*l != END_OF_INSN)
3574 /* Skip optional white space before operand. */
3575 if (is_space_char (*l))
3577 if (!is_operand_char (*l) && *l != END_OF_INSN)
3579 as_bad (_("invalid character %s before operand %d"),
3580 output_invalid (*l),
3584 token_start = l; /* after white space */
3585 paren_not_balanced = 0;
/* Scan to the operand-separating comma (or end of insn), keeping
   count of unmatched parens/brackets.  */
3586 while (paren_not_balanced || *l != ',')
3588 if (*l == END_OF_INSN)
3590 if (paren_not_balanced)
3593 as_bad (_("unbalanced parenthesis in operand %d."),
3596 as_bad (_("unbalanced brackets in operand %d."),
3601 break; /* we are done */
3603 else if (!is_operand_char (*l) && !is_space_char (*l))
3605 as_bad (_("invalid character %s in operand %d"),
3606 output_invalid (*l),
3613 ++paren_not_balanced;
3615 --paren_not_balanced;
3620 ++paren_not_balanced;
3622 --paren_not_balanced;
3626 if (l != token_start)
3627 { /* Yes, we've read in another operand. */
3628 unsigned int operand_ok;
3629 this_operand = i.operands++;
3630 i.types[this_operand].bitfield.unspecified = 1;
3631 if (i.operands > MAX_OPERANDS)
3633 as_bad (_("spurious operands; (%d operands/instruction max)"),
3637 /* Now parse operand adding info to 'i' as we go along. */
3638 END_STRING_AND_SAVE (l);
/* Dispatch to the Intel- or AT&T-syntax operand parser.  */
3642 i386_intel_operand (token_start,
3643 intel_float_operand (mnemonic));
3645 operand_ok = i386_att_operand (token_start);
3647 RESTORE_END_STRING (l);
3653 if (expecting_operand)
3655 expecting_operand_after_comma:
3656 as_bad (_("expecting operand after ','; got nothing"));
3661 as_bad (_("expecting operand before ','; got nothing"));
3666 /* Now *l must be either ',' or END_OF_INSN. */
3669 if (*++l == END_OF_INSN)
3671 /* Just skip it, if it's \n complain. */
3672 goto expecting_operand_after_comma;
3674 expecting_operand = 1;
/* Exchange the operands at indices XCHG1 and XCHG2 in the per-insn
   state `i': their types, op values and relocation codes.  Segment
   registers and flags held elsewhere in `i' are not touched here.  */
3681 swap_2_operands (int xchg1, int xchg2)
3683 union i386_op temp_op;
3684 i386_operand_type temp_type;
3685 enum bfd_reloc_code_real temp_reloc;
/* Swap the operand type bitfields.  */
3687 temp_type = i.types[xchg2];
3688 i.types[xchg2] = i.types[xchg1];
3689 i.types[xchg1] = temp_type;
/* Swap the operand values (register/imm/disp union).  */
3690 temp_op = i.op[xchg2];
3691 i.op[xchg2] = i.op[xchg1];
3692 i.op[xchg1] = temp_op;
/* Swap the associated relocation types.  */
3693 temp_reloc = i.reloc[xchg2];
3694 i.reloc[xchg2] = i.reloc[xchg1];
3695 i.reloc[xchg1] = temp_reloc;
/* Reverse the operand order of the current insn via swap_2_operands,
   and swap the recorded segment overrides when there are two memory
   operands.
   NOTE(review): the operand-count dispatch between these calls is
   absent from this excerpt; the visible code is kept byte-identical.  */
3699 swap_operands (void)
3705 swap_2_operands (1, i.operands - 2);
3708 swap_2_operands (0, i.operands - 1);
3714 if (i.mem_operands == 2)
3716 const seg_entry *temp_seg;
3717 temp_seg = i.seg[0];
3718 i.seg[0] = i.seg[1];
3719 i.seg[1] = temp_seg;
3723 /* Try to ensure constant immediates are represented in the smallest
/* NOTE(review): the function signature (optimize_imm, presumably
   static void with no arguments) is absent from this excerpt; the
   visible code is kept byte-identical.  Shrinks each immediate
   operand's allowed-size type bits based on a suffix guessed from
   the insn suffix or register operands.  */
3728 char guess_suffix = 0;
3732 guess_suffix = i.suffix;
3733 else if (i.reg_operands)
3735 /* Figure out a suffix from the last register operand specified.
3736 We can't do this properly yet, ie. excluding InOutPortReg,
3737 but the following works for instructions with immediates.
3738 In any case, we can't set i.suffix yet. */
3739 for (op = i.operands; --op >= 0;)
3740 if (i.types[op].bitfield.reg8)
3742 guess_suffix = BYTE_MNEM_SUFFIX;
3745 else if (i.types[op].bitfield.reg16)
3747 guess_suffix = WORD_MNEM_SUFFIX;
3750 else if (i.types[op].bitfield.reg32)
3752 guess_suffix = LONG_MNEM_SUFFIX;
3755 else if (i.types[op].bitfield.reg64)
3757 guess_suffix = QWORD_MNEM_SUFFIX;
/* No registers either: fall back to the current code size (data
   prefix flips between 16- and 32-bit operand size).  */
3761 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3762 guess_suffix = WORD_MNEM_SUFFIX;
3764 for (op = i.operands; --op >= 0;)
3765 if (operand_type_check (i.types[op], imm))
3767 switch (i.op[op].imms->X_op)
3770 /* If a suffix is given, this operand may be shortened.  Widen the
   set of immediate sizes the operand may match (fall-through
   accumulates the smaller sizes).  */
3771 switch (guess_suffix)
3773 case LONG_MNEM_SUFFIX:
3774 i.types[op].bitfield.imm32 = 1;
3775 i.types[op].bitfield.imm64 = 1;
3777 case WORD_MNEM_SUFFIX:
3778 i.types[op].bitfield.imm16 = 1;
3779 i.types[op].bitfield.imm32 = 1;
3780 i.types[op].bitfield.imm32s = 1;
3781 i.types[op].bitfield.imm64 = 1;
3783 case BYTE_MNEM_SUFFIX:
3784 i.types[op].bitfield.imm8 = 1;
3785 i.types[op].bitfield.imm8s = 1;
3786 i.types[op].bitfield.imm16 = 1;
3787 i.types[op].bitfield.imm32 = 1;
3788 i.types[op].bitfield.imm32s = 1;
3789 i.types[op].bitfield.imm64 = 1;
3793 /* If this operand is at most 16 bits, convert it
3794 to a signed 16 bit number before trying to see
3795 whether it will fit in an even smaller size.
3796 This allows a 16-bit operand such as $0xffe0 to
3797 be recognised as within Imm8S range. */
3798 if ((i.types[op].bitfield.imm16)
3799 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3801 i.op[op].imms->X_add_number =
3802 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
/* Likewise sign-extend 32-bit values.  */
3804 if ((i.types[op].bitfield.imm32)
3805 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3808 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3809 ^ ((offsetT) 1 << 31))
3810 - ((offsetT) 1 << 31));
3813 = operand_type_or (i.types[op],
3814 smallest_imm_type (i.op[op].imms->X_add_number));
3816 /* We must avoid matching of Imm32 templates when 64bit
3817 only immediate is available. */
3818 if (guess_suffix == QWORD_MNEM_SUFFIX)
3819 i.types[op].bitfield.imm32 = 0;
3826 /* Symbols and expressions. */
3828 /* Convert symbolic operand to proper sizes for matching, but don't
3829 prevent matching a set of insns that only supports sizes other
3830 than those matching the insn suffix. */
3832 i386_operand_type mask, allowed;
3833 const insn_template *t;
3835 operand_type_set (&mask, 0);
3836 operand_type_set (&allowed, 0);
/* Union the immediate sizes accepted by any candidate template.  */
3838 for (t = current_templates->start;
3839 t < current_templates->end;
3841 allowed = operand_type_or (allowed,
3842 t->operand_types[op]);
/* Build a mask of sizes implied by the guessed suffix (fall-through
   accumulates the larger sizes).  */
3843 switch (guess_suffix)
3845 case QWORD_MNEM_SUFFIX:
3846 mask.bitfield.imm64 = 1;
3847 mask.bitfield.imm32s = 1;
3849 case LONG_MNEM_SUFFIX:
3850 mask.bitfield.imm32 = 1;
3852 case WORD_MNEM_SUFFIX:
3853 mask.bitfield.imm16 = 1;
3855 case BYTE_MNEM_SUFFIX:
3856 mask.bitfield.imm8 = 1;
/* Only restrict the operand when some template still matches.  */
3861 allowed = operand_type_and (mask, allowed);
3862 if (!operand_type_all_zero (&allowed))
3863 i.types[op] = operand_type_and (i.types[op], mask);
3870 /* Try to use the smallest displacement type too. */
/* Narrow each displacement operand's allowed-size type bits: fold
   constant displacements to their minimal signed representation, and
   pre-create fixups for TLS descriptor call relocations.
   NOTE(review): some original lines are absent from this excerpt; the
   visible code is kept byte-identical.  */
3872 optimize_disp (void)
3876 for (op = i.operands; --op >= 0;)
3877 if (operand_type_check (i.types[op], disp))
3879 if (i.op[op].disps->X_op == O_constant)
3881 offsetT op_disp = i.op[op].disps->X_add_number;
3883 if (i.types[op].bitfield.disp16
3884 && (op_disp & ~(offsetT) 0xffff) == 0)
3886 /* If this operand is at most 16 bits, convert
3887 to a signed 16 bit number and don't use 64bit
3889 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3890 i.types[op].bitfield.disp64 = 0;
3892 if (i.types[op].bitfield.disp32
3893 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3895 /* If this operand is at most 32 bits, convert
3896 to a signed 32 bit number and don't use 64bit
3898 op_disp &= (((offsetT) 2 << 31) - 1);
3899 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3900 i.types[op].bitfield.disp64 = 0;
/* A zero displacement with a base/index register needs no
   displacement bytes at all.  */
3902 if (!op_disp && i.types[op].bitfield.baseindex)
3904 i.types[op].bitfield.disp8 = 0;
3905 i.types[op].bitfield.disp16 = 0;
3906 i.types[op].bitfield.disp32 = 0;
3907 i.types[op].bitfield.disp32s = 0;
3908 i.types[op].bitfield.disp64 = 0;
3912 else if (flag_code == CODE_64BIT)
3914 if (fits_in_signed_long (op_disp))
3916 i.types[op].bitfield.disp64 = 0;
3917 i.types[op].bitfield.disp32s = 1;
/* With an address-size prefix, 32-bit (zero-extended)
   displacements are also usable.  */
3919 if (i.prefix[ADDR_PREFIX]
3920 && fits_in_unsigned_long (op_disp))
3921 i.types[op].bitfield.disp32 = 1;
3923 if ((i.types[op].bitfield.disp32
3924 || i.types[op].bitfield.disp32s
3925 || i.types[op].bitfield.disp16)
3926 && fits_in_signed_byte (op_disp))
3927 i.types[op].bitfield.disp8 = 1;
3929 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3930 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3932 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3933 i.op[op].disps, 0, i.reloc[op]);
3934 i.types[op].bitfield.disp8 = 0;
3935 i.types[op].bitfield.disp16 = 0;
3936 i.types[op].bitfield.disp32 = 0;
3937 i.types[op].bitfield.disp32s = 0;
3938 i.types[op].bitfield.disp64 = 0;
3941 /* We only support 64bit displacement on constants. */
3942 i.types[op].bitfield.disp64 = 0;
3946 /* Check if operands are valid for the instruction. */
/* Validate vector (V)SIB addressing for template T: reject a vector
   index register on non-VSIB templates, and require the correctly
   sized vector index (and no RIP base) on VSIB templates.  Sets
   i.error and returns nonzero on failure (per the visible error
   assignments — confirm return convention against the full source).  */
3949 check_VecOperands (const insn_template *t)
3951 /* Without VSIB byte, we can't have a vector register for index. */
3952 if (!t->opcode_modifier.vecsib
3954 && (i.index_reg->reg_type.bitfield.regxmm
3955 || i.index_reg->reg_type.bitfield.regymm))
3957 i.error = unsupported_vector_index_register;
3961 /* For VSIB byte, we need a vector register for index and no PC
3962 relative addressing is allowed. */
3963 if (t->opcode_modifier.vecsib
3965 || !((t->opcode_modifier.vecsib == VecSIB128
3966 && i.index_reg->reg_type.bitfield.regxmm)
3967 || (t->opcode_modifier.vecsib == VecSIB256
3968 && i.index_reg->reg_type.bitfield.regymm))
3969 || (i.base_reg && i.base_reg->reg_num == RegRip)))
3971 i.error = invalid_vsib_address;
3978 /* Check if operands are valid for the instruction. Update VEX
/* For VEX templates whose first operand is a 4-bit immediate
   (Vec_Imm4), require a constant that fits in 4 bits and retag the
   operand type so later immediate checks accept it.  */
3982 VEX_check_operands (const insn_template *t)
3984 if (!t->opcode_modifier.vex)
3987 /* Only check VEX_Imm4, which must be the first operand. */
3988 if (t->operand_types[0].bitfield.vec_imm4)
3990 if (i.op[0].imms->X_op != O_constant
3991 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3997 /* Turn off Imm8 so that update_imm won't complain. */
3998 i.types[0] = vec_imm4;
4004 static const insn_template *
4005 match_template (void)
4007 /* Points to template once we've found it. */
4008 const insn_template *t;
4009 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4010 i386_operand_type overlap4;
4011 unsigned int found_reverse_match;
4012 i386_opcode_modifier suffix_check;
4013 i386_operand_type operand_types [MAX_OPERANDS];
4014 int addr_prefix_disp;
4016 unsigned int found_cpu_match;
4017 unsigned int check_register;
4018 enum i386_error specific_error = 0;
4020 #if MAX_OPERANDS != 5
4021 # error "MAX_OPERANDS must be 5."
4024 found_reverse_match = 0;
4025 addr_prefix_disp = -1;
4027 memset (&suffix_check, 0, sizeof (suffix_check));
4028 if (i.suffix == BYTE_MNEM_SUFFIX)
4029 suffix_check.no_bsuf = 1;
4030 else if (i.suffix == WORD_MNEM_SUFFIX)
4031 suffix_check.no_wsuf = 1;
4032 else if (i.suffix == SHORT_MNEM_SUFFIX)
4033 suffix_check.no_ssuf = 1;
4034 else if (i.suffix == LONG_MNEM_SUFFIX)
4035 suffix_check.no_lsuf = 1;
4036 else if (i.suffix == QWORD_MNEM_SUFFIX)
4037 suffix_check.no_qsuf = 1;
4038 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4039 suffix_check.no_ldsuf = 1;
4041 /* Must have right number of operands. */
4042 i.error = number_of_operands_mismatch;
4044 for (t = current_templates->start; t < current_templates->end; t++)
4046 addr_prefix_disp = -1;
4048 if (i.operands != t->operands)
4051 /* Check processor support. */
4052 i.error = unsupported;
4053 found_cpu_match = (cpu_flags_match (t)
4054 == CPU_FLAGS_PERFECT_MATCH);
4055 if (!found_cpu_match)
4058 /* Check old gcc support. */
4059 i.error = old_gcc_only;
4060 if (!old_gcc && t->opcode_modifier.oldgcc)
4063 /* Check AT&T mnemonic. */
4064 i.error = unsupported_with_intel_mnemonic;
4065 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4068 /* Check AT&T/Intel syntax. */
4069 i.error = unsupported_syntax;
4070 if ((intel_syntax && t->opcode_modifier.attsyntax)
4071 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4074 /* Check the suffix, except for some instructions in intel mode. */
4075 i.error = invalid_instruction_suffix;
4076 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4077 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4078 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4079 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4080 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4081 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4082 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4085 if (!operand_size_match (t))
4088 for (j = 0; j < MAX_OPERANDS; j++)
4089 operand_types[j] = t->operand_types[j];
4091 /* In general, don't allow 64-bit operands in 32-bit mode. */
4092 if (i.suffix == QWORD_MNEM_SUFFIX
4093 && flag_code != CODE_64BIT
4095 ? (!t->opcode_modifier.ignoresize
4096 && !intel_float_operand (t->name))
4097 : intel_float_operand (t->name) != 2)
/* The template is still acceptable when the checked operands are
   MMX/XMM/YMM registers (e.g. movq between media registers), or for
   cmpxchg8b (0x0fc7 /1).  */
4098 && ((!operand_types[0].bitfield.regmmx
4099 && !operand_types[0].bitfield.regxmm
4100 && !operand_types[0].bitfield.regymm)
/* BUGFIX: these two sub-conditions used a double negation (!!), which
   required the second operand to be *both* an XMM and a YMM register --
   impossible -- so the disjunct was almost always false and a non-media
   second operand could slip past this check.  A single ! mirrors the
   operand_types[0] clause above.  */
4101 || (!operand_types[t->operands > 1].bitfield.regmmx
4102 && !operand_types[t->operands > 1].bitfield.regxmm
4103 && !operand_types[t->operands > 1].bitfield.regymm))
4104 && (t->base_opcode != 0x0fc7
4105 || t->extension_opcode != 1 /* cmpxchg8b */))
4108 /* In general, don't allow 32-bit operands on pre-386. */
4109 else if (i.suffix == LONG_MNEM_SUFFIX
4110 && !cpu_arch_flags.bitfield.cpui386
4112 ? (!t->opcode_modifier.ignoresize
4113 && !intel_float_operand (t->name))
4114 : intel_float_operand (t->name) != 2)
4115 && ((!operand_types[0].bitfield.regmmx
4116 && !operand_types[0].bitfield.regxmm)
/* BUGFIX: same !! -> ! correction as in the 64-bit check above.  */
4117 || (!operand_types[t->operands > 1].bitfield.regmmx
4118 && !operand_types[t->operands > 1].bitfield.regxmm)))
4121 /* Do not verify operands when there are none. */
4125 /* We've found a match; break out of loop. */
4129 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4130 into Disp32/Disp16/Disp32 operand. */
4131 if (i.prefix[ADDR_PREFIX] != 0)
4133 /* There should be only one Disp operand. */
4137 for (j = 0; j < MAX_OPERANDS; j++)
4139 if (operand_types[j].bitfield.disp16)
4141 addr_prefix_disp = j;
4142 operand_types[j].bitfield.disp32 = 1;
4143 operand_types[j].bitfield.disp16 = 0;
4149 for (j = 0; j < MAX_OPERANDS; j++)
4151 if (operand_types[j].bitfield.disp32)
4153 addr_prefix_disp = j;
4154 operand_types[j].bitfield.disp32 = 0;
4155 operand_types[j].bitfield.disp16 = 1;
4161 for (j = 0; j < MAX_OPERANDS; j++)
4163 if (operand_types[j].bitfield.disp64)
4165 addr_prefix_disp = j;
4166 operand_types[j].bitfield.disp64 = 0;
4167 operand_types[j].bitfield.disp32 = 1;
4175 /* We check register size if needed. */
4176 check_register = t->opcode_modifier.checkregsize;
4177 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4178 switch (t->operands)
4181 if (!operand_type_match (overlap0, i.types[0]))
4185 /* xchg %eax, %eax is a special case. It is an aliase for nop
4186 only in 32bit mode and we can use opcode 0x90. In 64bit
4187 mode, we can't use 0x90 for xchg %eax, %eax since it should
4188 zero-extend %eax to %rax. */
4189 if (flag_code == CODE_64BIT
4190 && t->base_opcode == 0x90
4191 && operand_type_equal (&i.types [0], &acc32)
4192 && operand_type_equal (&i.types [1], &acc32))
4196 /* If we swap operand in encoding, we either match
4197 the next one or reverse direction of operands. */
4198 if (t->opcode_modifier.s)
4200 else if (t->opcode_modifier.d)
4205 /* If we swap operand in encoding, we match the next one. */
4206 if (i.swap_operand && t->opcode_modifier.s)
4210 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4211 if (!operand_type_match (overlap0, i.types[0])
4212 || !operand_type_match (overlap1, i.types[1])
4214 && !operand_type_register_match (overlap0, i.types[0],
4216 overlap1, i.types[1],
4219 /* Check if other direction is valid ... */
4220 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4224 /* Try reversing direction of operands. */
4225 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4226 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4227 if (!operand_type_match (overlap0, i.types[0])
4228 || !operand_type_match (overlap1, i.types[1])
4230 && !operand_type_register_match (overlap0,
4237 /* Does not match either direction. */
4240 /* found_reverse_match holds which of D or FloatDR
4242 if (t->opcode_modifier.d)
4243 found_reverse_match = Opcode_D;
4244 else if (t->opcode_modifier.floatd)
4245 found_reverse_match = Opcode_FloatD;
4247 found_reverse_match = 0;
4248 if (t->opcode_modifier.floatr)
4249 found_reverse_match |= Opcode_FloatR;
4253 /* Found a forward 2 operand match here. */
4254 switch (t->operands)
4257 overlap4 = operand_type_and (i.types[4],
4260 overlap3 = operand_type_and (i.types[3],
4263 overlap2 = operand_type_and (i.types[2],
4268 switch (t->operands)
4271 if (!operand_type_match (overlap4, i.types[4])
4272 || !operand_type_register_match (overlap3,
4280 if (!operand_type_match (overlap3, i.types[3])
4282 && !operand_type_register_match (overlap2,
4290 /* Here we make use of the fact that there are no
4291 reverse match 3 operand instructions, and all 3
4292 operand instructions only need to be checked for
4293 register consistency between operands 2 and 3. */
4294 if (!operand_type_match (overlap2, i.types[2])
4296 && !operand_type_register_match (overlap1,
4306 /* Found either forward/reverse 2, 3 or 4 operand match here:
4307 slip through to break. */
4309 if (!found_cpu_match)
4311 found_reverse_match = 0;
4315 /* Check if vector and VEX operands are valid. */
4316 if (check_VecOperands (t) || VEX_check_operands (t))
4318 specific_error = i.error;
4322 /* We've found a match; break out of loop. */
4326 if (t == current_templates->end)
4328 /* We found no match. */
4329 const char *err_msg;
4330 switch (specific_error ? specific_error : i.error)
4334 case operand_size_mismatch:
4335 err_msg = _("operand size mismatch");
4337 case operand_type_mismatch:
4338 err_msg = _("operand type mismatch");
4340 case register_type_mismatch:
4341 err_msg = _("register type mismatch");
4343 case number_of_operands_mismatch:
4344 err_msg = _("number of operands mismatch");
4346 case invalid_instruction_suffix:
4347 err_msg = _("invalid instruction suffix");
4350 err_msg = _("Imm4 isn't the first operand");
4353 err_msg = _("only supported with old gcc");
4355 case unsupported_with_intel_mnemonic:
4356 err_msg = _("unsupported with Intel mnemonic");
4358 case unsupported_syntax:
4359 err_msg = _("unsupported syntax");
4362 as_bad (_("unsupported instruction `%s'"),
4363 current_templates->start->name);
4365 case invalid_vsib_address:
4366 err_msg = _("invalid VSIB address");
4368 case unsupported_vector_index_register:
4369 err_msg = _("unsupported vector index register");
4372 as_bad (_("%s for `%s'"), err_msg,
4373 current_templates->start->name);
4377 if (!quiet_warnings)
4380 && (i.types[0].bitfield.jumpabsolute
4381 != operand_types[0].bitfield.jumpabsolute))
4383 as_warn (_("indirect %s without `*'"), t->name);
4386 if (t->opcode_modifier.isprefix
4387 && t->opcode_modifier.ignoresize)
4389 /* Warn them that a data or address size prefix doesn't
4390 affect assembly of the next line of code. */
4391 as_warn (_("stand-alone `%s' prefix"), t->name);
4395 /* Copy the template we found. */
4398 if (addr_prefix_disp != -1)
4399 i.tm.operand_types[addr_prefix_disp]
4400 = operand_types[addr_prefix_disp];
4402 if (found_reverse_match)
4404 /* If we found a reverse match we must alter the opcode
4405 direction bit. found_reverse_match holds bits to change
4406 (different for int & float insns). */
4408 i.tm.base_opcode ^= found_reverse_match;
4410 i.tm.operand_types[0] = operand_types[1];
4411 i.tm.operand_types[1] = operand_types[0];
4420 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4421 if (i.tm.operand_types[mem_op].bitfield.esseg)
4423 if (i.seg[0] != NULL && i.seg[0] != &es)
4425 as_bad (_("`%s' operand %d must use `%ses' segment"),
4431 /* There's only ever one segment override allowed per instruction.
4432 This instruction possibly has a legal segment override on the
4433 second operand, so copy the segment to where non-string
4434 instructions store it, allowing common code. */
4435 i.seg[0] = i.seg[1];
4437 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4439 if (i.seg[1] != NULL && i.seg[1] != &es)
4441 as_bad (_("`%s' operand %d must use `%ses' segment"),
/* process_suffix: resolve the instruction's operand-size suffix
   (b/w/l/q) -- honoring an explicit or template-forced suffix, else
   inferring one from register operands -- then apply whatever the
   chosen size requires: opcode W/short-form bits, operand-/address-size
   prefixes, REX.W, and the float l-suffix opcode tweak.
   NOTE(review): the function header and its return statements fall
   outside this excerpt; callers appear to treat 0 as failure.  */
4452 process_suffix (void)
4454 /* If matched instruction specifies an explicit instruction mnemonic
/* Templates may force a size outright via Size16/Size32/Size64.  */
4456 if (i.tm.opcode_modifier.size16)
4457 i.suffix = WORD_MNEM_SUFFIX;
4458 else if (i.tm.opcode_modifier.size32)
4459 i.suffix = LONG_MNEM_SUFFIX;
4460 else if (i.tm.opcode_modifier.size64)
4461 i.suffix = QWORD_MNEM_SUFFIX;
4462 else if (i.reg_operands)
4464 /* If there's no instruction mnemonic suffix we try to invent one
4465 based on register operands. */
4468 /* We take i.suffix from the last register operand specified,
4469 Destination register type is more significant than source
4470 register type. crc32 in SSE4.2 prefers source register
/* 0xf20f38f1 is crc32 with a word/dword/qword source; its size comes
   from operand 0 (the source), unlike the general rule below.  */
4472 if (i.tm.base_opcode == 0xf20f38f1)
4474 if (i.types[0].bitfield.reg16)
4475 i.suffix = WORD_MNEM_SUFFIX;
4476 else if (i.types[0].bitfield.reg32)
4477 i.suffix = LONG_MNEM_SUFFIX;
4478 else if (i.types[0].bitfield.reg64)
4479 i.suffix = QWORD_MNEM_SUFFIX;
/* 0xf20f38f0 is the byte-source form of crc32.  */
4481 else if (i.tm.base_opcode == 0xf20f38f0)
4483 if (i.types[0].bitfield.reg8)
4484 i.suffix = BYTE_MNEM_SUFFIX;
4491 if (i.tm.base_opcode == 0xf20f38f1
4492 || i.tm.base_opcode == 0xf20f38f0)
4494 /* We have to know the operand size for crc32. */
/* BUGFIX: the closing quote of the message was a backtick (`%s`);
   every sibling diagnostic uses the GNU `...' convention.  */
4495 as_bad (_("ambiguous memory operand size for `%s'"),
/* General rule: scan operands from last to first and take the size of
   the first sizing register found (I/O port regs don't count).  */
4500 for (op = i.operands; --op >= 0;)
4501 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4503 if (i.types[op].bitfield.reg8)
4505 i.suffix = BYTE_MNEM_SUFFIX;
4508 else if (i.types[op].bitfield.reg16)
4510 i.suffix = WORD_MNEM_SUFFIX;
4513 else if (i.types[op].bitfield.reg32)
4515 i.suffix = LONG_MNEM_SUFFIX;
4518 else if (i.types[op].bitfield.reg64)
4520 i.suffix = QWORD_MNEM_SUFFIX;
/* With an explicit suffix, validate the register operands against it
   (skipped for IgnoreSize templates that refuse that suffix).  */
4526 else if (i.suffix == BYTE_MNEM_SUFFIX)
4529 && i.tm.opcode_modifier.ignoresize
4530 && i.tm.opcode_modifier.no_bsuf)
4532 else if (!check_byte_reg ())
4535 else if (i.suffix == LONG_MNEM_SUFFIX)
4538 && i.tm.opcode_modifier.ignoresize
4539 && i.tm.opcode_modifier.no_lsuf)
4541 else if (!check_long_reg ())
4544 else if (i.suffix == QWORD_MNEM_SUFFIX)
4547 && i.tm.opcode_modifier.ignoresize
4548 && i.tm.opcode_modifier.no_qsuf)
4550 else if (!check_qword_reg ())
4553 else if (i.suffix == WORD_MNEM_SUFFIX)
4556 && i.tm.opcode_modifier.ignoresize
4557 && i.tm.opcode_modifier.no_wsuf)
4559 else if (!check_word_reg ())
4562 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4563 || i.suffix == YMMWORD_MNEM_SUFFIX)
4565 /* Skip if the instruction has x/y suffix. match_template
4566 should check if it is a valid suffix. */
4568 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4569 /* Do nothing if the instruction is going to ignore the prefix. */
/* No suffix could be inferred: fall back to defaults.  */
4574 else if (i.tm.opcode_modifier.defaultsize
4576 /* exclude fldenv/frstor/fsave/fstenv */
4577 && i.tm.opcode_modifier.no_ssuf)
4579 i.suffix = stackop_size;
/* Intel syntax: indirect jumps/calls and [ls][gi]dt default to the
   natural size of the current mode (q/l/w).  */
4581 else if (intel_syntax
4583 && (i.tm.operand_types[0].bitfield.jumpabsolute
4584 || i.tm.opcode_modifier.jumpbyte
4585 || i.tm.opcode_modifier.jumpintersegment
4586 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4587 && i.tm.extension_opcode <= 3)))
4592 if (!i.tm.opcode_modifier.no_qsuf)
4594 i.suffix = QWORD_MNEM_SUFFIX;
4598 if (!i.tm.opcode_modifier.no_lsuf)
4599 i.suffix = LONG_MNEM_SUFFIX;
4602 if (!i.tm.opcode_modifier.no_wsuf)
4603 i.suffix = WORD_MNEM_SUFFIX;
4612 if (i.tm.opcode_modifier.w)
4614 as_bad (_("no instruction mnemonic suffix given and "
4615 "no register operands; can't size instruction"));
/* Count how many suffixes the template accepts; if more than one and
   nothing disambiguates, the size is genuinely ambiguous.  */
4621 unsigned int suffixes;
4623 suffixes = !i.tm.opcode_modifier.no_bsuf;
4624 if (!i.tm.opcode_modifier.no_wsuf)
4626 if (!i.tm.opcode_modifier.no_lsuf)
4628 if (!i.tm.opcode_modifier.no_ldsuf)
4630 if (!i.tm.opcode_modifier.no_ssuf)
4632 if (!i.tm.opcode_modifier.no_qsuf)
4635 /* There are more than suffix matches. */
/* (suffixes & (suffixes - 1)) != 0 <=> more than one bit set.  */
4636 if (i.tm.opcode_modifier.w
4637 || ((suffixes & (suffixes - 1))
4638 && !i.tm.opcode_modifier.defaultsize
4639 && !i.tm.opcode_modifier.ignoresize))
4641 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4647 /* Change the opcode based on the operand size given by i.suffix;
4648 We don't need to change things for byte insns. */
4651 && i.suffix != BYTE_MNEM_SUFFIX
4652 && i.suffix != XMMWORD_MNEM_SUFFIX
4653 && i.suffix != YMMWORD_MNEM_SUFFIX)
4655 /* It's not a byte, select word/dword operation. */
4656 if (i.tm.opcode_modifier.w)
4658 if (i.tm.opcode_modifier.shortform)
4659 i.tm.base_opcode |= 8;
4661 i.tm.base_opcode |= 1;
4664 /* Now select between word & dword operations via the operand
4665 size prefix, except for instructions that will ignore this
4667 if (i.tm.opcode_modifier.addrprefixop0)
4669 /* The address size override prefix changes the size of the
4671 if ((flag_code == CODE_32BIT
4672 && i.op->regs[0].reg_type.bitfield.reg16)
4673 || (flag_code != CODE_32BIT
4674 && i.op->regs[0].reg_type.bitfield.reg32))
4675 if (!add_prefix (ADDR_PREFIX_OPCODE))
4678 else if (i.suffix != QWORD_MNEM_SUFFIX
4679 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4680 && !i.tm.opcode_modifier.ignoresize
4681 && !i.tm.opcode_modifier.floatmf
4682 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4683 || (flag_code == CODE_64BIT
4684 && i.tm.opcode_modifier.jumpbyte)))
4686 unsigned int prefix = DATA_PREFIX_OPCODE;
/* jcxz/loop are sized by %cx/%ecx, i.e. the address size.  */
4688 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4689 prefix = ADDR_PREFIX_OPCODE;
4691 if (!add_prefix (prefix))
4695 /* Set mode64 for an operand. */
4696 if (i.suffix == QWORD_MNEM_SUFFIX
4697 && flag_code == CODE_64BIT
4698 && !i.tm.opcode_modifier.norex64)
4700 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4701 need rex64. cmpxchg8b is also a special case. */
4702 if (! (i.operands == 2
4703 && i.tm.base_opcode == 0x90
4704 && i.tm.extension_opcode == None
4705 && operand_type_equal (&i.types [0], &acc64)
4706 && operand_type_equal (&i.types [1], &acc64))
4707 && ! (i.operands == 1
4708 && i.tm.base_opcode == 0xfc7
4709 && i.tm.extension_opcode == 1
4710 && !operand_type_check (i.types [0], reg)
4711 && operand_type_check (i.types [0], anymem)))
4715 /* Size floating point instruction. */
4716 if (i.suffix == LONG_MNEM_SUFFIX)
4717 if (i.tm.opcode_modifier.floatmf)
4718 i.tm.base_opcode ^= 4;
/* check_byte_reg: validate register operands against an explicit `b'
   suffix.  8-bit registers pass; the low byte of a wider register is
   silently (or with a warning) substituted outside 64-bit mode; any
   other register class is rejected with as_bad.
   NOTE(review): the header, braces and return statements are outside
   this excerpt; by the sibling check_*_reg pattern it presumably
   returns 0 on error, non-zero on success -- confirm upstream.  */
4725 check_byte_reg (void)
4729 for (op = i.operands; --op >= 0;)
4731 /* If this is an eight bit register, it's OK. If it's the 16 or
4732 32 bit version of an eight bit register, we will just use the
4733 low portion, and that's OK too. */
4734 if (i.types[op].bitfield.reg8)
4737 /* I/O port address operands are OK too. */
4738 if (i.tm.operand_types[op].bitfield.inoutportreg)
4741 /* crc32 doesn't generate this warning. */
4742 if (i.tm.base_opcode == 0xf20f38f0)
/* reg_num < 4: only ax/cx/dx/bx (and e/r variants) have an
   addressable low byte in non-REX encodings.  */
4745 if ((i.types[op].bitfield.reg16
4746 || i.types[op].bitfield.reg32
4747 || i.types[op].bitfield.reg64)
4748 && i.op[op].regs->reg_num < 4
4749 /* Prohibit these changes in 64bit mode, since the lowering
4750 would be more complicated. */
4751 && flag_code != CODE_64BIT)
4753 #if REGISTER_WARNINGS
4754 if (!quiet_warnings)
/* The pointer offset maps e.g. %ax/%eax to the entry for %al.  */
4755 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4757 (i.op[op].regs + (i.types[op].bitfield.reg16
4758 ? REGNAM_AL - REGNAM_AX
4759 : REGNAM_AL - REGNAM_EAX))->reg_name,
4761 i.op[op].regs->reg_name,
4766 /* Any other register is bad. */
4767 if (i.types[op].bitfield.reg16
4768 || i.types[op].bitfield.reg32
4769 || i.types[op].bitfield.reg64
4770 || i.types[op].bitfield.regmmx
4771 || i.types[op].bitfield.regxmm
4772 || i.types[op].bitfield.regymm
4773 || i.types[op].bitfield.sreg2
4774 || i.types[op].bitfield.sreg3
4775 || i.types[op].bitfield.control
4776 || i.types[op].bitfield.debug
4777 || i.types[op].bitfield.test
4778 || i.types[op].bitfield.floatreg
4779 || i.types[op].bitfield.floatacc)
4781 as_bad (_("`%s%s' not allowed with `%s%c'"),
4783 i.op[op].regs->reg_name,
/* check_long_reg: validate register operands against an explicit `l'
   suffix.  Rejects 8-bit registers (unless the template wants them,
   e.g. movzb), upgrades/warns about 16-bit registers outside 64-bit
   mode, and handles 64-bit registers via the ToQword conversion.
   NOTE(review): braces and returns are elided in this excerpt;
   presumably returns 0 on error, non-zero otherwise -- confirm.  */
4793 check_long_reg (void)
4797 for (op = i.operands; --op >= 0;)
4798 /* Reject eight bit registers, except where the template requires
4799 them. (eg. movzb) */
4800 if (i.types[op].bitfield.reg8
4801 && (i.tm.operand_types[op].bitfield.reg16
4802 || i.tm.operand_types[op].bitfield.reg32
4803 || i.tm.operand_types[op].bitfield.acc))
4805 as_bad (_("`%s%s' not allowed with `%s%c'"),
4807 i.op[op].regs->reg_name,
4812 /* Warn if the e prefix on a general reg is missing. */
4813 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4814 && i.types[op].bitfield.reg16
4815 && (i.tm.operand_types[op].bitfield.reg32
4816 || i.tm.operand_types[op].bitfield.acc))
4818 /* Prohibit these changes in the 64bit mode, since the
4819 lowering is more complicated. */
4820 if (flag_code == CODE_64BIT)
4822 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4823 register_prefix, i.op[op].regs->reg_name,
4827 #if REGISTER_WARNINGS
/* Outside 64-bit mode: quietly substitute the e-register.  */
4829 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4831 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4833 i.op[op].regs->reg_name,
4837 /* Warn if the r prefix on a general reg is missing. */
4838 else if (i.types[op].bitfield.reg64
4839 && (i.tm.operand_types[op].bitfield.reg32
4840 || i.tm.operand_types[op].bitfield.acc))
/* ToQword templates (without an XMM first operand) accept the
   64-bit register by promoting the suffix to `q'.  */
4843 && i.tm.opcode_modifier.toqword
4844 && !i.types[0].bitfield.regxmm)
4846 /* Convert to QWORD. We want REX byte. */
4847 i.suffix = QWORD_MNEM_SUFFIX;
4851 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4852 register_prefix, i.op[op].regs->reg_name,
/* check_qword_reg: validate register operands against an explicit `q'
   suffix.  Rejects 8-bit registers (unless the template wants them),
   and handles 16/32-bit registers via the ToDword demotion or an error.
   NOTE(review): braces and returns are elided in this excerpt;
   presumably returns 0 on error, non-zero otherwise -- confirm.  */
4861 check_qword_reg (void)
4865 for (op = i.operands; --op >= 0; )
4866 /* Reject eight bit registers, except where the template requires
4867 them. (eg. movzb) */
4868 if (i.types[op].bitfield.reg8
4869 && (i.tm.operand_types[op].bitfield.reg16
4870 || i.tm.operand_types[op].bitfield.reg32
4871 || i.tm.operand_types[op].bitfield.acc))
4873 as_bad (_("`%s%s' not allowed with `%s%c'"),
4875 i.op[op].regs->reg_name,
4880 /* Warn if the e prefix on a general reg is missing. */
/* NOTE(review): the comment above looks stale for the `q' case -- the
   code below actually handles a 16/32-bit register where a 64-bit one
   was expected (missing r prefix).  Verify against upstream.  */
4881 else if ((i.types[op].bitfield.reg16
4882 || i.types[op].bitfield.reg32)
4883 && (i.tm.operand_types[op].bitfield.reg32
4884 || i.tm.operand_types[op].bitfield.acc))
4886 /* Prohibit these changes in the 64bit mode, since the
4887 lowering is more complicated. */
/* ToDword templates (without an XMM first operand) accept the
   narrower register by demoting the suffix to `l'.  */
4889 && i.tm.opcode_modifier.todword
4890 && !i.types[0].bitfield.regxmm)
4892 /* Convert to DWORD. We don't want REX byte. */
4893 i.suffix = LONG_MNEM_SUFFIX;
4897 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4898 register_prefix, i.op[op].regs->reg_name,
/* check_word_reg: validate register operands against an explicit `w'
   suffix.  Rejects 8-bit registers (unless the template wants them);
   a 32-bit register where 16-bit was expected is an error in 64-bit
   mode and a substitution warning elsewhere.
   NOTE(review): braces and returns are elided in this excerpt;
   presumably returns 0 on error, non-zero otherwise -- confirm.  */
4907 check_word_reg (void)
4910 for (op = i.operands; --op >= 0;)
4911 /* Reject eight bit registers, except where the template requires
4912 them. (eg. movzb) */
4913 if (i.types[op].bitfield.reg8
4914 && (i.tm.operand_types[op].bitfield.reg16
4915 || i.tm.operand_types[op].bitfield.reg32
4916 || i.tm.operand_types[op].bitfield.acc))
4918 as_bad (_("`%s%s' not allowed with `%s%c'"),
4920 i.op[op].regs->reg_name,
4925 /* Warn if the e prefix on a general reg is present. */
4926 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4927 && i.types[op].bitfield.reg32
4928 && (i.tm.operand_types[op].bitfield.reg16
4929 || i.tm.operand_types[op].bitfield.acc))
4931 /* Prohibit these changes in the 64bit mode, since the
4932 lowering is more complicated. */
4933 if (flag_code == CODE_64BIT)
4935 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4936 register_prefix, i.op[op].regs->reg_name,
4941 #if REGISTER_WARNINGS
/* The pointer offset maps e.g. %eax to the entry for %ax.  */
4942 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4944 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4946 i.op[op].regs->reg_name,
/* update_imm: narrow the immediate-operand type mask for operand J to
   a single width.  If i.types[j] still allows several immediate sizes,
   pick one from the mnemonic suffix, or from the operand-size prefix /
   mode for the imm16_32 family; error out if it stays ambiguous.
   NOTE(review): the header and return statements are elided in this
   excerpt; the caller below treats 0 as failure.  */
4954 update_imm (unsigned int j)
4956 i386_operand_type overlap = i.types[j];
/* Only act when the mask has at least one imm bit set but equals none
   of the single-width canonical types, i.e. is still ambiguous.  */
4957 if ((overlap.bitfield.imm8
4958 || overlap.bitfield.imm8s
4959 || overlap.bitfield.imm16
4960 || overlap.bitfield.imm32
4961 || overlap.bitfield.imm32s
4962 || overlap.bitfield.imm64)
4963 && !operand_type_equal (&overlap, &imm8)
4964 && !operand_type_equal (&overlap, &imm8s)
4965 && !operand_type_equal (&overlap, &imm16)
4966 && !operand_type_equal (&overlap, &imm32)
4967 && !operand_type_equal (&overlap, &imm32s)
4968 && !operand_type_equal (&overlap, &imm64))
4972 i386_operand_type temp;
4974 operand_type_set (&temp, 0);
/* An explicit suffix selects the matching width(s) directly.  */
4975 if (i.suffix == BYTE_MNEM_SUFFIX)
4977 temp.bitfield.imm8 = overlap.bitfield.imm8;
4978 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4980 else if (i.suffix == WORD_MNEM_SUFFIX)
4981 temp.bitfield.imm16 = overlap.bitfield.imm16;
4982 else if (i.suffix == QWORD_MNEM_SUFFIX)
4984 temp.bitfield.imm64 = overlap.bitfield.imm64;
4985 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4988 temp.bitfield.imm32 = overlap.bitfield.imm32;
/* No suffix: for the 16/32-bit families, the effective operand size
   (mode XOR data-size prefix) decides between imm16 and imm32.  */
4991 else if (operand_type_equal (&overlap, &imm16_32_32s)
4992 || operand_type_equal (&overlap, &imm16_32)
4993 || operand_type_equal (&overlap, &imm16_32s))
4995 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
/* If it is still not a single canonical width, give up.  */
5000 if (!operand_type_equal (&overlap, &imm8)
5001 && !operand_type_equal (&overlap, &imm8s)
5002 && !operand_type_equal (&overlap, &imm16)
5003 && !operand_type_equal (&overlap, &imm32)
5004 && !operand_type_equal (&overlap, &imm32s)
5005 && !operand_type_equal (&overlap, &imm64))
5007 as_bad (_("no instruction mnemonic suffix given; "
5008 "can't determine immediate size"));
5012 i.types[j] = overlap;
/* NOTE(review): interior of the immediate-finalizing helper
   (presumably finalize_imm -- its header lies outside this excerpt).
   Narrows the first two immediate operands via update_imm and asserts
   the third operand carries no immediate bits.  */
5022 /* Update the first 2 immediate operands. */
5023 n = i.operands > 2 ? 2 : i.operands;
5026 for (j = 0; j < n; j++)
5027 if (update_imm (j) == 0)
5030 /* The 3rd operand can't be immediate operand. */
5031 gas_assert (operand_type_check (i.types[2], imm) == 0);
/* bad_implicit_operand: diagnose an implicit-xmm0/ymm0 operand that is
   not register 0.  XMM selects "xmm0" in the message, otherwise "ymm0";
   AT&T vs Intel operand order decides whether it is reported as the
   first or last operand.
   NOTE(review): header/return elided here; presumably returns 0 so
   callers can `return bad_implicit_operand (...)' on failure.  */
5038 bad_implicit_operand (int xmm)
5040 const char *ireg = xmm ? "xmm0" : "ymm0";
5043 as_bad (_("the last operand of `%s' must be `%s%s'"),
5044 i.tm.name, register_prefix, ireg);
5046 as_bad (_("the first operand of `%s' must be `%s%s'"),
5047 i.tm.name, register_prefix, ireg);
5052 process_operands (void)
5054 /* Default segment register this instruction will use for memory
5055 accesses. 0 means unknown. This is only for optimizing out
5056 unnecessary segment overrides. */
5057 const seg_entry *default_seg = 0;
5059 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5061 unsigned int dupl = i.operands;
5062 unsigned int dest = dupl - 1;
5065 /* The destination must be an xmm register. */
5066 gas_assert (i.reg_operands
5067 && MAX_OPERANDS > dupl
5068 && operand_type_equal (&i.types[dest], ®xmm));
5070 if (i.tm.opcode_modifier.firstxmm0)
5072 /* The first operand is implicit and must be xmm0. */
5073 gas_assert (operand_type_equal (&i.types[0], ®xmm));
5074 if (i.op[0].regs->reg_num != 0)
5075 return bad_implicit_operand (1);
5077 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5079 /* Keep xmm0 for instructions with VEX prefix and 3
5085 /* We remove the first xmm0 and keep the number of
5086 operands unchanged, which in fact duplicates the
5088 for (j = 1; j < i.operands; j++)
5090 i.op[j - 1] = i.op[j];
5091 i.types[j - 1] = i.types[j];
5092 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5096 else if (i.tm.opcode_modifier.implicit1stxmm0)
5098 gas_assert ((MAX_OPERANDS - 1) > dupl
5099 && (i.tm.opcode_modifier.vexsources
5102 /* Add the implicit xmm0 for instructions with VEX prefix
5104 for (j = i.operands; j > 0; j--)
5106 i.op[j] = i.op[j - 1];
5107 i.types[j] = i.types[j - 1];
5108 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5111 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5112 i.types[0] = regxmm;
5113 i.tm.operand_types[0] = regxmm;
5116 i.reg_operands += 2;
5121 i.op[dupl] = i.op[dest];
5122 i.types[dupl] = i.types[dest];
5123 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5132 i.op[dupl] = i.op[dest];
5133 i.types[dupl] = i.types[dest];
5134 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5137 if (i.tm.opcode_modifier.immext)
5140 else if (i.tm.opcode_modifier.firstxmm0)
5144 /* The first operand is implicit and must be xmm0/ymm0. */
5145 gas_assert (i.reg_operands
5146 && (operand_type_equal (&i.types[0], ®xmm)
5147 || operand_type_equal (&i.types[0], ®ymm)));
5148 if (i.op[0].regs->reg_num != 0)
5149 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5151 for (j = 1; j < i.operands; j++)
5153 i.op[j - 1] = i.op[j];
5154 i.types[j - 1] = i.types[j];
5156 /* We need to adjust fields in i.tm since they are used by
5157 build_modrm_byte. */
5158 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5165 else if (i.tm.opcode_modifier.regkludge)
5167 /* The imul $imm, %reg instruction is converted into
5168 imul $imm, %reg, %reg, and the clr %reg instruction
5169 is converted into xor %reg, %reg. */
5171 unsigned int first_reg_op;
5173 if (operand_type_check (i.types[0], reg))
5177 /* Pretend we saw the extra register operand. */
5178 gas_assert (i.reg_operands == 1
5179 && i.op[first_reg_op + 1].regs == 0);
5180 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5181 i.types[first_reg_op + 1] = i.types[first_reg_op];
5186 if (i.tm.opcode_modifier.shortform)
5188 if (i.types[0].bitfield.sreg2
5189 || i.types[0].bitfield.sreg3)
5191 if (i.tm.base_opcode == POP_SEG_SHORT
5192 && i.op[0].regs->reg_num == 1)
5194 as_bad (_("you can't `pop %scs'"), register_prefix);
5197 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5198 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5203 /* The register or float register operand is in operand
5207 if (i.types[0].bitfield.floatreg
5208 || operand_type_check (i.types[0], reg))
5212 /* Register goes in low 3 bits of opcode. */
5213 i.tm.base_opcode |= i.op[op].regs->reg_num;
5214 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5216 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5218 /* Warn about some common errors, but press on regardless.
5219 The first case can be generated by gcc (<= 2.8.1). */
5220 if (i.operands == 2)
5222 /* Reversed arguments on faddp, fsubp, etc. */
5223 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5224 register_prefix, i.op[!intel_syntax].regs->reg_name,
5225 register_prefix, i.op[intel_syntax].regs->reg_name);
5229 /* Extraneous `l' suffix on fp insn. */
5230 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5231 register_prefix, i.op[0].regs->reg_name);
5236 else if (i.tm.opcode_modifier.modrm)
5238 /* The opcode is completed (modulo i.tm.extension_opcode which
5239 must be put into the modrm byte). Now, we make the modrm and
5240 index base bytes based on all the info we've collected. */
5242 default_seg = build_modrm_byte ();
5244 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5248 else if (i.tm.opcode_modifier.isstring)
5250 /* For the string instructions that allow a segment override
5251 on one of their operands, the default segment is ds. */
5255 if (i.tm.base_opcode == 0x8d /* lea */
5258 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5260 /* If a segment was explicitly specified, and the specified segment
5261 is not the default, use an opcode prefix to select it. If we
5262 never figured out what the default segment is, then default_seg
5263 will be zero at this point, and the specified segment prefix will
5265 if ((i.seg[0]) && (i.seg[0] != default_seg))
5267 if (!add_prefix (i.seg[0]->seg_prefix))
5273 static const seg_entry *
5274 build_modrm_byte (void)
5276 const seg_entry *default_seg = 0;
5277 unsigned int source, dest;
5280 /* The first operand of instructions with VEX prefix and 3 sources
5281 must be VEX_Imm4. */
5282 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5285 unsigned int nds, reg_slot;
5288 if (i.tm.opcode_modifier.veximmext
5289 && i.tm.opcode_modifier.immext)
5291 dest = i.operands - 2;
5292 gas_assert (dest == 3);
5295 dest = i.operands - 1;
5298 /* There are 2 kinds of instructions:
5299 1. 5 operands: 4 register operands or 3 register operands
5300 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5301 VexW0 or VexW1. The destination must be either XMM or YMM
5303 2. 4 operands: 4 register operands or 3 register operands
5304 plus 1 memory operand, VexXDS, and VexImmExt */
5305 gas_assert ((i.reg_operands == 4
5306 || (i.reg_operands == 3 && i.mem_operands == 1))
5307 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5308 && (i.tm.opcode_modifier.veximmext
5309 || (i.imm_operands == 1
5310 && i.types[0].bitfield.vec_imm4
5311 && (i.tm.opcode_modifier.vexw == VEXW0
5312 || i.tm.opcode_modifier.vexw == VEXW1)
5313 && (operand_type_equal (&i.tm.operand_types[dest], ®xmm)
5314 || operand_type_equal (&i.tm.operand_types[dest], ®ymm)))));
5316 if (i.imm_operands == 0)
5318 /* When there is no immediate operand, generate an 8bit
5319 immediate operand to encode the first operand. */
5320 exp = &im_expressions[i.imm_operands++];
5321 i.op[i.operands].imms = exp;
5322 i.types[i.operands] = imm8;
5324 /* If VexW1 is set, the first operand is the source and
5325 the second operand is encoded in the immediate operand. */
5326 if (i.tm.opcode_modifier.vexw == VEXW1)
5337 /* FMA swaps REG and NDS. */
5338 if (i.tm.cpu_flags.bitfield.cpufma)
5346 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5348 || operand_type_equal (&i.tm.operand_types[reg_slot],
5350 exp->X_op = O_constant;
5352 = ((i.op[reg_slot].regs->reg_num
5353 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5358 unsigned int imm_slot;
5360 if (i.tm.opcode_modifier.vexw == VEXW0)
5362 /* If VexW0 is set, the third operand is the source and
5363 the second operand is encoded in the immediate
5370 /* VexW1 is set, the second operand is the source and
5371 the third operand is encoded in the immediate
5377 if (i.tm.opcode_modifier.immext)
5379 /* When ImmExt is set, the immdiate byte is the last
5381 imm_slot = i.operands - 1;
5389 /* Turn on Imm8 so that output_imm will generate it. */
5390 i.types[imm_slot].bitfield.imm8 = 1;
5393 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5395 || operand_type_equal (&i.tm.operand_types[reg_slot],
5397 i.op[imm_slot].imms->X_add_number
5398 |= ((i.op[reg_slot].regs->reg_num
5399 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5403 gas_assert (operand_type_equal (&i.tm.operand_types[nds], ®xmm)
5404 || operand_type_equal (&i.tm.operand_types[nds],
5406 i.vex.register_specifier = i.op[nds].regs;
5411 /* i.reg_operands MUST be the number of real register operands;
/* [Excerpt: tail of build_modrm_byte; the function opens before this view
   and many interior lines are elided, so comments here are advisory.]
   Register-only operand handling: choose source/dest registers, encode
   the VEX.vvvv register when the template requires it, and fill the
   ModRM reg/regmem fields.  */
5412 implicit registers do not count. If there are 3 register
5413 operands, it must be a instruction with VexNDS. For a
5414 instruction with VexNDD, the destination register is encoded
5415 in VEX prefix. If there are 4 register operands, it must be
5416 a instruction with VEX prefix and 3 sources. */
5417 if (i.mem_operands == 0
5418 && ((i.reg_operands == 2
5419 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5420 || (i.reg_operands == 3
5421 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5422 || (i.reg_operands == 4 && vex_3_sources)))
5430 /* When there are 3 operands, one of them may be immediate,
5431 which may be the first or the last operand. Otherwise,
5432 the first operand must be shift count register (cl) or it
5433 is an instruction with VexNDS. */
5434 gas_assert (i.imm_operands == 1
5435 || (i.imm_operands == 0
5436 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5437 || i.types[0].bitfield.shiftcount)));
5438 if (operand_type_check (i.types[0], imm)
5439 || i.types[0].bitfield.shiftcount)
5445 /* When there are 4 operands, the first two must be 8bit
5446 immediate operands. The source operand will be the 3rd
5449 For instructions with VexNDS, if the first operand
5450 an imm8, the source operand is the 2nd one. If the last
5451 operand is imm8, the source operand is the first one. */
5452 gas_assert ((i.imm_operands == 2
5453 && i.types[0].bitfield.imm8
5454 && i.types[1].bitfield.imm8)
5455 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5456 && i.imm_operands == 1
5457 && (i.types[0].bitfield.imm8
5458 || i.types[i.operands - 1].bitfield.imm8)));
5459 if (i.imm_operands == 2)
5463 if (i.types[0].bitfield.imm8)
5479 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5481 /* For instructions with VexNDS, the register-only
5482 source operand must be 32/64bit integer, XMM or
5483 YMM register. It is encoded in VEX prefix. We
5484 need to clear RegMem bit before calling
5485 operand_type_equal. */
5487 i386_operand_type op;
5490 /* Check register-only source operand when two source
5491 operands are swapped. */
5492 if (!i.tm.operand_types[source].bitfield.baseindex
5493 && i.tm.operand_types[dest].bitfield.baseindex)
5501 op = i.tm.operand_types[vvvv];
5502 op.bitfield.regmem = 0;
/* NOTE(review): "!op.bitfield.reg64 != 1" applies '!' before the
   comparison, so this arm of the sanity check is inverted for 64-bit
   integer registers; the parallel check below (vex_reg path) correctly
   uses "type->bitfield.reg64 != 1".  Later binutils fixed this line to
   "op.bitfield.reg64 != 1" — confirm against upstream history.
   Also: "®xmm"/"®ymm" look like mis-encoded "&regxmm"/"&regymm"
   (an HTML "&reg;" entity artifact); the real source takes the address
   of the regxmm/regymm templates here.  */
5503 if ((dest + 1) >= i.operands
5504 || (op.bitfield.reg32 != 1
5505 && !op.bitfield.reg64 != 1
5506 && !operand_type_equal (&op, ®xmm)
5507 && !operand_type_equal (&op, ®ymm)))
5509 i.vex.register_specifier = i.op[vvvv].regs;
5515 /* One of the register operands will be encoded in the i.tm.reg
5516 field, the other in the combined i.tm.mode and i.tm.regmem
5517 fields. If no form of this instruction supports a memory
5518 destination operand, then we assume the source operand may
5519 sometimes be a memory operand and so we need to store the
5520 destination in the i.rm.reg field. */
5521 if (!i.tm.operand_types[dest].bitfield.regmem
5522 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5524 i.rm.reg = i.op[dest].regs->reg_num;
5525 i.rm.regmem = i.op[source].regs->reg_num;
/* RegRex-flagged registers (r8..r15 etc.) need REX_R/REX_B bits;
   the assignments that set i.rex are elided in this excerpt.  */
5526 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5528 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5533 i.rm.reg = i.op[source].regs->reg_num;
5534 i.rm.regmem = i.op[dest].regs->reg_num;
5535 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5537 if ((i.op[source].regs->reg_flags & RegRex) != 0)
/* Outside 64-bit mode a REX-requiring register pair is only legal for
   the special control-register encodings (presumably the LOCK-prefixed
   cr8 idiom — elided lines would confirm).  */
5540 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5542 if (!i.types[0].bitfield.control
5543 && !i.types[1].bitfield.control)
5545 i.rex &= ~(REX_R | REX_B);
5546 add_prefix (LOCK_PREFIX_OPCODE);
/* [Excerpt: build_modrm_byte memory-operand path; interior lines are
   elided, comments advisory.]  Encode the memory operand: pick SIB
   base/index/scale, handle VSIB, RIP-relative, 16-bit addressing, and
   synthesize a zero displacement where the encoding demands one.  */
5550 { /* If it's not 2 reg operands... */
5555 unsigned int fake_zero_displacement = 0;
/* Locate the (single) memory operand.  */
5558 for (op = 0; op < i.operands; op++)
5559 if (operand_type_check (i.types[op], anymem))
5561 gas_assert (op < i.operands);
/* VSIB (gather) insns always take a SIB byte with a vector index.  */
5563 if (i.tm.opcode_modifier.vecsib)
5565 if (i.index_reg->reg_num == RegEiz
5566 || i.index_reg->reg_num == RegRiz)
5569 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5572 i.sib.base = NO_BASE_REGISTER;
5573 i.sib.scale = i.log2_scale_factor;
5574 i.types[op].bitfield.disp8 = 0;
5575 i.types[op].bitfield.disp16 = 0;
5576 i.types[op].bitfield.disp64 = 0;
5577 if (flag_code != CODE_64BIT)
5579 /* Must be 32 bit */
5580 i.types[op].bitfield.disp32 = 1;
5581 i.types[op].bitfield.disp32s = 0;
5585 i.types[op].bitfield.disp32 = 0;
5586 i.types[op].bitfield.disp32s = 1;
5589 i.sib.index = i.index_reg->reg_num;
5590 if ((i.index_reg->reg_flags & RegRex) != 0)
/* No base register: absolute, disp-only, or index-only addressing.  */
5596 if (i.base_reg == 0)
5599 if (!i.disp_operands)
5601 fake_zero_displacement = 1;
5602 /* Instructions with VSIB byte need 32bit displacement
5603 if there is no base register. */
5604 if (i.tm.opcode_modifier.vecsib)
5605 i.types[op].bitfield.disp32 = 1;
5607 if (i.index_reg == 0)
5609 gas_assert (!i.tm.opcode_modifier.vecsib);
5610 /* Operand is just <disp> */
5611 if (flag_code == CODE_64BIT)
5613 /* 64bit mode overwrites the 32bit absolute
5614 addressing by RIP relative addressing and
5615 absolute addressing is encoded by one of the
5616 redundant SIB forms. */
5617 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5618 i.sib.base = NO_BASE_REGISTER;
5619 i.sib.index = NO_INDEX_REGISTER;
5620 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5621 ? disp32s : disp32);
5623 else if ((flag_code == CODE_16BIT)
5624 ^ (i.prefix[ADDR_PREFIX] != 0))
5626 i.rm.regmem = NO_BASE_REGISTER_16;
5627 i.types[op] = disp16;
5631 i.rm.regmem = NO_BASE_REGISTER;
5632 i.types[op] = disp32;
5635 else if (!i.tm.opcode_modifier.vecsib)
5637 /* !i.base_reg && i.index_reg */
5638 if (i.index_reg->reg_num == RegEiz
5639 || i.index_reg->reg_num == RegRiz)
5640 i.sib.index = NO_INDEX_REGISTER;
5642 i.sib.index = i.index_reg->reg_num;
5643 i.sib.base = NO_BASE_REGISTER;
5644 i.sib.scale = i.log2_scale_factor;
5645 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5646 i.types[op].bitfield.disp8 = 0;
5647 i.types[op].bitfield.disp16 = 0;
5648 i.types[op].bitfield.disp64 = 0;
5649 if (flag_code != CODE_64BIT)
5651 /* Must be 32 bit */
5652 i.types[op].bitfield.disp32 = 1;
5653 i.types[op].bitfield.disp32s = 0;
5657 i.types[op].bitfield.disp32 = 0;
5658 i.types[op].bitfield.disp32s = 1;
5660 if ((i.index_reg->reg_flags & RegRex) != 0)
5664 /* RIP addressing for 64bit mode. */
5665 else if (i.base_reg->reg_num == RegRip ||
5666 i.base_reg->reg_num == RegEip)
5668 gas_assert (!i.tm.opcode_modifier.vecsib);
5669 i.rm.regmem = NO_BASE_REGISTER;
5670 i.types[op].bitfield.disp8 = 0;
5671 i.types[op].bitfield.disp16 = 0;
5672 i.types[op].bitfield.disp32 = 0;
5673 i.types[op].bitfield.disp32s = 1;
5674 i.types[op].bitfield.disp64 = 0;
5675 i.flags[op] |= Operand_PCrel;
5676 if (! i.disp_operands)
5677 fake_zero_displacement = 1;
/* Legacy 16-bit addressing: regmem values 0..7 encode the fixed
   bx/bp/si/di combinations.  */
5679 else if (i.base_reg->reg_type.bitfield.reg16)
5681 gas_assert (!i.tm.opcode_modifier.vecsib)
5682 switch (i.base_reg->reg_num)
5685 if (i.index_reg == 0)
5687 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5688 i.rm.regmem = i.index_reg->reg_num - 6;
5692 if (i.index_reg == 0)
5695 if (operand_type_check (i.types[op], disp) == 0)
5697 /* fake (%bp) into 0(%bp) */
5698 i.types[op].bitfield.disp8 = 1;
5699 fake_zero_displacement = 1;
5702 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5703 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5705 default: /* (%si) -> 4 or (%di) -> 5 */
5706 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5708 i.rm.mode = mode_from_disp_size (i.types[op]);
5710 else /* i.base_reg and 32/64 bit mode */
5712 if (flag_code == CODE_64BIT
5713 && operand_type_check (i.types[op], disp))
5715 i386_operand_type temp;
5716 operand_type_set (&temp, 0);
5717 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5719 if (i.prefix[ADDR_PREFIX] == 0)
5720 i.types[op].bitfield.disp32s = 1;
5722 i.types[op].bitfield.disp32 = 1;
5725 if (!i.tm.opcode_modifier.vecsib)
5726 i.rm.regmem = i.base_reg->reg_num;
5727 if ((i.base_reg->reg_flags & RegRex) != 0)
5729 i.sib.base = i.base_reg->reg_num;
5730 /* x86-64 ignores REX prefix bit here to avoid decoder
/* rbp/r13 as base (low 3 bits == EBP) cannot use mod=00 without a
   displacement, hence the synthesized disp8 below.  */
5732 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5735 if (i.disp_operands == 0)
5737 fake_zero_displacement = 1;
5738 i.types[op].bitfield.disp8 = 1;
5741 else if (i.base_reg->reg_num == ESP_REG_NUM)
5745 i.sib.scale = i.log2_scale_factor;
5746 if (i.index_reg == 0)
5748 gas_assert (!i.tm.opcode_modifier.vecsib);
5749 /* <disp>(%esp) becomes two byte modrm with no index
5750 register. We've already stored the code for esp
5751 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5752 Any base register besides %esp will not use the
5753 extra modrm byte. */
5754 i.sib.index = NO_INDEX_REGISTER;
5756 else if (!i.tm.opcode_modifier.vecsib)
5758 if (i.index_reg->reg_num == RegEiz
5759 || i.index_reg->reg_num == RegRiz)
5760 i.sib.index = NO_INDEX_REGISTER;
5762 i.sib.index = i.index_reg->reg_num;
5763 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5764 if ((i.index_reg->reg_flags & RegRex) != 0)
/* TLS descriptor call relocations need special displacement handling
   (surrounding condition elided in this excerpt).  */
5769 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5770 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5774 if (!fake_zero_displacement
5778 fake_zero_displacement = 1;
5779 if (i.disp_encoding == disp_encoding_8bit)
5780 i.types[op].bitfield.disp8 = 1;
5782 i.types[op].bitfield.disp32 = 1;
5784 i.rm.mode = mode_from_disp_size (i.types[op]);
/* Materialize the synthetic zero displacement as a constant expr.  */
5788 if (fake_zero_displacement)
5790 /* Fakes a zero displacement assuming that i.types[op]
5791 holds the correct displacement size. */
5794 gas_assert (i.op[op].disps == 0);
5795 exp = &disp_expressions[i.disp_operands++];
5796 i.op[op].disps = exp;
5797 exp->X_op = O_constant;
5798 exp->X_add_number = 0;
5799 exp->X_add_symbol = (symbolS *) 0;
5800 exp->X_op_symbol = (symbolS *) 0;
/* [Excerpt: build_modrm_byte tail; interior lines elided.]  Encode the
   VEX.vvvv register for XOP 2-source / VEXLWP templates, then the
   generic register-operand path keyed on the template's vexvvvv and
   extension_opcode fields.  */
5808 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5810 if (operand_type_check (i.types[0], imm))
5811 i.vex.register_specifier = NULL;
5814 /* VEX.vvvv encodes one of the sources when the first
5815 operand is not an immediate. */
5816 if (i.tm.opcode_modifier.vexw == VEXW0)
5817 i.vex.register_specifier = i.op[0].regs;
5819 i.vex.register_specifier = i.op[1].regs;
5822 /* Destination is a XMM register encoded in the ModRM.reg
5824 i.rm.reg = i.op[2].regs->reg_num;
5825 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5828 /* ModRM.rm and VEX.B encodes the other source. */
5829 if (!i.mem_operands)
5833 if (i.tm.opcode_modifier.vexw == VEXW0)
5834 i.rm.regmem = i.op[1].regs->reg_num;
5836 i.rm.regmem = i.op[0].regs->reg_num;
5838 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5842 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5844 i.vex.register_specifier = i.op[2].regs;
5845 if (!i.mem_operands)
5848 i.rm.regmem = i.op[1].regs->reg_num;
5849 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5853 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5854 (if any) based on i.tm.extension_opcode. Again, we must be
5855 careful to make sure that segment/control/debug/test/MMX
5856 registers are coded into the i.rm.reg field. */
5857 else if (i.reg_operands)
5860 unsigned int vex_reg = ~0;
/* Find the first operand that is any kind of register.  */
5862 for (op = 0; op < i.operands; op++)
5863 if (i.types[op].bitfield.reg8
5864 || i.types[op].bitfield.reg16
5865 || i.types[op].bitfield.reg32
5866 || i.types[op].bitfield.reg64
5867 || i.types[op].bitfield.regmmx
5868 || i.types[op].bitfield.regxmm
5869 || i.types[op].bitfield.regymm
5870 || i.types[op].bitfield.sreg2
5871 || i.types[op].bitfield.sreg3
5872 || i.types[op].bitfield.control
5873 || i.types[op].bitfield.debug
5874 || i.types[op].bitfield.test)
5879 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5881 /* For instructions with VexNDS, the register-only
5882 source operand is encoded in VEX prefix. */
5883 gas_assert (mem != (unsigned int) ~0);
5888 gas_assert (op < i.operands);
5892 /* Check register-only source operand when two source
5893 operands are swapped. */
5894 if (!i.tm.operand_types[op].bitfield.baseindex
5895 && i.tm.operand_types[op + 1].bitfield.baseindex)
5899 gas_assert (mem == (vex_reg + 1)
5900 && op < i.operands);
5905 gas_assert (vex_reg < i.operands);
5909 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5911 /* For instructions with VexNDD, the register destination
5912 is encoded in VEX prefix. */
5913 if (i.mem_operands == 0)
5915 /* There is no memory operand. */
5916 gas_assert ((op + 2) == i.operands);
5921 /* There are only 2 operands. */
5922 gas_assert (op < 2 && i.operands == 2);
5927 gas_assert (op < i.operands);
/* Sanity-check that the vvvv operand is a GPR32/64, XMM or YMM
   register before recording it (correct form of the check that is
   mistyped earlier in this function).  */
5929 if (vex_reg != (unsigned int) ~0)
5931 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5933 if (type->bitfield.reg32 != 1
5934 && type->bitfield.reg64 != 1
5935 && !operand_type_equal (type, ®xmm)
5936 && !operand_type_equal (type, ®ymm))
5939 i.vex.register_specifier = i.op[vex_reg].regs;
5942 /* Don't set OP operand twice. */
5945 /* If there is an extension opcode to put here, the
5946 register number must be put into the regmem field. */
5947 if (i.tm.extension_opcode != None)
5949 i.rm.regmem = i.op[op].regs->reg_num;
5950 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5955 i.rm.reg = i.op[op].regs->reg_num;
5956 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5961 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5962 must set it to 3 to indicate this is a register operand
5963 in the regmem field. */
5964 if (!i.mem_operands)
5968 /* Fill in i.rm.reg field with extension opcode (if any). */
5969 if (i.tm.extension_opcode != None)
5970 i.rm.reg = i.tm.extension_opcode;
/* Emit a relaxable pcrel branch: write any prefixes plus the first
   opcode byte into the fixed part of the current frag, then open a
   machine-dependent variant frag so the relax machinery /
   md_convert_frag can choose the final short or near form.
   [Several source lines are elided in this excerpt.]  */
5976 output_branch (void)
5982 relax_substateT subtype;
/* CODE16 selects the 16-bit relax table entries; BIG forces the
   32-bit form when the user requested {disp32} encoding.  */
5986 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
5987 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
5990 if (i.prefix[DATA_PREFIX] != 0)
5996 /* Pentium4 branch hints. */
5997 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5998 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6003 if (i.prefix[REX_PREFIX] != 0)
6009 if (i.prefixes != 0 && !intel_syntax)
6010 as_warn (_("skipping prefixes on this instruction"));
6012 /* It's always a symbol; End frag & setup for relax.
6013 Make sure there is enough room in this frag for the largest
6014 instruction we may generate in md_convert_frag. This is 2
6015 bytes for the opcode and room for the prefix and largest
6017 frag_grow (prefix + 2 + 4);
6018 /* Prefix and 1 opcode byte go in fr_fix. */
6019 p = frag_more (prefix + 1);
6020 if (i.prefix[DATA_PREFIX] != 0)
6021 *p++ = DATA_PREFIX_OPCODE;
6022 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6023 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6024 *p++ = i.prefix[SEG_PREFIX];
6025 if (i.prefix[REX_PREFIX] != 0)
6026 *p++ = i.prefix[REX_PREFIX];
6027 *p = i.tm.base_opcode;
/* Pick the relax subtype: unconditional jmp, i386 conditional jump,
   or the pre-386 conditional form when cpui386 is unavailable.  */
6029 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6030 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6031 else if (cpu_arch_flags.bitfield.cpui386)
6032 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6034 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6037 sym = i.op[0].disps->X_add_symbol;
6038 off = i.op[0].disps->X_add_number;
6040 if (i.op[0].disps->X_op != O_constant
6041 && i.op[0].disps->X_op != O_symbol)
6043 /* Handle complex expressions. */
6044 sym = make_expr_symbol (i.op[0].disps);
6048 /* 1 possible extra opcode + 4 byte displacement go in var part.
6049 Pass reloc in fr_var. */
6050 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
/* [Body of output_jump; its header line is elided in this excerpt.]
   Emit a fixed-size (non-relaxable) jump — loop/jecxz or a byte/word/
   dword-displacement jump — as prefixes, opcode bytes and a pcrel
   fixup of SIZE bytes.  */
6060 if (i.tm.opcode_modifier.jumpbyte)
6062 /* This is a loop or jecxz type instruction. */
6064 if (i.prefix[ADDR_PREFIX] != 0)
6066 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6069 /* Pentium4 branch hints. */
6070 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6071 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6073 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6082 if (flag_code == CODE_16BIT)
6085 if (i.prefix[DATA_PREFIX] != 0)
6087 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6097 if (i.prefix[REX_PREFIX] != 0)
6099 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6103 if (i.prefixes != 0 && !intel_syntax)
6104 as_warn (_("skipping prefixes on this instruction"));
/* Opcode may be one or two bytes; two-byte opcodes go high byte
   first (the switch's case labels are elided here).  */
6106 p = frag_more (i.tm.opcode_length + size);
6107 switch (i.tm.opcode_length)
6110 *p++ = i.tm.base_opcode >> 8;
6112 *p++ = i.tm.base_opcode;
6118 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6119 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6121 /* All jumps handled here are signed, but don't use a signed limit
6122 check for 32 and 16 bit jumps as we want to allow wrap around at
6123 4G and 64k respectively. */
6125 fixP->fx_signed = 1;
/* Emit a direct far jump/call (ptr16:16 or ptr16:32): prefixes,
   opcode, a SIZE-byte offset (constant or fixup), then the 2-byte
   constant segment selector.  [Several source lines elided.]  */
6129 output_interseg_jump (void)
6137 if (flag_code == CODE_16BIT)
6141 if (i.prefix[DATA_PREFIX] != 0)
6147 if (i.prefix[REX_PREFIX] != 0)
6157 if (i.prefixes != 0 && !intel_syntax)
6158 as_warn (_("skipping prefixes on this instruction"));
6160 /* 1 opcode; 2 segment; offset */
6161 p = frag_more (prefix + 1 + 2 + size);
6163 if (i.prefix[DATA_PREFIX] != 0)
6164 *p++ = DATA_PREFIX_OPCODE;
6166 if (i.prefix[REX_PREFIX] != 0)
6167 *p++ = i.prefix[REX_PREFIX];
6169 *p++ = i.tm.base_opcode;
6170 if (i.op[1].imms->X_op == O_constant)
6172 offsetT n = i.op[1].imms->X_add_number;
/* Range-check a constant 16-bit offset, allowing either signed or
   unsigned interpretation (enclosing condition partly elided).  */
6175 && !fits_in_unsigned_word (n)
6176 && !fits_in_signed_word (n))
6178 as_bad (_("16-bit jump out of range"));
6181 md_number_to_chars (p, n, size);
6184 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6185 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
/* The segment part must be an absolute constant.  */
6186 if (i.op[0].imms->X_op != O_constant)
6187 as_bad (_("can't handle non absolute segment in `%s'"),
6189 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
/* [Body of output_insn; its header line is elided in this excerpt.]
   Top-level instruction emitter: record dwarf2 line info, dispatch
   branch-family templates to the helpers above, otherwise emit
   prefixes, optional VEX prefix, opcode, ModRM/SIB, then displacement
   and immediate operands.  */
6195 fragS *insn_start_frag;
6196 offsetT insn_start_off;
6198 /* Tie dwarf2 debug info to the address at the start of the insn.
6199 We can't do this after the insn has been output as the current
6200 frag may have been closed off. eg. by frag_var. */
6201 dwarf2_emit_insn (0);
6203 insn_start_frag = frag_now;
6204 insn_start_off = frag_now_fix ();
/* Branch-family templates use the specialized emitters.  */
6207 if (i.tm.opcode_modifier.jump)
6209 else if (i.tm.opcode_modifier.jumpbyte
6210 || i.tm.opcode_modifier.jumpdword)
6212 else if (i.tm.opcode_modifier.jumpintersegment)
6213 output_interseg_jump ();
6216 /* Output normal instructions here. */
6220 unsigned int prefix;
6222 /* Since the VEX prefix contains the implicit prefix, we don't
6223 need the explicit prefix. */
6224 if (!i.tm.opcode_modifier.vex)
6226 switch (i.tm.opcode_length)
/* Opcode bytes above the low 16 bits are really mandatory prefixes;
   peel them off and route them through add_prefix.  */
6229 if (i.tm.base_opcode & 0xff000000)
6231 prefix = (i.tm.base_opcode >> 24) & 0xff;
6236 if ((i.tm.base_opcode & 0xff0000) != 0)
6238 prefix = (i.tm.base_opcode >> 16) & 0xff;
/* PadLock insns carry an implicit 0xf3; avoid doubling it when the
   user already supplied a rep prefix.  */
6239 if (i.tm.cpu_flags.bitfield.cpupadlock)
6242 if (prefix != REPE_PREFIX_OPCODE
6243 || (i.prefix[REP_PREFIX]
6244 != REPE_PREFIX_OPCODE))
6245 add_prefix (prefix);
6248 add_prefix (prefix);
6257 /* The prefix bytes. */
6258 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6260 FRAG_APPEND_1_CHAR (*q);
6264 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6269 /* REX byte is encoded in VEX prefix. */
6273 FRAG_APPEND_1_CHAR (*q);
6276 /* There should be no other prefixes for instructions
6281 /* Now the VEX prefix. */
6282 p = frag_more (i.vex.length);
6283 for (j = 0; j < i.vex.length; j++)
6284 p[j] = i.vex.bytes[j];
6287 /* Now the opcode; be careful about word order here! */
6288 if (i.tm.opcode_length == 1)
6290 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6294 switch (i.tm.opcode_length)
6298 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6308 /* Put out high byte first: can't use md_number_to_chars! */
6309 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6310 *p = i.tm.base_opcode & 0xff;
6313 /* Now the modrm byte and sib byte (if present). */
6314 if (i.tm.opcode_modifier.modrm)
6316 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6319 /* If i.rm.regmem == ESP (4)
6320 && i.rm.mode != (Register mode)
6322 ==> need second modrm byte. */
6323 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6325 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6326 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6328 | i.sib.scale << 6));
/* Finally displacement and immediate bytes, which may need
   GOTPC-style adjustments relative to the insn start.  */
6331 if (i.disp_operands)
6332 output_disp (insn_start_frag, insn_start_off);
6335 output_imm (insn_start_frag, insn_start_off);
6341 pi ("" /*line*/, &i);
6343 #endif /* DEBUG386 */
6346 /* Return the size of the displacement operand N. */
6349 disp_size (unsigned int n)
/* disp64 -> 8 bytes, disp8 -> 1, disp16 -> 2; the (elided) default
   case yields 4 bytes.  */
6352 if (i.types[n].bitfield.disp64)
6354 else if (i.types[n].bitfield.disp8)
6356 else if (i.types[n].bitfield.disp16)
6361 /* Return the size of the immediate operand N. */
6364 imm_size (unsigned int n)
/* imm64 -> 8 bytes, imm8/imm8s -> 1, imm16 -> 2; the (elided) default
   case yields 4 bytes.  */
6367 if (i.types[n].bitfield.imm64)
6369 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6371 else if (i.types[n].bitfield.imm16)
/* Emit displacement bytes for every displacement operand: constants
   are written directly, anything else becomes a fixup, with the
   special _GLOBAL_OFFSET_TABLE_ addend adjustment for GOTPC-style
   relocations.  [Several source lines elided.]  */
6377 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6382 for (n = 0; n < i.operands; n++)
6384 if (operand_type_check (i.types[n], disp))
6386 if (i.op[n].disps->X_op == O_constant)
6388 int size = disp_size (n);
6391 val = offset_in_range (i.op[n].disps->X_add_number,
6393 p = frag_more (size);
6394 md_number_to_chars (p, val, size);
6398 enum bfd_reloc_code_real reloc_type;
6399 int size = disp_size (n);
6400 int sign = i.types[n].bitfield.disp32s;
6401 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6403 /* We can't have 8 bit displacement here. */
6404 gas_assert (!i.types[n].bitfield.disp8);
6406 /* The PC relative address is computed relative
6407 to the instruction boundary, so in case immediate
6408 fields follows, we need to adjust the value. */
6409 if (pcrel && i.imm_operands)
6414 for (n1 = 0; n1 < i.operands; n1++)
6415 if (operand_type_check (i.types[n1], imm))
6417 /* Only one immediate is allowed for PC
6418 relative address. */
6419 gas_assert (sz == 0);
6421 i.op[n].disps->X_add_number -= sz;
6423 /* We should find the immediate. */
6424 gas_assert (sz != 0);
6427 p = frag_more (size);
6428 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
/* _GLOBAL_OFFSET_TABLE_ reference: convert to a GOTPC reloc and fold
   the byte offset of this field within the insn into the addend
   (enclosing condition partly elided).  */
6430 && GOT_symbol == i.op[n].disps->X_add_symbol
6431 && (((reloc_type == BFD_RELOC_32
6432 || reloc_type == BFD_RELOC_X86_64_32S
6433 || (reloc_type == BFD_RELOC_64
6435 && (i.op[n].disps->X_op == O_symbol
6436 || (i.op[n].disps->X_op == O_add
6437 && ((symbol_get_value_expression
6438 (i.op[n].disps->X_op_symbol)->X_op)
6440 || reloc_type == BFD_RELOC_32_PCREL))
6444 if (insn_start_frag == frag_now)
6445 add = (p - frag_now->fr_literal) - insn_start_off;
6450 add = insn_start_frag->fr_fix - insn_start_off;
6451 for (fr = insn_start_frag->fr_next;
6452 fr && fr != frag_now; fr = fr->fr_next)
6454 add += p - frag_now->fr_literal;
6459 reloc_type = BFD_RELOC_386_GOTPC;
/* NOTE(review): this is displacement code but writes through
   i.op[n].imms; imms and disps share storage in the i386_op union, so
   this touches the same expression — presumably intentional aliasing;
   confirm against the union declaration earlier in the file.  */
6460 i.op[n].imms->X_add_number += add;
6462 else if (reloc_type == BFD_RELOC_64)
6463 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6465 /* Don't do the adjustment for x86-64, as there
6466 the pcrel addressing is relative to the _next_
6467 insn, and that is taken care of in other code. */
6468 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6470 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6471 i.op[n].disps, pcrel, reloc_type);
/* Emit immediate bytes for every immediate operand: constants are
   written directly, anything else becomes a fixup, with the same
   _GLOBAL_OFFSET_TABLE_ addend adjustment as output_disp.
   [Several source lines elided.]  */
6478 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6483 for (n = 0; n < i.operands; n++)
6485 if (operand_type_check (i.types[n], imm))
6487 if (i.op[n].imms->X_op == O_constant)
6489 int size = imm_size (n);
6492 val = offset_in_range (i.op[n].imms->X_add_number,
6494 p = frag_more (size);
6495 md_number_to_chars (p, val, size);
6499 /* Not absolute_section.
6500 Need a 32-bit fixup (don't support 8bit
6501 non-absolute imms). Try to support other
6503 enum bfd_reloc_code_real reloc_type;
6504 int size = imm_size (n);
/* Sign-extended 32-bit immediate in a 64-bit-operand insn.  */
6507 if (i.types[n].bitfield.imm32s
6508 && (i.suffix == QWORD_MNEM_SUFFIX
6509 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6514 p = frag_more (size);
6515 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6517 /* This is tough to explain. We end up with this one if we
6518 * have operands that look like
6519 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6520 * obtain the absolute address of the GOT, and it is strongly
6521 * preferable from a performance point of view to avoid using
6522 * a runtime relocation for this. The actual sequence of
6523 * instructions often look something like:
6528 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6530 * The call and pop essentially return the absolute address
6531 * of the label .L66 and store it in %ebx. The linker itself
6532 * will ultimately change the first operand of the addl so
6533 * that %ebx points to the GOT, but to keep things simple, the
6534 * .o file must have this operand set so that it generates not
6535 * the absolute address of .L66, but the absolute address of
6536 * itself. This allows the linker itself simply treat a GOTPC
6537 * relocation as asking for a pcrel offset to the GOT to be
6538 * added in, and the addend of the relocation is stored in the
6539 * operand field for the instruction itself.
6541 * Our job here is to fix the operand so that it would add
6542 * the correct offset so that %ebx would point to itself. The
6543 * thing that is tricky is that .-.L66 will point to the
6544 * beginning of the instruction, so we need to further modify
6545 * the operand so that it will point to itself. There are
6546 * other cases where you have something like:
6548 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6550 * and here no correction would be required. Internally in
6551 * the assembler we treat operands of this form as not being
6552 * pcrel since the '.' is explicitly mentioned, and I wonder
6553 * whether it would simplify matters to do it this way. Who
6554 * knows. In earlier versions of the PIC patches, the
6555 * pcrel_adjust field was used to store the correction, but
6556 * since the expression is not pcrel, I felt it would be
6557 * confusing to do it this way. */
6559 if ((reloc_type == BFD_RELOC_32
6560 || reloc_type == BFD_RELOC_X86_64_32S
6561 || reloc_type == BFD_RELOC_64)
6563 && GOT_symbol == i.op[n].imms->X_add_symbol
6564 && (i.op[n].imms->X_op == O_symbol
6565 || (i.op[n].imms->X_op == O_add
6566 && ((symbol_get_value_expression
6567 (i.op[n].imms->X_op_symbol)->X_op)
/* Compute this field's byte offset from the insn start, possibly
   across frag boundaries, and fold it into the addend.  */
6572 if (insn_start_frag == frag_now)
6573 add = (p - frag_now->fr_literal) - insn_start_off;
6578 add = insn_start_frag->fr_fix - insn_start_off;
6579 for (fr = insn_start_frag->fr_next;
6580 fr && fr != frag_now; fr = fr->fr_next)
6582 add += p - frag_now->fr_literal;
6586 reloc_type = BFD_RELOC_386_GOTPC;
6588 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6590 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6591 i.op[n].imms->X_add_number += add;
6593 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6594 i.op[n].imms, 0, reloc_type);
6600 /* x86_cons_fix_new is called via the expression parsing code when a
6601 reloc is needed. We use this hook to get the correct .got reloc. */
/* One-shot state set by x86_cons/lex_got for the next fixup.  */
6602 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6603 static int cons_sign = -1;
/* Create the fixup for a data directive, consuming got_reloc and
   mapping O_secrel to the PE BFD_RELOC_32_SECREL (TE_PE section
   elided around it).  */
6606 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6609 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6611 got_reloc = NO_RELOC;
6614 if (exp->X_op == O_secrel)
6616 exp->X_op = O_symbol;
6617 r = BFD_RELOC_32_SECREL;
6621 fix_new_exp (frag, off, len, exp, 0, r);
6624 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
6625 purpose of the `.dc.a' internal pseudo-op. */
6628 x86_address_bytes (void)
/* x32 (64-bit arch, 32-bit addresses) returns a smaller size in the
   elided branch; otherwise derive it from the BFD arch info.  */
6630 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
6632 return stdoutput->arch_info->bits_per_address / 8;
/* On object formats without GOT support, lex_got degenerates to NULL
   (continuation line of the #if is elided).  */
6635 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6637 # define lex_got(reloc, adjust, types) NULL
6639 /* Parse operands of the form
6640 <symbol>@GOTOFF+<nnn>
6641 and similar .plt or .got references.
6643 If we find one, set up the correct relocation in RELOC and copy the
6644 input string, minus the `@GOTOFF' into a malloc'd buffer for
6645 parsing by the calling routine. Return this buffer, and if ADJUST
6646 is non-null set it to the length of the string we removed from the
6647 input line. Otherwise return NULL. */
6649 lex_got (enum bfd_reloc_code_real *rel,
6651 i386_operand_type *types)
6653 /* Some of the relocations depend on the size of what field is to
6654 be relocated. But in our callers i386_immediate and i386_displacement
6655 we don't yet know the operand size (this will be set by insn
6656 matching). Hence we record the word32 relocation here,
6657 and adjust the reloc according to the real size in reloc(). */
/* Suffix table: rel[0] is the 32-bit (i386) reloc, rel[1] the 64-bit
   one; _dummy_first_bfd_reloc_code_real marks "unsupported".  */
6658 static const struct {
6661 const enum bfd_reloc_code_real rel[2];
6662 const i386_operand_type types64;
6664 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6665 BFD_RELOC_X86_64_PLTOFF64 },
6666 OPERAND_TYPE_IMM64 },
6667 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6668 BFD_RELOC_X86_64_PLT32 },
6669 OPERAND_TYPE_IMM32_32S_DISP32 },
6670 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6671 BFD_RELOC_X86_64_GOTPLT64 },
6672 OPERAND_TYPE_IMM64_DISP64 },
6673 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6674 BFD_RELOC_X86_64_GOTOFF64 },
6675 OPERAND_TYPE_IMM64_DISP64 },
6676 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6677 BFD_RELOC_X86_64_GOTPCREL },
6678 OPERAND_TYPE_IMM32_32S_DISP32 },
6679 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6680 BFD_RELOC_X86_64_TLSGD },
6681 OPERAND_TYPE_IMM32_32S_DISP32 },
6682 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6683 _dummy_first_bfd_reloc_code_real },
6684 OPERAND_TYPE_NONE },
6685 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6686 BFD_RELOC_X86_64_TLSLD },
6687 OPERAND_TYPE_IMM32_32S_DISP32 },
6688 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6689 BFD_RELOC_X86_64_GOTTPOFF },
6690 OPERAND_TYPE_IMM32_32S_DISP32 },
6691 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6692 BFD_RELOC_X86_64_TPOFF32 },
6693 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6694 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6695 _dummy_first_bfd_reloc_code_real },
6696 OPERAND_TYPE_NONE },
6697 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6698 BFD_RELOC_X86_64_DTPOFF32 },
6699 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6700 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6701 _dummy_first_bfd_reloc_code_real },
6702 OPERAND_TYPE_NONE },
6703 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6704 _dummy_first_bfd_reloc_code_real },
6705 OPERAND_TYPE_NONE },
6706 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6707 BFD_RELOC_X86_64_GOT32 },
6708 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6709 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6710 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6711 OPERAND_TYPE_IMM32_32S_DISP32 },
6712 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6713 BFD_RELOC_X86_64_TLSDESC_CALL },
6714 OPERAND_TYPE_IMM32_32S_DISP32 },
6719 #if defined (OBJ_MAYBE_ELF)
/* Scan forward for an '@'; bail at end of line or comma.  */
6724 for (cp = input_line_pointer; *cp != '@'; cp++)
6725 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6728 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6730 int len = gotrel[j].len;
/* Suffix match is case-insensitive (e.g. @gotoff == @GOTOFF).  */
6731 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6733 if (gotrel[j].rel[object_64bit] != 0)
6736 char *tmpbuf, *past_reloc;
6738 *rel = gotrel[j].rel[object_64bit];
6744 if (flag_code != CODE_64BIT)
6746 types->bitfield.imm32 = 1;
6747 types->bitfield.disp32 = 1;
6750 *types = gotrel[j].types64;
6753 if (GOT_symbol == NULL)
6754 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6756 /* The length of the first part of our input line. */
6757 first = cp - input_line_pointer;
6759 /* The second part goes from after the reloc token until
6760 (and including) an end_of_line char or comma. */
6761 past_reloc = cp + 1 + len;
6763 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6765 second = cp + 1 - past_reloc;
6767 /* Allocate and copy string. The trailing NUL shouldn't
6768 be necessary, but be safe. */
6769 tmpbuf = (char *) xmalloc (first + second + 2);
6770 memcpy (tmpbuf, input_line_pointer, first);
6771 if (second != 0 && *past_reloc != ' ')
6772 /* Replace the relocation token with ' ', so that
6773 errors like foo@GOTOFF1 will be detected. */
6774 tmpbuf[first++] = ' ';
6775 memcpy (tmpbuf + first, past_reloc, second);
6776 tmpbuf[first + second] = '\0';
6780 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6781 gotrel[j].str, 1 << (5 + object_64bit));
6786 /* Might be a symbol version string. Don't as_bad here. */
/* Parse the expression for a data directive (.long/.quad), handling
   @GOTOFF-style suffixes via lex_got for 4-byte (and 64-bit 8-byte)
   values.  intel_syntax is negated around the parse as a marker that
   we're inside a cons expression.  [Some lines elided.]  */
6792 x86_cons (expressionS *exp, int size)
6794 intel_syntax = -intel_syntax;
6797 if (size == 4 || (object_64bit && size == 8))
6799 /* Handle @GOTOFF and the like in an expression. */
6801 char *gotfree_input_line;
6804 save = input_line_pointer;
6805 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6806 if (gotfree_input_line)
6807 input_line_pointer = gotfree_input_line;
6811 if (gotfree_input_line)
6813 /* expression () has merrily parsed up to the end of line,
6814 or a comma - in the wrong buffer. Transfer how far
6815 input_line_pointer has moved to the right buffer. */
6816 input_line_pointer = (save
6817 + (input_line_pointer - gotfree_input_line)
6819 free (gotfree_input_line);
/* These X_op values cannot take the pending got_reloc; report the
   offending text with a temporary NUL terminator.  */
6820 if (exp->X_op == O_constant
6821 || exp->X_op == O_absent
6822 || exp->X_op == O_illegal
6823 || exp->X_op == O_register
6824 || exp->X_op == O_big)
6826 char c = *input_line_pointer;
6827 *input_line_pointer = 0;
6828 as_bad (_("missing or invalid expression `%s'"), save);
6829 *input_line_pointer = c;
6836 intel_syntax = -intel_syntax;
6839 i386_intel_simplify (exp);
/* Handler for the .slong-style signed cons pseudo-op; remainder of the
   body (setting cons_sign and calling cons) is elided here.  */
6843 signed_cons (int size)
6845 if (flag_code == CODE_64BIT)
/* PE-only .secrel32 directive: emit each comma-separated symbolic
   expression as a 4-byte section-relative value (O_secrel is later
   turned into BFD_RELOC_32_SECREL by x86_cons_fix_new).  */
6853 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
6860 if (exp.X_op == O_symbol)
6861 exp.X_op = O_secrel;
6863 emit_expr (&exp, 4);
6865 while (*input_line_pointer++ == ',');
/* Back up over the non-comma character the loop consumed.  */
6867 input_line_pointer--;
6868 demand_empty_rest_of_line ();
/* Parse an immediate operand starting at IMM_START: strip any @-reloc
   suffix via lex_got, run the expression parser over it, then hand the
   result to i386_finalize_immediate.  [Some lines elided.]  */
6873 i386_immediate (char *imm_start)
6875 char *save_input_line_pointer;
6876 char *gotfree_input_line;
6879 i386_operand_type types;
6881 operand_type_set (&types, ~0);
6883 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6885 as_bad (_("at most %d immediate operands are allowed"),
6886 MAX_IMMEDIATE_OPERANDS);
6890 exp = &im_expressions[i.imm_operands++];
6891 i.op[this_operand].imms = exp;
6893 if (is_space_char (*imm_start))
/* Temporarily point the global parse cursor at the operand text.  */
6896 save_input_line_pointer = input_line_pointer;
6897 input_line_pointer = imm_start;
6899 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6900 if (gotfree_input_line)
6901 input_line_pointer = gotfree_input_line;
6903 exp_seg = expression (exp);
6906 if (*input_line_pointer)
6907 as_bad (_("junk `%s' after expression"), input_line_pointer);
6909 input_line_pointer = save_input_line_pointer;
6910 if (gotfree_input_line)
6912 free (gotfree_input_line);
/* With a GOT reloc pending, a bare constant/register makes no sense;
   mark it illegal so finalize reports it.  */
6914 if (exp->X_op == O_constant || exp->X_op == O_register)
6915 exp->X_op = O_illegal;
6918 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
/* Classify a parsed immediate expression EXP and record the allowed
   immediate sizes in i.types[this_operand].  IMM_START is used only for
   diagnostics.  (Listing non-contiguous; some lines elided.)  */
6922 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6923 i386_operand_type types, const char *imm_start)
6925 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6928 as_bad (_("missing or invalid immediate expression `%s'"),
6932 else if (exp->X_op == O_constant)
6934 /* Size it properly later. */
6935 i.types[this_operand].bitfield.imm64 = 1;
6936 /* If not 64bit, sign extend val. */
6937 if (flag_code != CODE_64BIT
/* Mask test: true when the value already fits in 32 bits unsigned.  */
6938 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
/* Sign-extend bit 31 portably via the xor/subtract trick.  */
6940 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6942 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
/* a.out cannot represent relocations against arbitrary sections.  */
6943 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6944 && exp_seg != absolute_section
6945 && exp_seg != text_section
6946 && exp_seg != data_section
6947 && exp_seg != bss_section
6948 && exp_seg != undefined_section
6949 && !bfd_is_com_section (exp_seg))
6951 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
/* In Intel syntax a register here can legitimately appear (handled
   elsewhere); in AT&T syntax it is always an error.  */
6955 else if (!intel_syntax && exp->X_op == O_register)
6958 as_bad (_("illegal immediate register operand %s"), imm_start);
6963 /* This is an address. The size of the address will be
6964 determined later, depending on destination register,
6965 suffix, or the default for the section. */
6966 i.types[this_operand].bitfield.imm8 = 1;
6967 i.types[this_operand].bitfield.imm16 = 1;
6968 i.types[this_operand].bitfield.imm32 = 1;
6969 i.types[this_operand].bitfield.imm32s = 1;
6970 i.types[this_operand].bitfield.imm64 = 1;
/* Intersect with the sizes lex_got said the reloc permits.  */
6971 i.types[this_operand] = operand_type_and (i.types[this_operand],
/* Parse the scale-factor field of a base/index memory operand starting
   at SCALE; store log2 of it in i.log2_scale_factor.  Returns the
   updated scan position (NULL on error, per callers).  (Listing
   non-contiguous; the switch skeleton around the cases is elided.)  */
6979 i386_scale (char *scale)
6982 char *save = input_line_pointer;
6984 input_line_pointer = scale;
6985 val = get_absolute_expression ();
/* Only 1, 2, 4 and 8 are encodable in the SIB byte.  */
6990 i.log2_scale_factor = 0;
6993 i.log2_scale_factor = 1;
6996 i.log2_scale_factor = 2;
6999 i.log2_scale_factor = 3;
7003 char sep = *input_line_pointer;
/* Temporarily terminate the string so the diagnostic shows only the
   offending scale text.  */
7005 *input_line_pointer = '\0';
7006 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7008 *input_line_pointer = sep;
7009 input_line_pointer = save;
/* A scale without an index register is pointless; warn and drop it.  */
7013 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7015 as_warn (_("scale factor of %d without an index register"),
7016 1 << i.log2_scale_factor);
7017 i.log2_scale_factor = 0;
7019 scale = input_line_pointer;
7020 input_line_pointer = save;
/* Parse the displacement substring [DISP_START, DISP_END) of a memory
   or branch operand, selecting which displacement widths are legal for
   the current mode/prefixes, then delegate to
   i386_finalize_displacement.  (Listing non-contiguous; several lines
   including brace structure are elided.)  */
7025 i386_displacement (char *disp_start, char *disp_end)
7029 char *save_input_line_pointer;
7030 char *gotfree_input_line;
7032 i386_operand_type bigdisp, types = anydisp;
7035 if (i.disp_operands == MAX_MEMORY_OPERANDS)
7037 as_bad (_("at most %d displacement operands are allowed"),
7038 MAX_MEMORY_OPERANDS);
7042 operand_type_set (&bigdisp, 0);
/* Non-jump (or absolute-jump) operands: width follows address size.  */
7043 if ((i.types[this_operand].bitfield.jumpabsolute)
7044 || (!current_templates->start->opcode_modifier.jump
7045 && !current_templates->start->opcode_modifier.jumpdword))
7047 bigdisp.bitfield.disp32 = 1;
7048 override = (i.prefix[ADDR_PREFIX] != 0);
7049 if (flag_code == CODE_64BIT)
7053 bigdisp.bitfield.disp32s = 1;
7054 bigdisp.bitfield.disp64 = 1;
/* 16-bit addressing: either native 16-bit mode without an address-size
   override, or 32-bit mode with one.  */
7057 else if ((flag_code == CODE_16BIT) ^ override)
7059 bigdisp.bitfield.disp32 = 0;
7060 bigdisp.bitfield.disp16 = 1;
7065 /* For PC-relative branches, the width of the displacement
7066 is dependent upon data size, not address size. */
7067 override = (i.prefix[DATA_PREFIX] != 0);
7068 if (flag_code == CODE_64BIT)
7070 if (override || i.suffix == WORD_MNEM_SUFFIX)
7071 bigdisp.bitfield.disp16 = 1;
7074 bigdisp.bitfield.disp32 = 1;
7075 bigdisp.bitfield.disp32s = 1;
/* NOTE(review): the other arm of this ternary is elided from the
   listing -- verify against the full source.  */
7081 override = (i.suffix == (flag_code != CODE_16BIT
7083 : LONG_MNEM_SUFFIX));
7084 bigdisp.bitfield.disp32 = 1;
7085 if ((flag_code == CODE_16BIT) ^ override)
7087 bigdisp.bitfield.disp32 = 0;
7088 bigdisp.bitfield.disp16 = 1;
7092 i.types[this_operand] = operand_type_or (i.types[this_operand],
7095 exp = &disp_expressions[i.disp_operands];
7096 i.op[this_operand].disps = exp;
/* Redirect the global scan pointer at the displacement text and
   NUL-terminate it for the expression parser.  */
7098 save_input_line_pointer = input_line_pointer;
7099 input_line_pointer = disp_start;
7100 END_STRING_AND_SAVE (disp_end);
7102 #ifndef GCC_ASM_O_HACK
7103 #define GCC_ASM_O_HACK 0
7106 END_STRING_AND_SAVE (disp_end + 1);
7107 if (i.types[this_operand].bitfield.baseIndex
7108 && displacement_string_end[-1] == '+')
7110 /* This hack is to avoid a warning when using the "o"
7111 constraint within gcc asm statements.
7114 #define _set_tssldt_desc(n,addr,limit,type) \
7115 __asm__ __volatile__ ( \
7117 "movw %w1,2+%0\n\t" \
7119 "movb %b1,4+%0\n\t" \
7120 "movb %4,5+%0\n\t" \
7121 "movb $0,6+%0\n\t" \
7122 "movb %h1,7+%0\n\t" \
7124 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7126 This works great except that the output assembler ends
7127 up looking a bit weird if it turns out that there is
7128 no offset. You end up producing code that looks like:
7141 So here we provide the missing zero. */
7143 *displacement_string_end = '0';
7146 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7147 if (gotfree_input_line)
7148 input_line_pointer = gotfree_input_line;
7150 exp_seg = expression (exp);
7153 if (*input_line_pointer)
7154 as_bad (_("junk `%s' after expression"), input_line_pointer);
7156 RESTORE_END_STRING (disp_end + 1);
7158 input_line_pointer = save_input_line_pointer;
7159 if (gotfree_input_line)
7161 free (gotfree_input_line);
/* A plain constant/register combined with a @GOT suffix is invalid;
   force the finalize step to report it.  */
7163 if (exp->X_op == O_constant || exp->X_op == O_register)
7164 exp->X_op = O_illegal;
7167 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7169 RESTORE_END_STRING (disp_end);
/* Validate and classify a parsed displacement expression EXP, rewriting
   GOT-relative relocs as symbol-minus-GOT subtractions and narrowing
   the recorded displacement widths.  DISP_START is for diagnostics
   only.  (Listing non-contiguous; some lines elided.)  */
7175 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7176 i386_operand_type types, const char *disp_start)
7178 i386_operand_type bigdisp;
7181 /* We do this to make sure that the section symbol is in
7182 the symbol table. We will ultimately change the relocation
7183 to be relative to the beginning of the section. */
7184 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7185 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7186 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7188 if (exp->X_op != O_symbol)
7191 if (S_IS_LOCAL (exp->X_add_symbol)
7192 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7193 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7194 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
/* Express the operand as (symbol - _GLOBAL_OFFSET_TABLE_) with a
   plain reloc of the appropriate width.  */
7195 exp->X_op = O_subtract;
7196 exp->X_op_symbol = GOT_symbol;
7197 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7198 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7199 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7200 i.reloc[this_operand] = BFD_RELOC_64;
7202 i.reloc[this_operand] = BFD_RELOC_32;
7205 else if (exp->X_op == O_absent
7206 || exp->X_op == O_illegal
7207 || exp->X_op == O_big)
7210 as_bad (_("missing or invalid displacement expression `%s'"),
7215 else if (flag_code == CODE_64BIT
7216 && !i.prefix[ADDR_PREFIX]
7217 && exp->X_op == O_constant)
7219 /* Since displacement is signed extended to 64bit, don't allow
7220 disp32 and turn off disp32s if they are out of range. */
7221 i.types[this_operand].bitfield.disp32 = 0;
7222 if (!fits_in_signed_long (exp->X_add_number))
7224 i.types[this_operand].bitfield.disp32s = 0;
/* With a base/index register the displacement MUST fit in 32 bits,
   so this is a hard error rather than a size adjustment.  */
7225 if (i.types[this_operand].bitfield.baseindex)
7227 as_bad (_("0x%lx out range of signed 32bit displacement"),
7228 (long) exp->X_add_number);
7234 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
/* a.out cannot relocate against arbitrary sections.  */
7235 else if (exp->X_op != O_constant
7236 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7237 && exp_seg != absolute_section
7238 && exp_seg != text_section
7239 && exp_seg != data_section
7240 && exp_seg != bss_section
7241 && exp_seg != undefined_section
7242 && !bfd_is_com_section (exp_seg))
7244 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7249 /* Check if this is a displacement only operand. */
7250 bigdisp = i.types[this_operand];
7251 bigdisp.bitfield.disp8 = 0;
7252 bigdisp.bitfield.disp16 = 0;
7253 bigdisp.bitfield.disp32 = 0;
7254 bigdisp.bitfield.disp32s = 0;
7255 bigdisp.bitfield.disp64 = 0;
/* If clearing all disp bits leaves nothing, the operand is purely a
   displacement; intersect with the reloc-permitted widths.  */
7256 if (operand_type_all_zero (&bigdisp))
7257 i.types[this_operand] = operand_type_and (i.types[this_operand],
7263 /* Make sure the memory operand we've been dealt is valid.
7264 Return 1 on success, 0 on a failure. */
/* Validates base/index register combinations for the current mode
   (16/32/64-bit), with special handling for string instructions, and
   can infer a missing address-size prefix.  (Listing non-contiguous;
   braces and some conditions are elided -- be careful when editing.)  */
7267 i386_index_check (const char *operand_string)
7270 const char *kind = "base/index";
7271 #if INFER_ADDR_PREFIX
7277 if (current_templates->start->opcode_modifier.isstring
7278 && !current_templates->start->opcode_modifier.immext
7279 && (current_templates->end[-1].opcode_modifier.isstring
7282 /* Memory operands of string insns are special in that they only allow
7283 a single register (rDI, rSI, or rBX) as their memory address. */
7284 unsigned int expected;
7286 kind = "string address";
7288 if (current_templates->start->opcode_modifier.w)
7290 i386_operand_type type = current_templates->end[-1].operand_types[0];
7292 if (!type.bitfield.baseindex
7293 || ((!i.mem_operands != !intel_syntax)
7294 && current_templates->end[-1].operand_types[1]
7295 .bitfield.baseindex))
7296 type = current_templates->end[-1].operand_types[1];
/* ES-segment operands use rDI, others rSI; reg numbers 7 and 6.  */
7297 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7300 expected = 3 /* rBX */;
7302 if (!i.base_reg || i.index_reg
7303 || operand_type_check (i.types[this_operand], disp))
/* The base register's width must match the effective address size
   (taking the address-size prefix into account).  */
7305 else if (!(flag_code == CODE_64BIT
7306 ? i.prefix[ADDR_PREFIX]
7307 ? i.base_reg->reg_type.bitfield.reg32
7308 : i.base_reg->reg_type.bitfield.reg64
7309 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7310 ? i.base_reg->reg_type.bitfield.reg32
7311 : i.base_reg->reg_type.bitfield.reg16))
7313 else if (i.base_reg->reg_num != expected)
/* Find the correctly-sized register with the expected number so the
   warning can name it.  */
7320 for (j = 0; j < i386_regtab_size; ++j)
7321 if ((flag_code == CODE_64BIT
7322 ? i.prefix[ADDR_PREFIX]
7323 ? i386_regtab[j].reg_type.bitfield.reg32
7324 : i386_regtab[j].reg_type.bitfield.reg64
7325 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7326 ? i386_regtab[j].reg_type.bitfield.reg32
7327 : i386_regtab[j].reg_type.bitfield.reg16)
7328 && i386_regtab[j].reg_num == expected)
7330 gas_assert (j < i386_regtab_size);
7331 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7333 intel_syntax ? '[' : '(',
7335 i386_regtab[j].reg_name,
7336 intel_syntax ? ']' : ')');
/* 64-bit mode: base must be 64-bit (or 32-bit with 0x67 prefix) or
   RIP/EIP; index may be a vector register (VSIB) or a suitable GPR.  */
7340 else if (flag_code == CODE_64BIT)
7343 && ((i.prefix[ADDR_PREFIX] == 0
7344 && !i.base_reg->reg_type.bitfield.reg64)
7345 || (i.prefix[ADDR_PREFIX]
7346 && !i.base_reg->reg_type.bitfield.reg32))
7348 || i.base_reg->reg_num !=
7349 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7351 && !(i.index_reg->reg_type.bitfield.regxmm
7352 || i.index_reg->reg_type.bitfield.regymm)
7353 && (!i.index_reg->reg_type.bitfield.baseindex
7354 || (i.prefix[ADDR_PREFIX] == 0
7355 && i.index_reg->reg_num != RegRiz
7356 && !i.index_reg->reg_type.bitfield.reg64
7358 || (i.prefix[ADDR_PREFIX]
7359 && i.index_reg->reg_num != RegEiz
7360 && !i.index_reg->reg_type.bitfield.reg32))))
/* 16-bit addressing (real 16-bit mode, or 32-bit with 0x67): only the
   classic BX/BP + SI/DI combinations, unscaled, are encodable.  */
7365 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7369 && (!i.base_reg->reg_type.bitfield.reg16
7370 || !i.base_reg->reg_type.bitfield.baseindex)
7372 && (!i.index_reg->reg_type.bitfield.reg16
7373 || !i.index_reg->reg_type.bitfield.baseindex
7375 && i.base_reg->reg_num < 6
7376 && i.index_reg->reg_num >= 6
7377 && i.log2_scale_factor == 0))))
/* 32-bit addressing: base must be a 32-bit GPR; index a 32-bit GPR,
   EIZ, or a vector register.  */
7384 && !i.base_reg->reg_type.bitfield.reg32)
7386 && !i.index_reg->reg_type.bitfield.regxmm
7387 && !i.index_reg->reg_type.bitfield.regymm
7388 && ((!i.index_reg->reg_type.bitfield.reg32
7389 && i.index_reg->reg_num != RegEiz)
7390 || !i.index_reg->reg_type.bitfield.baseindex)))
7396 #if INFER_ADDR_PREFIX
/* Bad address and no explicit prefix: try again assuming the user
   meant the other address size, inserting the 0x67 prefix for them.  */
7397 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7399 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7401 /* Change the size of any displacement too. At most one of
7402 Disp16 or Disp32 is set.
7403 FIXME. There doesn't seem to be any real need for separate
7404 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7405 Removing them would probably clean up the code quite a lot. */
7406 if (flag_code != CODE_64BIT
7407 && (i.types[this_operand].bitfield.disp16
7408 || i.types[this_operand].bitfield.disp32))
7409 i.types[this_operand]
7410 = operand_type_xor (i.types[this_operand], disp16_32);
7415 as_bad (_("`%s' is not a valid %s expression"),
7420 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7422 flag_code_names[i.prefix[ADDR_PREFIX]
7423 ? flag_code == CODE_32BIT
7432 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
/* AT&T-syntax operand parser: classifies the operand as register,
   immediate ('$'), absolute jump ('*'), or memory reference (with
   optional segment override and base/index/scale), filling in the
   global insn state `i'.  (Listing non-contiguous; braces and some
   lines are elided -- edit only against the full source.)  */
7436 i386_att_operand (char *operand_string)
7440 char *op_string = operand_string;
7442 if (is_space_char (*op_string))
7445 /* We check for an absolute prefix (differentiating,
7446 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
7447 if (*op_string == ABSOLUTE_PREFIX)
7450 if (is_space_char (*op_string))
7452 i.types[this_operand].bitfield.jumpabsolute = 1;
7455 /* Check if operand is a register. */
7456 if ((r = parse_register (op_string, &end_op)) != NULL)
7458 i386_operand_type temp;
7460 /* Check for a segment override by searching for ':' after a
7461 segment register. */
7463 if (is_space_char (*op_string))
7465 if (*op_string == ':'
7466 && (r->reg_type.bitfield.sreg2
7467 || r->reg_type.bitfield.sreg3))
/* Record which segment prefix applies to this memory operand.  */
7472 i.seg[i.mem_operands] = &es;
7475 i.seg[i.mem_operands] = &cs;
7478 i.seg[i.mem_operands] = &ss;
7481 i.seg[i.mem_operands] = &ds;
7484 i.seg[i.mem_operands] = &fs;
7487 i.seg[i.mem_operands] = &gs;
7491 /* Skip the ':' and whitespace. */
7493 if (is_space_char (*op_string))
7496 if (!is_digit_char (*op_string)
7497 && !is_identifier_char (*op_string)
7498 && *op_string != '('
7499 && *op_string != ABSOLUTE_PREFIX)
7501 as_bad (_("bad memory operand `%s'"), op_string);
7504 /* Handle case of %es:*foo. */
7505 if (*op_string == ABSOLUTE_PREFIX)
7508 if (is_space_char (*op_string))
7510 i.types[this_operand].bitfield.jumpabsolute = 1;
7512 goto do_memory_reference;
7516 as_bad (_("junk `%s' after register"), op_string);
/* Plain register operand: record its type minus BaseIndex.  */
7520 temp.bitfield.baseindex = 0;
7521 i.types[this_operand] = operand_type_or (i.types[this_operand],
7523 i.types[this_operand].bitfield.unspecified = 0;
7524 i.op[this_operand].regs = r;
7527 else if (*op_string == REGISTER_PREFIX)
7529 as_bad (_("bad register name `%s'"), op_string);
7532 else if (*op_string == IMMEDIATE_PREFIX)
7535 if (i.types[this_operand].bitfield.jumpabsolute)
7537 as_bad (_("immediate operand illegal with absolute jump"));
7540 if (!i386_immediate (op_string))
7543 else if (is_digit_char (*op_string)
7544 || is_identifier_char (*op_string)
7545 || *op_string == '(')
7547 /* This is a memory reference of some sort. */
7550 /* Start and end of displacement string expression (if found). */
7551 char *displacement_string_start;
7552 char *displacement_string_end;
7554 do_memory_reference:
/* String insns allow two memory operands; everything else one.  */
7555 if ((i.mem_operands == 1
7556 && !current_templates->start->opcode_modifier.isstring)
7557 || i.mem_operands == 2)
7559 as_bad (_("too many memory references for `%s'"),
7560 current_templates->start->name);
7564 /* Check for base index form. We detect the base index form by
7565 looking for an ')' at the end of the operand, searching
7566 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7568 base_string = op_string + strlen (op_string);
7571 if (is_space_char (*base_string))
7574 /* If we only have a displacement, set-up for it to be parsed later. */
7575 displacement_string_start = op_string;
7576 displacement_string_end = base_string + 1;
7578 if (*base_string == ')')
7581 unsigned int parens_balanced = 1;
7582 /* We've already checked that the number of left & right ()'s are
7583 equal, so this loop will not be infinite. */
/* Scan backwards to the '(' matching the final ')'.  */
7587 if (*base_string == ')')
7589 if (*base_string == '(')
7592 while (parens_balanced);
7594 temp_string = base_string;
7596 /* Skip past '(' and whitespace. */
7598 if (is_space_char (*base_string))
7601 if (*base_string == ','
7602 || ((i.base_reg = parse_register (base_string, &end_op))
7605 displacement_string_end = temp_string;
7607 i.types[this_operand].bitfield.baseindex = 1;
7611 base_string = end_op;
7612 if (is_space_char (*base_string))
7616 /* There may be an index reg or scale factor here. */
7617 if (*base_string == ',')
7620 if (is_space_char (*base_string))
7623 if ((i.index_reg = parse_register (base_string, &end_op))
7626 base_string = end_op;
7627 if (is_space_char (*base_string))
7629 if (*base_string == ',')
7632 if (is_space_char (*base_string))
7635 else if (*base_string != ')')
7637 as_bad (_("expecting `,' or `)' "
7638 "after index register in `%s'"),
7643 else if (*base_string == REGISTER_PREFIX)
7645 as_bad (_("bad register name `%s'"), base_string);
7649 /* Check for scale factor. */
7650 if (*base_string != ')')
7652 char *end_scale = i386_scale (base_string);
7657 base_string = end_scale;
7658 if (is_space_char (*base_string))
7660 if (*base_string != ')')
7662 as_bad (_("expecting `)' "
7663 "after scale factor in `%s'"),
7668 else if (!i.index_reg)
7670 as_bad (_("expecting index register or scale factor "
7671 "after `,'; got '%c'"),
7676 else if (*base_string != ')')
7678 as_bad (_("expecting `,' or `)' "
7679 "after base register in `%s'"),
7684 else if (*base_string == REGISTER_PREFIX)
7686 as_bad (_("bad register name `%s'"), base_string);
7691 /* If there's an expression beginning the operand, parse it,
7692 assuming displacement_string_start and
7693 displacement_string_end are meaningful. */
7694 if (displacement_string_start != displacement_string_end)
7696 if (!i386_displacement (displacement_string_start,
7697 displacement_string_end))
7701 /* Special case for (%dx) while doing input/output op. */
7703 && operand_type_equal (&i.base_reg->reg_type,
7704 &reg16_inoutportreg)
7706 && i.log2_scale_factor == 0
7707 && i.seg[i.mem_operands] == 0
7708 && !operand_type_check (i.types[this_operand], disp))
7710 i.types[this_operand] = inoutportreg;
7714 if (i386_index_check (operand_string) == 0)
7716 i.types[this_operand].bitfield.mem = 1;
7721 /* It's not a memory operand; argh! */
7722 as_bad (_("invalid char %s beginning operand %d `%s'"),
7723 output_invalid (*op_string),
7728 return 1; /* Normal return. */
7731 /* Calculate the maximum variable size (i.e., excluding fr_fix)
7732 that an rs_machine_dependent frag may reach. */
7735 i386_frag_max_var (fragS *frag)
7737 /* The only relaxable frags are for jumps.
7738 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
7739 gas_assert (frag->fr_type == rs_machine_dependent);
/* 4 = dword displacement; conditional jumps need an extra opcode byte.  */
7740 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
7743 /* md_estimate_size_before_relax()
7745 Called just before relax() for rs_machine_dependent frags. The x86
7746 assembler uses these frags to handle variable size jump
7749 Any symbol that is now undefined will not become defined.
7750 Return the correct fr_subtype in the frag.
7751 Return the initial "guess for variable size of frag" to caller.
7752 The guess is actually the growth beyond the fixed part. Whatever
7753 we do to grow the fixed or variable part contributes to our
/* (Listing non-contiguous; brace structure and some lines elided.)  */
7757 md_estimate_size_before_relax (fragS *fragP, segT segment)
7759 /* We've already got fragP->fr_subtype right; all we have to do is
7760 check for un-relaxable symbols. On an ELF system, we can't relax
7761 an externally visible symbol, because it may be overridden by a
7763 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7764 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7766 && (S_IS_EXTERNAL (fragP->fr_symbol)
7767 || S_IS_WEAK (fragP->fr_symbol)
7768 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7769 & BSF_GNU_INDIRECT_FUNCTION))))
7771 #if defined (OBJ_COFF) && defined (TE_PE)
7772 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7773 && S_IS_WEAK (fragP->fr_symbol))
7777 /* Symbol is undefined in this segment, or we need to keep a
7778 reloc so that weak symbols can be overridden. */
7779 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7780 enum bfd_reloc_code_real reloc_type;
7781 unsigned char *opcode;
/* A reloc type recorded in fr_var (by the operand parser) wins;
   otherwise pick a plain PC-relative reloc of the right width.  */
7784 if (fragP->fr_var != NO_RELOC)
7785 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7787 reloc_type = BFD_RELOC_16_PCREL;
7789 reloc_type = BFD_RELOC_32_PCREL;
7791 old_fr_fix = fragP->fr_fix;
7792 opcode = (unsigned char *) fragP->fr_opcode;
7794 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7797 /* Make jmp (0xeb) a (d)word displacement jump. */
7799 fragP->fr_fix += size;
7800 fix_new (fragP, old_fr_fix, size,
7802 fragP->fr_offset, 1,
7808 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7810 /* Negate the condition, and branch past an
7811 unconditional jump. */
7814 /* Insert an unconditional jump. */
7816 /* We added two extra opcode bytes, and have a two byte
7818 fragP->fr_fix += 2 + 2;
7819 fix_new (fragP, old_fr_fix + 2, 2,
7821 fragP->fr_offset, 1,
7828 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
/* Keep the 1-byte form and emit a signed 8-bit PC-relative fix.  */
7833 fixP = fix_new (fragP, old_fr_fix, 1,
7835 fragP->fr_offset, 1,
7837 fixP->fx_signed = 1;
7841 /* This changes the byte-displacement jump 0x7N
7842 to the (d)word-displacement jump 0x0f,0x8N. */
7843 opcode[1] = opcode[0] + 0x10;
7844 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7845 /* We've added an opcode byte. */
7846 fragP->fr_fix += 1 + size;
7847 fix_new (fragP, old_fr_fix + 1, size,
7849 fragP->fr_offset, 1,
7854 BAD_CASE (fragP->fr_subtype);
/* Growth is whatever we appended to the fixed part.  */
7858 return fragP->fr_fix - old_fr_fix;
7861 /* Guess size depending on current relax state. Initially the relax
7862 state will correspond to a short jump and we return 1, because
7863 the variable part of the frag (the branch offset) is one byte
7864 long. However, we can relax a section more than once and in that
7865 case we must either set fr_subtype back to the unrelaxed state,
7866 or return the value for the appropriate branch. */
7867 return md_relax_table[fragP->fr_subtype].rlx_length;
7870 /* Called after relax() is finished.
7872 In: Address of frag.
7873 fr_type == rs_machine_dependent.
7874 fr_subtype is what the address relaxed to.
7876 Out: Any fixSs and constants are set up.
7877 Caller will turn frag into a ".space 0". */
/* Rewrites a relaxed jump frag into its final encoding and stores the
   displacement.  (Listing non-contiguous; some lines elided.)  */
7880 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
7883 unsigned char *opcode;
7884 unsigned char *where_to_put_displacement = NULL;
7885 offsetT target_address;
7886 offsetT opcode_address;
7887 unsigned int extension = 0;
7888 offsetT displacement_from_opcode_start;
7890 opcode = (unsigned char *) fragP->fr_opcode;
7892 /* Address we want to reach in file space. */
7893 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7895 /* Address opcode resides at in file space. */
7896 opcode_address = fragP->fr_address + fragP->fr_fix;
7898 /* Displacement from opcode start to fill into instruction. */
7899 displacement_from_opcode_start = target_address - opcode_address;
7901 if ((fragP->fr_subtype & BIG) == 0)
7903 /* Don't have to change opcode. */
7904 extension = 1; /* 1 opcode + 1 displacement */
7905 where_to_put_displacement = &opcode[1];
7909 if (no_cond_jump_promotion
7910 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7911 as_warn_where (fragP->fr_file, fragP->fr_line,
7912 _("long jump required"));
7914 switch (fragP->fr_subtype)
7916 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7917 extension = 4; /* 1 opcode + 4 displacement */
7919 where_to_put_displacement = &opcode[1];
7922 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7923 extension = 2; /* 1 opcode + 2 displacement */
7925 where_to_put_displacement = &opcode[1];
7928 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7929 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7930 extension = 5; /* 2 opcode + 4 displacement */
/* Promote Jcc rel8 (0x7N) to 0x0F 0x8N rel32.  */
7931 opcode[1] = opcode[0] + 0x10;
7932 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7933 where_to_put_displacement = &opcode[2];
7936 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7937 extension = 3; /* 2 opcode + 2 displacement */
7938 opcode[1] = opcode[0] + 0x10;
7939 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7940 where_to_put_displacement = &opcode[2];
7943 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7948 where_to_put_displacement = &opcode[3];
7952 BAD_CASE (fragP->fr_subtype);
7957 /* If size if less then four we are sure that the operand fits,
7958 but if it's 4, then it could be that the displacement is larger
7960 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
/* Range check: signed 32-bit overflow test done in unsigned space.  */
7962 && ((addressT) (displacement_from_opcode_start - extension
7963 + ((addressT) 1 << 31))
7964 > (((addressT) 2 << 31) - 1)))
7966 as_bad_where (fragP->fr_file, fragP->fr_line,
7967 _("jump target out of range"));
7968 /* Make us emit 0. */
7969 displacement_from_opcode_start = extension;
7971 /* Now put displacement after opcode. */
7972 md_number_to_chars ((char *) where_to_put_displacement,
7973 (valueT) (displacement_from_opcode_start - extension),
7974 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7975 fragP->fr_fix += extension;
7978 /* Apply a fixup (fixP) to segment data, once it has been determined
7979 by our caller that we have all the info we need to fix it up.
7981 Parameter valP is the pointer to the value of the bits.
7983 On the 386, immediates, displacements, and data pointers are all in
7984 the same (little-endian) format, so we don't need to care about which
/* (Listing non-contiguous; brace structure and some case labels are
   elided -- edit only against the full source.)  */
7988 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
7990 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7991 valueT value = *valP;
7993 #if !defined (TE_Mach)
/* PC-relative fixups: fold the reloc type down to the generic
   BFD_RELOC_nn_PCREL of the matching width.  */
7996 switch (fixP->fx_r_type)
8002 fixP->fx_r_type = BFD_RELOC_64_PCREL;
8005 case BFD_RELOC_X86_64_32S:
8006 fixP->fx_r_type = BFD_RELOC_32_PCREL;
8009 fixP->fx_r_type = BFD_RELOC_16_PCREL;
8012 fixP->fx_r_type = BFD_RELOC_8_PCREL;
8017 if (fixP->fx_addsy != NULL
8018 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
8019 || fixP->fx_r_type == BFD_RELOC_64_PCREL
8020 || fixP->fx_r_type == BFD_RELOC_16_PCREL
8021 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
8022 && !use_rela_relocations)
8024 /* This is a hack. There should be a better way to handle this.
8025 This covers for the fact that bfd_install_relocation will
8026 subtract the current location (for partial_inplace, PC relative
8027 relocations); see more below. */
8031 || OUTPUT_FLAVOR == bfd_target_coff_flavour
8034 value += fixP->fx_where + fixP->fx_frag->fr_address;
8036 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8039 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
8042 || (symbol_section_p (fixP->fx_addsy)
8043 && sym_seg != absolute_section))
8044 && !generic_force_reloc (fixP))
8046 /* Yes, we add the values in twice. This is because
8047 bfd_install_relocation subtracts them out again. I think
8048 bfd_install_relocation is broken, but I don't dare change
8050 value += fixP->fx_where + fixP->fx_frag->fr_address;
8054 #if defined (OBJ_COFF) && defined (TE_PE)
8055 /* For some reason, the PE format does not store a
8056 section address offset for a PC relative symbol. */
8057 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
8058 || S_IS_WEAK (fixP->fx_addsy))
8059 value += md_pcrel_from (fixP);
8062 #if defined (OBJ_COFF) && defined (TE_PE)
/* Weak PE symbols keep their reloc; back out the symbol value so the
   linker resolves it.  */
8063 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8065 value -= S_GET_VALUE (fixP->fx_addsy);
8069 /* Fix a few things - the dynamic linker expects certain values here,
8070 and we must not disappoint it. */
8071 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8072 if (IS_ELF && fixP->fx_addsy)
8073 switch (fixP->fx_r_type)
8075 case BFD_RELOC_386_PLT32:
8076 case BFD_RELOC_X86_64_PLT32:
8077 /* Make the jump instruction point to the address of the operand. At
8078 runtime we merely add the offset to the actual PLT entry. */
8082 case BFD_RELOC_386_TLS_GD:
8083 case BFD_RELOC_386_TLS_LDM:
8084 case BFD_RELOC_386_TLS_IE_32:
8085 case BFD_RELOC_386_TLS_IE:
8086 case BFD_RELOC_386_TLS_GOTIE:
8087 case BFD_RELOC_386_TLS_GOTDESC:
8088 case BFD_RELOC_X86_64_TLSGD:
8089 case BFD_RELOC_X86_64_TLSLD:
8090 case BFD_RELOC_X86_64_GOTTPOFF:
8091 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8092 value = 0; /* Fully resolved at runtime. No addend. */
8094 case BFD_RELOC_386_TLS_LE:
8095 case BFD_RELOC_386_TLS_LDO_32:
8096 case BFD_RELOC_386_TLS_LE_32:
8097 case BFD_RELOC_X86_64_DTPOFF32:
8098 case BFD_RELOC_X86_64_DTPOFF64:
8099 case BFD_RELOC_X86_64_TPOFF32:
8100 case BFD_RELOC_X86_64_TPOFF64:
8101 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8104 case BFD_RELOC_386_TLS_DESC_CALL:
8105 case BFD_RELOC_X86_64_TLSDESC_CALL:
8106 value = 0; /* Fully resolved at runtime. No addend. */
8107 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8111 case BFD_RELOC_386_GOT32:
8112 case BFD_RELOC_X86_64_GOT32:
8113 value = 0; /* Fully resolved at runtime. No addend. */
8116 case BFD_RELOC_VTABLE_INHERIT:
8117 case BFD_RELOC_VTABLE_ENTRY:
8124 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8126 #endif /* !defined (TE_Mach) */
8128 /* Are we finished with this relocation now? */
8129 if (fixP->fx_addsy == NULL)
8131 #if defined (OBJ_COFF) && defined (TE_PE)
8132 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8135 /* Remember value for tc_gen_reloc. */
8136 fixP->fx_addnumber = value;
8137 /* Clear out the frag for now. */
8141 else if (use_rela_relocations)
8143 fixP->fx_no_overflow = 1;
8144 /* Remember value for tc_gen_reloc. */
8145 fixP->fx_addnumber = value;
/* Finally patch the computed value into the frag, little-endian.  */
8149 md_number_to_chars (p, value, fixP->fx_size);
/* Convert an ASCII floating-point literal to the target's binary
   representation via the generic IEEE helper.  */
8153 md_atof (int type, char *litP, int *sizeP)
8155 /* This outputs the LITTLENUMs in REVERSE order;
8156 in accord with the bigendian 386. */
/* FALSE = little-endian littlenum order for x86.  */
8157 return ieee_md_atof (type, litP, sizeP, FALSE);
/* Scratch buffer for output_invalid; sized for "(0x" + 2 hex digits +
   ")" + NUL (plus slack).  Not reentrant -- single-threaded assembler.  */
8160 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
/* Render character C for diagnostics: printable chars as-is (elided
   branch), others as "(0xNN)".  Returns the static buffer.  */
8163 output_invalid (int c)
8166 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8169 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8170 "(0x%x)", (unsigned char) c);
8171 return output_invalid_buf;
8174 /* REG_STRING starts *before* REGISTER_PREFIX. */
/* Look REG_STRING up in the register hash table, handling "%st(N)"
   syntax, and reject registers the selected CPU/arch/mode does not
   have.  On success *END_OP points past the register text.  (Listing
   non-contiguous; some lines elided.)  */
8176 static const reg_entry *
8177 parse_real_register (char *reg_string, char **end_op)
8179 char *s = reg_string;
8181 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8184 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8185 if (*s == REGISTER_PREFIX)
8188 if (is_space_char (*s))
/* Copy the canonicalized (register_chars-mapped) name, bounded.  */
8192 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8194 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8195 return (const reg_entry *) NULL;
8199 /* For naked regs, make sure that we are not dealing with an identifier.
8200 This prevents confusing an identifier like `eax_var' with register
8202 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8203 return (const reg_entry *) NULL;
8207 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8209 /* Handle floating point regs, allowing spaces in the (i) part. */
8210 if (r == i386_regtab /* %st is first entry of table */)
8212 if (is_space_char (*s))
8217 if (is_space_char (*s))
8219 if (*s >= '0' && *s <= '7')
8223 if (is_space_char (*s))
/* NOTE(review): %st(0) is canonicalized here; the per-digit lookup
   lines are elided from this listing.  */
8228 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8233 /* We have "%st(" then garbage. */
8234 return (const reg_entry *) NULL;
8238 if (r == NULL || allow_pseudo_reg)
8241 if (operand_type_all_zero (&r->reg_type))
8242 return (const reg_entry *) NULL;
/* Architecture gating: these register classes need at least an i386.  */
8244 if ((r->reg_type.bitfield.reg32
8245 || r->reg_type.bitfield.sreg3
8246 || r->reg_type.bitfield.control
8247 || r->reg_type.bitfield.debug
8248 || r->reg_type.bitfield.test)
8249 && !cpu_arch_flags.bitfield.cpui386)
8250 return (const reg_entry *) NULL;
8252 if (r->reg_type.bitfield.floatreg
8253 && !cpu_arch_flags.bitfield.cpu8087
8254 && !cpu_arch_flags.bitfield.cpu287
8255 && !cpu_arch_flags.bitfield.cpu387)
8256 return (const reg_entry *) NULL;
8258 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8259 return (const reg_entry *) NULL;
8261 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8262 return (const reg_entry *) NULL;
8264 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8265 return (const reg_entry *) NULL;
8267 /* Don't allow fake index register unless allow_index_reg isn't 0. */
8268 if (!allow_index_reg
8269 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8270 return (const reg_entry *) NULL;
/* 64-bit-only registers (REX-extended or 64-bit GPRs) are invalid
   outside 64-bit mode, except control regs on cpulm targets.  */
8272 if (((r->reg_flags & (RegRex64 | RegRex))
8273 || r->reg_type.bitfield.reg64)
8274 && (!cpu_arch_flags.bitfield.cpulm
8275 || !operand_type_equal (&r->reg_type, &control))
8276 && flag_code != CODE_64BIT)
8277 return (const reg_entry *) NULL;
/* The pseudo segment register "flat" only exists in Intel syntax.  */
8279 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8280 return (const reg_entry *) NULL;
8285 /* REG_STRING starts *before* REGISTER_PREFIX. */
/* Wrapper around parse_real_register: first try a literal register
   parse, then (in the fall-back path visible below) look the name up
   in the symbol table and accept symbols that live in reg_section --
   i.e. register aliases created with .set/.equ.
   NOTE(review): intermediate source lines are missing from this
   extract; the else-branch structure is not fully visible.  */
8287 static const reg_entry *
8288 parse_register (char *reg_string, char **end_op)
8292 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8293 r = parse_real_register (reg_string, end_op)
8298 char *save = input_line_pointer;
/* Temporarily point the scrubber at REG_STRING so get_symbol_end()
   can delimit the symbol name, then restore it afterwards.  */
8302 input_line_pointer = reg_string;
8303 c = get_symbol_end ();
8304 symbolP = symbol_find (reg_string);
8305 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8307 const expressionS *e = symbol_get_value_expression (symbolP);
8309 know (e->X_op == O_register);
8310 know (e->X_add_number >= 0
8311 && (valueT) e->X_add_number < i386_regtab_size);
8312 r = i386_regtab + e->X_add_number;
8313 *end_op = input_line_pointer;
8315 *input_line_pointer = c;
8316 input_line_pointer = save;
/* md_parse_name hook: let the generic expression parser resolve NAME as
   a register.  On success fill E with an O_register expression indexing
   i386_regtab and return nonzero; otherwise restore input_line_pointer
   and defer to the Intel-syntax name parser (or fail in AT&T mode).
   NOTE(review): extract is missing intermediate lines; see original.  */
8322 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8325 char *end = input_line_pointer;
8328 r = parse_register (name, &input_line_pointer);
8329 if (r && end <= input_line_pointer)
8331 *nextcharP = *input_line_pointer;
8332 *input_line_pointer = 0;
8333 e->X_op = O_register;
8334 e->X_add_number = r - i386_regtab;
8337 input_line_pointer = end;
8339 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
/* md_operand hook: handle operand syntax the generic expression code
   cannot -- a REGISTER_PREFIX'd register name, and (Intel syntax only)
   a bracketed sub-expression.
   NOTE(review): extract is missing intermediate lines (switch braces,
   case labels); structure shown here is incomplete.  */
8343 md_operand (expressionS *e)
8348 switch (*input_line_pointer)
8350 case REGISTER_PREFIX:
8351 r = parse_real_register (input_line_pointer, &end);
8354 e->X_op = O_register;
8355 e->X_add_number = r - i386_regtab;
8356 input_line_pointer = end;
/* Bracketed operand: only reachable in Intel syntax.  */
8361 gas_assert (intel_syntax);
8362 end = input_line_pointer++;
8364 if (*input_line_pointer == ']')
8366 ++input_line_pointer;
8367 e->X_op_symbol = make_expr_symbol (e);
8368 e->X_add_symbol = NULL;
8369 e->X_add_number = 0;
8375 input_line_pointer = end;
/* Command-line option tables consumed by the generic gas driver.
   md_shortopts differs between ELF (SVR4-compatible -k -V -Q -s) and
   other object formats.  */
8382 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8383 const char *md_shortopts = "kVQ:sqn";
8385 const char *md_shortopts = "qn";
/* Long-option identifiers, offset from OPTION_MD_BASE so they cannot
   collide with the generic options.  */
8388 #define OPTION_32 (OPTION_MD_BASE + 0)
8389 #define OPTION_64 (OPTION_MD_BASE + 1)
8390 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8391 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8392 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8393 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8394 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8395 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8396 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8397 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8398 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8399 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8400 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8401 #define OPTION_X32 (OPTION_MD_BASE + 13)
8403 struct option md_longopts[] =
8405 {"32", no_argument, NULL, OPTION_32},
/* --64 is only meaningful where a 64-bit target can exist.  */
8406 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8407 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8408 {"64", no_argument, NULL, OPTION_64},
/* --x32 (ILP32 ABI on x86-64) is ELF-only.  */
8410 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8411 {"x32", no_argument, NULL, OPTION_X32},
8413 {"divide", no_argument, NULL, OPTION_DIVIDE},
8414 {"march", required_argument, NULL, OPTION_MARCH},
8415 {"mtune", required_argument, NULL, OPTION_MTUNE},
8416 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8417 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8418 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8419 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8420 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8421 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8422 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8423 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8424 {NULL, no_argument, NULL, 0}
8426 size_t md_longopts_size = sizeof (md_longopts);
/* Process one command-line option C with argument ARG.  Returns nonzero
   when the option is recognized (return statements are among the lines
   missing from this extract).
   NOTE(review): many intermediate lines (case labels, braces, returns)
   are absent here; the switch structure is incomplete as shown.  */
8429 md_parse_option (int c, char *arg)
8437 optimize_align_code = 0;
8444 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8445 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8446 should be emitted or not. FIXME: Not implemented. */
8450 /* -V: SVR4 argument to print version ID. */
8452 print_version_id ();
8455 /* -k: Ignore for FreeBSD compatibility. */
8460 /* -s: On i386 Solaris, this tells the native assembler to use
8461 .stab instead of .stab.excl. We always use .stab anyhow. */
/* --64: scan the list of compiled-in BFD targets for a 64-bit one;
   fatal error if none was configured in.  */
8464 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8465 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8468 const char **list, **l;
8470 list = bfd_target_list ();
8471 for (l = list; *l != NULL; l++)
8472 if (CONST_STRNEQ (*l, "elf64-x86-64")
8473 || strcmp (*l, "coff-x86-64") == 0
8474 || strcmp (*l, "pe-x86-64") == 0
8475 || strcmp (*l, "pei-x86-64") == 0
8476 || strcmp (*l, "mach-o-x86-64") == 0)
8478 default_arch = "x86_64";
8482 as_fatal (_("no compiled in support for x86_64"));
/* --x32: 32-bit ABI on x86-64, ELF only.  */
8488 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8492 const char **list, **l;
8494 list = bfd_target_list ();
8495 for (l = list; *l != NULL; l++)
8496 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8498 default_arch = "x86_64:32";
8502 as_fatal (_("no compiled in support for 32bit x86_64"));
8506 as_fatal (_("32bit x86_64 is only supported for ELF"));
/* --32.  */
8511 default_arch = "i386";
/* --divide: when '/' is an SVR4 comment character, rebuild the
   comment-chars string without it so '/' means division.  */
8515 #ifdef SVR4_COMMENT_CHARS
8520 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8522 for (s = i386_comment_chars; *s != '\0'; s++)
8526 i386_comment_chars = n;
/* -march=CPU[,+EXT...]: walk the '+'-separated list, matching each
   token against cpu_arch[] as either a base CPU or an extension.  */
8532 arch = xstrdup (arg);
8536 as_fatal (_("invalid -march= option: `%s'"), arg);
8537 next = strchr (arch, '+');
8540 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8542 if (strcmp (arch, cpu_arch [j].name) == 0)
8545 if (! cpu_arch[j].flags.bitfield.cpui386)
8548 cpu_arch_name = cpu_arch[j].name;
8549 cpu_sub_arch_name = NULL;
8550 cpu_arch_flags = cpu_arch[j].flags;
8551 cpu_arch_isa = cpu_arch[j].type;
8552 cpu_arch_isa_flags = cpu_arch[j].flags;
8553 if (!cpu_arch_tune_set)
8555 cpu_arch_tune = cpu_arch_isa;
8556 cpu_arch_tune_flags = cpu_arch_isa_flags;
/* Extension entries are stored with a leading '.' in cpu_arch[].  */
8560 else if (*cpu_arch [j].name == '.'
8561 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8563 /* ISA entension. */
8564 i386_cpu_flags flags;
8566 if (!cpu_arch[j].negated)
8567 flags = cpu_flags_or (cpu_arch_flags,
8570 flags = cpu_flags_and_not (cpu_arch_flags,
8572 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8574 if (cpu_sub_arch_name)
8576 char *name = cpu_sub_arch_name;
8577 cpu_sub_arch_name = concat (name,
8579 (const char *) NULL);
8583 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8584 cpu_arch_flags = flags;
8585 cpu_arch_isa_flags = flags;
8591 if (j >= ARRAY_SIZE (cpu_arch))
8592 as_fatal (_("invalid -march= option: `%s'"), arg);
8596 while (next != NULL );
/* -mtune=CPU.  */
8601 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8602 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8604 if (strcmp (arg, cpu_arch [j].name) == 0)
8606 cpu_arch_tune_set = 1;
8607 cpu_arch_tune = cpu_arch [j].type;
8608 cpu_arch_tune_flags = cpu_arch[j].flags;
8612 if (j >= ARRAY_SIZE (cpu_arch))
8613 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8616 case OPTION_MMNEMONIC:
8617 if (strcasecmp (arg, "att") == 0)
8619 else if (strcasecmp (arg, "intel") == 0)
8622 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8625 case OPTION_MSYNTAX:
8626 if (strcasecmp (arg, "att") == 0)
8628 else if (strcasecmp (arg, "intel") == 0)
8631 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8634 case OPTION_MINDEX_REG:
8635 allow_index_reg = 1;
8638 case OPTION_MNAKED_REG:
8639 allow_naked_reg = 1;
8642 case OPTION_MOLD_GCC:
8646 case OPTION_MSSE2AVX:
8650 case OPTION_MSSE_CHECK:
8651 if (strcasecmp (arg, "error") == 0)
8652 sse_check = sse_check_error;
8653 else if (strcasecmp (arg, "warning") == 0)
8654 sse_check = sse_check_warning;
8655 else if (strcasecmp (arg, "none") == 0)
8656 sse_check = sse_check_none;
8658 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8661 case OPTION_MAVXSCALAR:
8662 if (strcasecmp (arg, "128") == 0)
8664 else if (strcasecmp (arg, "256") == 0)
8667 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
/* Print the list of supported CPUs (EXT == 0) or ISA extensions
   (EXT != 0) into STREAM for --help output, word-wrapping within a
   fixed-size message buffer.  CHECK filters out entries that are not
   valid base CPUs.
   NOTE(review): extract is missing intermediate lines, including the
   MESSAGE_TEMPLATE expansion itself.  */
8676 #define MESSAGE_TEMPLATE \
8680 show_arch (FILE *stream, int ext, int check)
8682 static char message[] = MESSAGE_TEMPLATE;
/* Output starts at column 27 to line up under the option text.  */
8683 char *start = message + 27;
8685 int size = sizeof (MESSAGE_TEMPLATE);
8692 left = size - (start - message);
8693 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8695 /* Should it be skipped? */
8696 if (cpu_arch [j].skip)
8699 name = cpu_arch [j].name;
8700 len = cpu_arch [j].len;
8703 /* It is an extension. Skip if we aren't asked to show it. */
8714 /* It is an processor. Skip if we show only extension. */
8717 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8719 /* It is an impossible processor - skip. */
8723 /* Reserve 2 spaces for ", " or ",\0" */
8726 /* Check if there is any room. */
8734 p = mempcpy (p, name, len);
8738 /* Output the current message now and start a new one. */
8741 fprintf (stream, "%s\n", message);
8743 left = size - (start - message) - len - 2;
8745 gas_assert (left >= 0);
8747 p = mempcpy (p, name, len);
8752 fprintf (stream, "%s\n", message);
/* Print the x86-specific portion of `as --help' to STREAM.  Sections
   are conditionalized on the same object-format macros that gate the
   corresponding options in md_longopts / md_parse_option.  */
8756 md_show_usage (FILE *stream)
8758 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8759 fprintf (stream, _("\
8761 -V print assembler version number\n\
8764 fprintf (stream, _("\
8765 -n Do not optimize code alignment\n\
8766 -q quieten some warnings\n"));
8767 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8768 fprintf (stream, _("\
8771 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8772 || defined (TE_PE) || defined (TE_PEP))
8773 fprintf (stream, _("\
8774 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8776 #ifdef SVR4_COMMENT_CHARS
8777 fprintf (stream, _("\
8778 --divide do not treat `/' as a comment character\n"));
8780 fprintf (stream, _("\
8781 --divide ignored\n"));
8783 fprintf (stream, _("\
8784 -march=CPU[,+EXTENSION...]\n\
8785 generate code for CPU and EXTENSION, CPU is one of:\n"));
8786 show_arch (stream, 0, 1);
8787 fprintf (stream, _("\
8788 EXTENSION is combination of:\n"));
8789 show_arch (stream, 1, 0);
8790 fprintf (stream, _("\
8791 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8792 show_arch (stream, 0, 0);
8793 fprintf (stream, _("\
8794 -msse2avx encode SSE instructions with VEX prefix\n"));
8795 fprintf (stream, _("\
8796 -msse-check=[none|error|warning]\n\
8797 check SSE instructions\n"));
8798 fprintf (stream, _("\
8799 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8801 fprintf (stream, _("\
8802 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8803 fprintf (stream, _("\
8804 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8805 fprintf (stream, _("\
8806 -mindex-reg support pseudo index registers\n"));
8807 fprintf (stream, _("\
8808 -mnaked-reg don't require `%%' prefix for registers\n"));
8809 fprintf (stream, _("\
8810 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8813 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8814 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8815 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8817 /* Pick the target format to use. */
/* Return the BFD target-format name for the configured default_arch
   and OUTPUT_FLAVOR, setting flag_code / x86_elf_abi and the
   relocation-mode globals (use_rela_relocations etc.) as side effects.
   NOTE(review): extract is missing intermediate lines (case breaks,
   default labels); structure shown is incomplete.  */
8820 i386_target_format (void)
8822 if (!strncmp (default_arch, "x86_64", 6))
8824 update_code_flag (CODE_64BIT, 1);
/* "x86_64" -> LP64 ABI, "x86_64:32" -> x32 ILP32 ABI.  */
8825 if (default_arch[6] == '\0')
8826 x86_elf_abi = X86_64_ABI;
8828 x86_elf_abi = X86_64_X32_ABI;
8830 else if (!strcmp (default_arch, "i386"))
8831 update_code_flag (CODE_32BIT, 1);
8833 as_fatal (_("unknown architecture"));
/* If -march/-mtune were not given, default ISA/tune flags from the
   cpu_arch[] entry matching the code size.  */
8835 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8836 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8837 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8838 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8840 switch (OUTPUT_FLAVOR)
8842 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8843 case bfd_target_aout_flavour:
8844 return AOUT_TARGET_FORMAT;
8846 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8847 # if defined (TE_PE) || defined (TE_PEP)
8848 case bfd_target_coff_flavour:
8849 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8850 # elif defined (TE_GO32)
8851 case bfd_target_coff_flavour:
8854 case bfd_target_coff_flavour:
8858 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8859 case bfd_target_elf_flavour:
8863 switch (x86_elf_abi)
8866 format = ELF_TARGET_FORMAT;
/* 64-bit and x32 ELF both use RELA relocations.  */
8869 use_rela_relocations = 1;
8871 format = ELF_TARGET_FORMAT64;
8873 case X86_64_X32_ABI:
8874 use_rela_relocations = 1;
8876 disallow_64bit_reloc = 1;
8877 format = ELF_TARGET_FORMAT32;
/* Intel L1OM/K1OM co-processors get their own ELF formats and
   must be 64-bit.  */
8880 if (cpu_arch_isa == PROCESSOR_L1OM)
8882 if (x86_elf_abi != X86_64_ABI)
8883 as_fatal (_("Intel L1OM is 64bit only"));
8884 return ELF_TARGET_L1OM_FORMAT;
8886 if (cpu_arch_isa == PROCESSOR_K1OM)
8888 if (x86_elf_abi != X86_64_ABI)
8889 as_fatal (_("Intel K1OM is 64bit only"));
8890 return ELF_TARGET_K1OM_FORMAT;
8896 #if defined (OBJ_MACH_O)
8897 case bfd_target_mach_o_flavour:
8898 if (flag_code == CODE_64BIT)
8900 use_rela_relocations = 1;
8902 return "mach-o-x86-64";
8905 return "mach-o-i386";
8913 #endif /* OBJ_MAYBE_ more than one */
8915 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
/* Emit an ELF .note section of type NT_ARCH carrying the cpu_arch_name
   string, preserving and restoring the current (sub)segment around the
   section switch.
   NOTE(review): extract is missing intermediate lines (descsz
   assignment, local declarations).  */
8917 i386_elf_emit_arch_note (void)
8919 if (IS_ELF && cpu_arch_name != NULL)
8922 asection *seg = now_seg;
8923 subsegT subseg = now_subseg;
8924 Elf_Internal_Note i_note;
8925 Elf_External_Note e_note;
8926 asection *note_secp;
8929 /* Create the .note section. */
8930 note_secp = subseg_new (".note", 0);
8931 bfd_set_section_flags (stdoutput,
8933 SEC_HAS_CONTENTS | SEC_READONLY);
8935 /* Process the arch string. */
8936 len = strlen (cpu_arch_name);
8938 i_note.namesz = len + 1;
8940 i_note.type = NT_ARCH;
/* Write the note header fields (namesz, descsz, type) then the
   NUL-terminated name payload.  */
8941 p = frag_more (sizeof (e_note.namesz));
8942 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8943 p = frag_more (sizeof (e_note.descsz));
8944 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8945 p = frag_more (sizeof (e_note.type));
8946 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8947 p = frag_more (len + 1);
8948 strcpy (p, cpu_arch_name);
8950 frag_align (2, 0, 0);
8952 subseg_set (seg, subseg);
/* md_undefined_symbol hook: lazily create the special GOT symbol
   (_GLOBAL_OFFSET_TABLE_) the first time it is referenced.  The cheap
   first-three-characters comparison short-circuits the full strcmp.
   NOTE(review): extract is missing intermediate lines (the GOT_symbol
   guard/return lines).  */
8958 md_undefined_symbol (char *name)
8960 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8961 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8962 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8963 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8967 if (symbol_find (name))
8968 as_bad (_("GOT already in symbol table"));
8969 GOT_symbol = symbol_new (name, undefined_section,
8970 (valueT) 0, &zero_address_frag);
8977 /* Round up a section size to the appropriate boundary. */
8980 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8982 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8983 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8985 /* For a.out, force the section size to be aligned. If we don't do
8986 this, BFD will align it for us, but it will not write out the
8987 final bytes of the section. This may be a bug in BFD, but it is
8988 easier to fix it here since that is how the other a.out targets
8992 align = bfd_get_section_alignment (stdoutput, segment);
/* Round SIZE up to a multiple of 1 << align.  */
8993 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
9000 /* On the i386, PC-relative offsets are relative to the start of the
9001 next instruction. That is, the address of the offset, plus its
9002 size, since the offset is always the last part of the insn. */
9005 md_pcrel_from (fixS *fixP)
9007 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
/* Handle the .bss pseudo-op: switch to the bss section at the
   subsegment given by the (absolute) expression that follows.  */
9013 s_bss (int ignore ATTRIBUTE_UNUSED)
9017 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9019 obj_elf_section_change_hook ();
9021 temp = get_absolute_expression ();
9022 subseg_set (bss_section, (subsegT) temp);
9023 demand_empty_rest_of_line ();
/* Late fixup adjustment: an expression of the form `sym - GOT_symbol'
   is rewritten into a GOT-relative relocation type.
   NOTE(review): extract is missing the 32/64-bit branch structure
   around these assignments.  */
9029 i386_validate_fix (fixS *fixp)
9031 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
9033 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
9037 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
9042 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
9044 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
/* Translate an internal fixup FIXP into a BFD arelent for the writer.
   Maps the fixup's reloc type and size to a bfd_reloc_code_real_type,
   applies the GOTPC special cases, computes the addend (Rel vs. Rela
   conventions differ), and looks up the howto.
   NOTE(review): extract is missing intermediate lines (local
   declarations, case labels, break/return statements); the switch
   structure is incomplete as shown.  */
9051 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
9054 bfd_reloc_code_real_type code;
9056 switch (fixp->fx_r_type)
/* All of these reloc types pass through unchanged.  */
9058 case BFD_RELOC_X86_64_PLT32:
9059 case BFD_RELOC_X86_64_GOT32:
9060 case BFD_RELOC_X86_64_GOTPCREL:
9061 case BFD_RELOC_386_PLT32:
9062 case BFD_RELOC_386_GOT32:
9063 case BFD_RELOC_386_GOTOFF:
9064 case BFD_RELOC_386_GOTPC:
9065 case BFD_RELOC_386_TLS_GD:
9066 case BFD_RELOC_386_TLS_LDM:
9067 case BFD_RELOC_386_TLS_LDO_32:
9068 case BFD_RELOC_386_TLS_IE_32:
9069 case BFD_RELOC_386_TLS_IE:
9070 case BFD_RELOC_386_TLS_GOTIE:
9071 case BFD_RELOC_386_TLS_LE_32:
9072 case BFD_RELOC_386_TLS_LE:
9073 case BFD_RELOC_386_TLS_GOTDESC:
9074 case BFD_RELOC_386_TLS_DESC_CALL:
9075 case BFD_RELOC_X86_64_TLSGD:
9076 case BFD_RELOC_X86_64_TLSLD:
9077 case BFD_RELOC_X86_64_DTPOFF32:
9078 case BFD_RELOC_X86_64_DTPOFF64:
9079 case BFD_RELOC_X86_64_GOTTPOFF:
9080 case BFD_RELOC_X86_64_TPOFF32:
9081 case BFD_RELOC_X86_64_TPOFF64:
9082 case BFD_RELOC_X86_64_GOTOFF64:
9083 case BFD_RELOC_X86_64_GOTPC32:
9084 case BFD_RELOC_X86_64_GOT64:
9085 case BFD_RELOC_X86_64_GOTPCREL64:
9086 case BFD_RELOC_X86_64_GOTPC64:
9087 case BFD_RELOC_X86_64_GOTPLT64:
9088 case BFD_RELOC_X86_64_PLTOFF64:
9089 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9090 case BFD_RELOC_X86_64_TLSDESC_CALL:
9092 case BFD_RELOC_VTABLE_ENTRY:
9093 case BFD_RELOC_VTABLE_INHERIT:
9095 case BFD_RELOC_32_SECREL:
9097 code = fixp->fx_r_type;
9099 case BFD_RELOC_X86_64_32S:
9100 if (!fixp->fx_pcrel)
9102 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
9103 code = fixp->fx_r_type;
/* Otherwise pick a generic reloc by size; PC-relative and absolute
   variants are selected in separate switches.  */
9109 switch (fixp->fx_size)
9112 as_bad_where (fixp->fx_file, fixp->fx_line,
9113 _("can not do %d byte pc-relative relocation"),
9115 code = BFD_RELOC_32_PCREL;
9117 case 1: code = BFD_RELOC_8_PCREL; break;
9118 case 2: code = BFD_RELOC_16_PCREL; break;
9119 case 4: code = BFD_RELOC_32_PCREL; break;
9121 case 8: code = BFD_RELOC_64_PCREL; break;
9127 switch (fixp->fx_size)
9130 as_bad_where (fixp->fx_file, fixp->fx_line,
9131 _("can not do %d byte relocation"),
9133 code = BFD_RELOC_32;
9135 case 1: code = BFD_RELOC_8; break;
9136 case 2: code = BFD_RELOC_16; break;
9137 case 4: code = BFD_RELOC_32; break;
9139 case 8: code = BFD_RELOC_64; break;
/* References against the GOT symbol itself become GOTPC relocs.  */
9146 if ((code == BFD_RELOC_32
9147 || code == BFD_RELOC_32_PCREL
9148 || code == BFD_RELOC_X86_64_32S)
9150 && fixp->fx_addsy == GOT_symbol)
9153 code = BFD_RELOC_386_GOTPC;
9155 code = BFD_RELOC_X86_64_GOTPC32;
9157 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9159 && fixp->fx_addsy == GOT_symbol)
9161 code = BFD_RELOC_X86_64_GOTPC64;
9164 rel = (arelent *) xmalloc (sizeof (arelent));
9165 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9166 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9168 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9170 if (!use_rela_relocations)
9172 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9173 vtable entry to be used in the relocation's section offset. */
9174 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9175 rel->address = fixp->fx_offset;
9176 #if defined (OBJ_COFF) && defined (TE_PE)
9177 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9178 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9183 /* Use the rela in 64bit mode. */
/* x32 cannot represent 64-bit relocation types -- diagnose them.  */
9186 if (disallow_64bit_reloc)
9189 case BFD_RELOC_X86_64_DTPOFF64:
9190 case BFD_RELOC_X86_64_TPOFF64:
9191 case BFD_RELOC_64_PCREL:
9192 case BFD_RELOC_X86_64_GOTOFF64:
9193 case BFD_RELOC_X86_64_GOT64:
9194 case BFD_RELOC_X86_64_GOTPCREL64:
9195 case BFD_RELOC_X86_64_GOTPC64:
9196 case BFD_RELOC_X86_64_GOTPLT64:
9197 case BFD_RELOC_X86_64_PLTOFF64:
9198 as_bad_where (fixp->fx_file, fixp->fx_line,
9199 _("cannot represent relocation type %s in x32 mode"),
9200 bfd_get_reloc_code_name (code));
9206 if (!fixp->fx_pcrel)
9207 rel->addend = fixp->fx_offset;
/* For these PC-relative GOT/TLS relocs the addend excludes the
   fixup size; others get the full pcrel adjustment below.  */
9211 case BFD_RELOC_X86_64_PLT32:
9212 case BFD_RELOC_X86_64_GOT32:
9213 case BFD_RELOC_X86_64_GOTPCREL:
9214 case BFD_RELOC_X86_64_TLSGD:
9215 case BFD_RELOC_X86_64_TLSLD:
9216 case BFD_RELOC_X86_64_GOTTPOFF:
9217 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9218 case BFD_RELOC_X86_64_TLSDESC_CALL:
9219 rel->addend = fixp->fx_offset - fixp->fx_size;
9222 rel->addend = (section->vma
9224 + fixp->fx_addnumber
9225 + md_pcrel_from (fixp));
9230 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9231 if (rel->howto == NULL)
9233 as_bad_where (fixp->fx_file, fixp->fx_line,
9234 _("cannot represent relocation type %s"),
9235 bfd_get_reloc_code_name (code));
9236 /* Set howto to a garbage value so that we can keep going. */
9237 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9238 gas_assert (rel->howto != NULL);
/* The Intel-syntax parsing code is textually included here.  */
9244 #include "tc-i386-intel.c"
/* CFI support: parse a register name from the input stream and convert
   it to its DWARF2 register number in EXP (O_constant on success,
   O_illegal on failure).  Temporarily enables naked registers and the
   pseudo-register table, treating '.' as a register character, and
   restores all three settings afterwards.  */
9247 tc_x86_parse_to_dw2regnum (expressionS *exp)
9249 int saved_naked_reg;
9250 char saved_register_dot;
9252 saved_naked_reg = allow_naked_reg;
9253 allow_naked_reg = 1;
9254 saved_register_dot = register_chars['.'];
9255 register_chars['.'] = '.';
9256 allow_pseudo_reg = 1;
9257 expression_and_evaluate (exp);
9258 allow_pseudo_reg = 0;
9259 register_chars['.'] = saved_register_dot;
9260 allow_naked_reg = saved_naked_reg;
9262 if (exp->X_op == O_register && exp->X_add_number >= 0)
9264 if ((addressT) exp->X_add_number < i386_regtab_size)
9266 exp->X_op = O_constant;
/* flag_code >> 1 selects the 32-bit vs 64-bit DWARF column.  */
9267 exp->X_add_number = i386_regtab[exp->X_add_number]
9268 .dw2_regnum[flag_code >> 1];
9271 exp->X_op = O_illegal;
/* Emit the initial CFI instructions for a new FDE: CFA is at the stack
   pointer (esp/rsp by code size) and the return address is at the CFA.
   The stack pointer's DWARF number is computed once per mode and
   cached in sp_regno[].  */
9276 tc_x86_frame_initial_instructions (void)
9278 static unsigned int sp_regno[2];
9280 if (!sp_regno[flag_code >> 1])
9282 char *saved_input = input_line_pointer;
9283 char sp[][4] = {"esp", "rsp"};
/* Reuse the register parser by pointing the input stream at the
   literal register name, then restore it.  */
9286 input_line_pointer = sp[flag_code >> 1];
9287 tc_x86_parse_to_dw2regnum (&exp);
9288 gas_assert (exp.X_op == O_constant);
9289 sp_regno[flag_code >> 1] = exp.X_add_number;
9290 input_line_pointer = saved_input;
9293 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9294 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
/* DWARF2 address size in bytes.  x32 presumably forces 4 in the line
   elided from this extract; otherwise derive it from the BFD
   architecture.  */
9298 x86_dwarf2_addr_size (void)
9300 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9301 if (x86_elf_abi == X86_64_X32_ABI)
9304 return bfd_arch_bits_per_address (stdoutput) / 8;
/* Map the section-type string "unwind" to SHT_X86_64_UNWIND in 64-bit
   code (the fall-through return is among the lines elided here).  */
9308 i386_elf_section_type (const char *str, size_t len)
9310 if (flag_code == CODE_64BIT
9311 && len == sizeof ("unwind") - 1
9312 && strncmp (str, "unwind", 6) == 0)
9313 return SHT_X86_64_UNWIND;
/* Solaris: mark the .eh_frame section with the x86-64 unwind section
   type when assembling 64-bit code.  */
9320 i386_solaris_fix_up_eh_frame (segT sec)
9322 if (flag_code == CODE_64BIT)
9323 elf_section_type (sec) = SHT_X86_64_UNWIND;
/* PE: emit a DWARF2 offset as a section-relative (O_secrel) expression
   of SIZE bytes referencing SYMBOL.  */
9329 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9333 exp.X_op = O_secrel;
9334 exp.X_add_symbol = symbol;
9335 exp.X_add_number = 0;
9336 emit_expr (&exp, size);
9340 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9341 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
/* .section flag-letter hook: in 64-bit code the 'l' letter (checked in
   a line elided from this extract) maps to SHF_X86_64_LARGE; otherwise
   set an error message and fail.  */
9344 x86_64_section_letter (int letter, char **ptr_msg)
9346 if (flag_code == CODE_64BIT)
9349 return SHF_X86_64_LARGE;
9351 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9354 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
/* .section keyword hook: the word "large" maps to SHF_X86_64_LARGE in
   64-bit code.  */
9359 x86_64_section_word (char *str, size_t len)
9361 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9362 return SHF_X86_64_LARGE;
/* Handle the .largecomm pseudo-op.  Outside 64-bit mode it degrades to
   an ordinary .comm with a warning.  In 64-bit mode, local symbols go
   to a lazily-created .lbss section and common symbols to the large
   common section; both globals are swapped in around s_comm_internal
   and restored afterwards.  */
9368 handle_large_common (int small ATTRIBUTE_UNUSED)
9370 if (flag_code != CODE_64BIT)
9372 s_comm_internal (0, elf_common_parse);
9373 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9377 static segT lbss_section;
9378 asection *saved_com_section_ptr = elf_com_section_ptr;
9379 asection *saved_bss_section = bss_section;
9381 if (lbss_section == NULL)
9383 flagword applicable;
9385 subsegT subseg = now_subseg;
9387 /* The .lbss section is for local .largecomm symbols. */
9388 lbss_section = subseg_new (".lbss", 0);
9389 applicable = bfd_applicable_section_flags (stdoutput);
9390 bfd_set_section_flags (stdoutput, lbss_section,
9391 applicable & SEC_ALLOC);
9392 seg_info (lbss_section)->bss = 1;
9394 subseg_set (seg, subseg);
/* Temporarily redirect common/bss handling, run the shared .comm
   parser, then restore the saved globals.  */
9397 elf_com_section_ptr = &_bfd_elf_large_com_section;
9398 bss_section = lbss_section;
9400 s_comm_internal (0, elf_common_parse);
9402 elf_com_section_ptr = saved_com_section_ptr;
9403 bss_section = saved_bss_section;
9406 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */