1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
5 Free Software Foundation, Inc.
7 This file is part of GAS, the GNU Assembler.
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
32 #include "safe-ctype.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
48 #define DEFAULT_ARCH "i386"
53 #define INLINE __inline__
59 /* Prefixes will be emitted in the order defined below.
60 WAIT_PREFIX must be the first prefix since FWAIT is really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
69 #define HLE_PREFIX REP_PREFIX
71 #define REX_PREFIX 6 /* must come last. */
72 #define MAX_PREFIXES 7 /* max prefixes per opcode */
74 /* we define the syntax here (modulo base,index,scale syntax) */
75 #define REGISTER_PREFIX '%'
76 #define IMMEDIATE_PREFIX '$'
77 #define ABSOLUTE_PREFIX '*'
79 /* these are the instruction mnemonic suffixes in AT&T syntax or
80 memory operand size in Intel syntax. */
81 #define WORD_MNEM_SUFFIX 'w'
82 #define BYTE_MNEM_SUFFIX 'b'
83 #define SHORT_MNEM_SUFFIX 's'
84 #define LONG_MNEM_SUFFIX 'l'
85 #define QWORD_MNEM_SUFFIX 'q'
86 #define XMMWORD_MNEM_SUFFIX 'x'
87 #define YMMWORD_MNEM_SUFFIX 'y'
88 /* Intel Syntax. Use a non-ascii letter since since it never appears
90 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
92 #define END_OF_INSN '\0'
95 'templates' is for grouping together 'template' structures for opcodes
96 of the same name. This is only used for storing the insns in the grand
97 ole hash table of insns.
98 The templates themselves start at START and range up to (but not including)
103 const insn_template *start;
104 const insn_template *end;
108 /* 386 operand encoding bytes: see 386 book for details of this. */
111 unsigned int regmem; /* codes register or memory operand */
112 unsigned int reg; /* codes register operand (or extended opcode) */
113 unsigned int mode; /* how to interpret regmem & reg */
117 /* x86-64 extension prefix. */
118 typedef int rex_byte;
120 /* 386 opcode byte to code indirect addressing. */
129 /* x86 arch names, types and features */
132 const char *name; /* arch name */
133 unsigned int len; /* arch string length */
134 enum processor_type type; /* arch type */
135 i386_cpu_flags flags; /* cpu feature flags */
136 unsigned int skip; /* show_arch should skip this. */
137 unsigned int negated; /* turn off indicated flags. */
141 static void update_code_flag (int, int);
142 static void set_code_flag (int);
143 static void set_16bit_gcc_code_flag (int);
144 static void set_intel_syntax (int);
145 static void set_intel_mnemonic (int);
146 static void set_allow_index_reg (int);
147 static void set_check (int);
148 static void set_cpu_arch (int);
150 static void pe_directive_secrel (int);
152 static void signed_cons (int);
153 static char *output_invalid (int c);
154 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
156 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
158 static int i386_att_operand (char *);
159 static int i386_intel_operand (char *, int);
160 static int i386_intel_simplify (expressionS *);
161 static int i386_intel_parse_name (const char *, expressionS *);
162 static const reg_entry *parse_register (char *, char **);
163 static char *parse_insn (char *, char *);
164 static char *parse_operands (char *, const char *);
165 static void swap_operands (void);
166 static void swap_2_operands (int, int);
167 static void optimize_imm (void);
168 static void optimize_disp (void);
169 static const insn_template *match_template (void);
170 static int check_string (void);
171 static int process_suffix (void);
172 static int check_byte_reg (void);
173 static int check_long_reg (void);
174 static int check_qword_reg (void);
175 static int check_word_reg (void);
176 static int finalize_imm (void);
177 static int process_operands (void);
178 static const seg_entry *build_modrm_byte (void);
179 static void output_insn (void);
180 static void output_imm (fragS *, offsetT);
181 static void output_disp (fragS *, offsetT);
183 static void s_bss (int);
185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186 static void handle_large_common (int small ATTRIBUTE_UNUSED);
189 static const char *default_arch = DEFAULT_ARCH;
194 /* VEX prefix is either 2 byte or 3 byte. */
195 unsigned char bytes[3];
197 /* Destination or source register specifier. */
198 const reg_entry *register_specifier;
201 /* 'md_assemble ()' gathers together information and puts it into a
208 const reg_entry *regs;
213 operand_size_mismatch,
214 operand_type_mismatch,
215 register_type_mismatch,
216 number_of_operands_mismatch,
217 invalid_instruction_suffix,
220 unsupported_with_intel_mnemonic,
223 invalid_vsib_address,
224 invalid_vector_register_set,
225 unsupported_vector_index_register
230 /* TM holds the template for the insn were currently assembling. */
233 /* SUFFIX holds the instruction size suffix for byte, word, dword
234 or qword, if given. */
237 /* OPERANDS gives the number of given operands. */
238 unsigned int operands;
240 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
241 of given register, displacement, memory operands and immediate
243 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
245 /* TYPES [i] is the type (see above #defines) which tells us how to
246 use OP[i] for the corresponding operand. */
247 i386_operand_type types[MAX_OPERANDS];
249 /* Displacement expression, immediate expression, or register for each
251 union i386_op op[MAX_OPERANDS];
253 /* Flags for operands. */
254 unsigned int flags[MAX_OPERANDS];
255 #define Operand_PCrel 1
257 /* Relocation type for operand */
258 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
260 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
261 the base index byte below. */
262 const reg_entry *base_reg;
263 const reg_entry *index_reg;
264 unsigned int log2_scale_factor;
266 /* SEG gives the seg_entries of this insn. They are zero unless
267 explicit segment overrides are given. */
268 const seg_entry *seg[2];
270 /* PREFIX holds all the given prefix opcodes (usually null).
271 PREFIXES is the number of prefix opcodes. */
272 unsigned int prefixes;
273 unsigned char prefix[MAX_PREFIXES];
275 /* RM and SIB are the modrm byte and the sib byte where the
276 addressing modes of this insn are encoded. */
282 /* Swap operand in encoding. */
283 unsigned int swap_operand;
285 /* Prefer 8bit or 32bit displacement in encoding. */
288 disp_encoding_default = 0,
293 /* Have HLE prefix. */
294 unsigned int have_hle;
297 enum i386_error error;
300 typedef struct _i386_insn i386_insn;
302 /* List of chars besides those in app.c:symbol_chars that can start an
303 operand. Used to prevent the scrubber eating vital white-space. */
304 const char extra_symbol_chars[] = "*%-(["
313 #if (defined (TE_I386AIX) \
314 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
315 && !defined (TE_GNU) \
316 && !defined (TE_LINUX) \
317 && !defined (TE_NACL) \
318 && !defined (TE_NETWARE) \
319 && !defined (TE_FreeBSD) \
320 && !defined (TE_DragonFly) \
321 && !defined (TE_NetBSD)))
322 /* This array holds the chars that always start a comment. If the
323 pre-processor is disabled, these aren't very useful. The option
324 --divide will remove '/' from this list. */
325 const char *i386_comment_chars = "#/";
326 #define SVR4_COMMENT_CHARS 1
327 #define PREFIX_SEPARATOR '\\'
330 const char *i386_comment_chars = "#";
331 #define PREFIX_SEPARATOR '/'
334 /* This array holds the chars that only start a comment at the beginning of
335 a line. If the line seems to have the form '# 123 filename'
336 .line and .file directives will appear in the pre-processed output.
337 Note that input_file.c hand checks for '#' at the beginning of the
338 first line of the input file. This is because the compiler outputs
339 #NO_APP at the beginning of its output.
340 Also note that comments started like this one will always work if
341 '/' isn't otherwise defined. */
342 const char line_comment_chars[] = "#/";
344 const char line_separator_chars[] = ";";
346 /* Chars that can be used to separate mant from exp in floating point
348 const char EXP_CHARS[] = "eE";
350 /* Chars that mean this number is a floating point constant
353 const char FLT_CHARS[] = "fFdDxX";
355 /* Tables for lexical analysis. */
356 static char mnemonic_chars[256];
357 static char register_chars[256];
358 static char operand_chars[256];
359 static char identifier_chars[256];
360 static char digit_chars[256];
362 /* Lexical macros. */
363 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
364 #define is_operand_char(x) (operand_chars[(unsigned char) x])
365 #define is_register_char(x) (register_chars[(unsigned char) x])
366 #define is_space_char(x) ((x) == ' ')
367 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
368 #define is_digit_char(x) (digit_chars[(unsigned char) x])
370 /* All non-digit non-letter characters that may occur in an operand. */
371 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
373 /* md_assemble() always leaves the strings it's passed unaltered. To
374 effect this we maintain a stack of saved characters that we've smashed
375 with '\0's (indicating end of strings for various sub-fields of the
376 assembler instruction). */
377 static char save_stack[32];
378 static char *save_stack_p;
379 #define END_STRING_AND_SAVE(s) \
380 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
381 #define RESTORE_END_STRING(s) \
382 do { *(s) = *--save_stack_p; } while (0)
384 /* The instruction we're assembling. */
387 /* Possible templates for current insn. */
388 static const templates *current_templates;
390 /* Per instruction expressionS buffers: max displacements & immediates. */
391 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
392 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
394 /* Current operand we are working on. */
395 static int this_operand = -1;
397 /* We support four different modes. FLAG_CODE variable is used to distinguish
405 static enum flag_code flag_code;
406 static unsigned int object_64bit;
407 static unsigned int disallow_64bit_reloc;
408 static int use_rela_relocations = 0;
410 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
411 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
412 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
414 /* The ELF ABI to use. */
422 static enum x86_elf_abi x86_elf_abi = I386_ABI;
425 /* The names used to print error messages. */
426 static const char *flag_code_names[] =
433 /* 1 for intel syntax,
435 static int intel_syntax = 0;
437 /* 1 for intel mnemonic,
438 0 if att mnemonic. */
439 static int intel_mnemonic = !SYSV386_COMPAT;
441 /* 1 if support old (<= 2.8.1) versions of gcc. */
442 static int old_gcc = OLDGCC_COMPAT;
444 /* 1 if pseudo registers are permitted. */
445 static int allow_pseudo_reg = 0;
447 /* 1 if register prefix % not required. */
448 static int allow_naked_reg = 0;
450 /* 1 if pseudo index register, eiz/riz, is allowed . */
451 static int allow_index_reg = 0;
453 static enum check_kind
459 sse_check, operand_check = check_warning;
461 /* Register prefix used for error message. */
462 static const char *register_prefix = "%";
464 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
465 leave, push, and pop instructions so that gcc has the same stack
466 frame as in 32 bit mode. */
467 static char stackop_size = '\0';
469 /* Non-zero to optimize code alignment. */
470 int optimize_align_code = 1;
472 /* Non-zero to quieten some warnings. */
473 static int quiet_warnings = 0;
476 static const char *cpu_arch_name = NULL;
477 static char *cpu_sub_arch_name = NULL;
479 /* CPU feature flags. */
480 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
482 /* If we have selected a cpu we are generating instructions for. */
483 static int cpu_arch_tune_set = 0;
485 /* Cpu we are generating instructions for. */
486 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
488 /* CPU feature flags of cpu we are generating instructions for. */
489 static i386_cpu_flags cpu_arch_tune_flags;
491 /* CPU instruction set architecture used. */
492 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
494 /* CPU feature flags of instruction set architecture used. */
495 i386_cpu_flags cpu_arch_isa_flags;
497 /* If set, conditional jumps are not automatically promoted to handle
498 larger than a byte offset. */
499 static unsigned int no_cond_jump_promotion = 0;
501 /* Encode SSE instructions with VEX prefix. */
502 static unsigned int sse2avx;
504 /* Encode scalar AVX instructions with specific vector length. */
511 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
512 static symbolS *GOT_symbol;
514 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
515 unsigned int x86_dwarf2_return_column;
517 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
518 int x86_cie_data_alignment;
520 /* Interface to relax_segment.
521 There are 3 major relax states for 386 jump insns because the
522 different types of jumps add different sizes to frags when we're
523 figuring out what sort of jump to choose to reach a given label. */
526 #define UNCOND_JUMP 0
528 #define COND_JUMP86 2
533 #define SMALL16 (SMALL | CODE16)
535 #define BIG16 (BIG | CODE16)
539 #define INLINE __inline__
545 #define ENCODE_RELAX_STATE(type, size) \
546 ((relax_substateT) (((type) << 2) | (size)))
547 #define TYPE_FROM_RELAX_STATE(s) \
549 #define DISP_SIZE_FROM_RELAX_STATE(s) \
550 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
552 /* This table is used by relax_frag to promote short jumps to long
553 ones where necessary. SMALL (short) jumps may be promoted to BIG
554 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
555 don't allow a short jump in a 32 bit code segment to be promoted to
556 a 16 bit offset jump because it's slower (requires data size
557 prefix), and doesn't work, unless the destination is in the bottom
558 64k of the code segment (The top 16 bits of eip are zeroed). */
560 const relax_typeS md_relax_table[] =
563 1) most positive reach of this state,
564 2) most negative reach of this state,
565 3) how many bytes this mode will have in the variable part of the frag
566 4) which index into the table to try if we can't fit into this one. */
568 /* UNCOND_JUMP states. */
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
570 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
571 /* dword jmp adds 4 bytes to frag:
572 0 extra opcode bytes, 4 displacement bytes. */
574 /* word jmp adds 2 byte2 to frag:
575 0 extra opcode bytes, 2 displacement bytes. */
578 /* COND_JUMP states. */
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
580 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
581 /* dword conditionals adds 5 bytes to frag:
582 1 extra opcode byte, 4 displacement bytes. */
584 /* word conditionals add 3 bytes to frag:
585 1 extra opcode byte, 2 displacement bytes. */
588 /* COND_JUMP86 states. */
589 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
590 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
591 /* dword conditionals adds 5 bytes to frag:
592 1 extra opcode byte, 4 displacement bytes. */
594 /* word conditionals add 4 bytes to frag:
595 1 displacement byte and a 3 byte long branch insn. */
599 static const arch_entry cpu_arch[] =
601 /* Do not replace the first two entries - i386_target_format()
602 relies on them being there in this order. */
603 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
604 CPU_GENERIC32_FLAGS, 0, 0 },
605 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
606 CPU_GENERIC64_FLAGS, 0, 0 },
607 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
608 CPU_NONE_FLAGS, 0, 0 },
609 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
610 CPU_I186_FLAGS, 0, 0 },
611 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
612 CPU_I286_FLAGS, 0, 0 },
613 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
614 CPU_I386_FLAGS, 0, 0 },
615 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
616 CPU_I486_FLAGS, 0, 0 },
617 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
618 CPU_I586_FLAGS, 0, 0 },
619 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
620 CPU_I686_FLAGS, 0, 0 },
621 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
622 CPU_I586_FLAGS, 0, 0 },
623 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
624 CPU_PENTIUMPRO_FLAGS, 0, 0 },
625 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
626 CPU_P2_FLAGS, 0, 0 },
627 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
628 CPU_P3_FLAGS, 0, 0 },
629 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
630 CPU_P4_FLAGS, 0, 0 },
631 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
632 CPU_CORE_FLAGS, 0, 0 },
633 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
634 CPU_NOCONA_FLAGS, 0, 0 },
635 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
636 CPU_CORE_FLAGS, 1, 0 },
637 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
638 CPU_CORE_FLAGS, 0, 0 },
639 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
640 CPU_CORE2_FLAGS, 1, 0 },
641 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
642 CPU_CORE2_FLAGS, 0, 0 },
643 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
644 CPU_COREI7_FLAGS, 0, 0 },
645 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
646 CPU_L1OM_FLAGS, 0, 0 },
647 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
648 CPU_K1OM_FLAGS, 0, 0 },
649 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
650 CPU_K6_FLAGS, 0, 0 },
651 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
652 CPU_K6_2_FLAGS, 0, 0 },
653 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
654 CPU_ATHLON_FLAGS, 0, 0 },
655 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
656 CPU_K8_FLAGS, 1, 0 },
657 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
658 CPU_K8_FLAGS, 0, 0 },
659 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
660 CPU_K8_FLAGS, 0, 0 },
661 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
662 CPU_AMDFAM10_FLAGS, 0, 0 },
663 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
664 CPU_BDVER1_FLAGS, 0, 0 },
665 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
666 CPU_BDVER2_FLAGS, 0, 0 },
667 { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
668 CPU_BDVER3_FLAGS, 0, 0 },
669 { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
670 CPU_BTVER1_FLAGS, 0, 0 },
671 { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
672 CPU_BTVER2_FLAGS, 0, 0 },
673 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
674 CPU_8087_FLAGS, 0, 0 },
675 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
676 CPU_287_FLAGS, 0, 0 },
677 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
678 CPU_387_FLAGS, 0, 0 },
679 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
680 CPU_ANY87_FLAGS, 0, 1 },
681 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
682 CPU_MMX_FLAGS, 0, 0 },
683 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
684 CPU_3DNOWA_FLAGS, 0, 1 },
685 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
686 CPU_SSE_FLAGS, 0, 0 },
687 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
688 CPU_SSE2_FLAGS, 0, 0 },
689 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
690 CPU_SSE3_FLAGS, 0, 0 },
691 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
692 CPU_SSSE3_FLAGS, 0, 0 },
693 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
694 CPU_SSE4_1_FLAGS, 0, 0 },
695 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
696 CPU_SSE4_2_FLAGS, 0, 0 },
697 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
698 CPU_SSE4_2_FLAGS, 0, 0 },
699 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
700 CPU_ANY_SSE_FLAGS, 0, 1 },
701 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
702 CPU_AVX_FLAGS, 0, 0 },
703 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
704 CPU_AVX2_FLAGS, 0, 0 },
705 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
706 CPU_ANY_AVX_FLAGS, 0, 1 },
707 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
708 CPU_VMX_FLAGS, 0, 0 },
709 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
710 CPU_VMFUNC_FLAGS, 0, 0 },
711 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
712 CPU_SMX_FLAGS, 0, 0 },
713 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
714 CPU_XSAVE_FLAGS, 0, 0 },
715 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
716 CPU_XSAVEOPT_FLAGS, 0, 0 },
717 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
718 CPU_AES_FLAGS, 0, 0 },
719 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
720 CPU_PCLMUL_FLAGS, 0, 0 },
721 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
722 CPU_PCLMUL_FLAGS, 1, 0 },
723 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
724 CPU_FSGSBASE_FLAGS, 0, 0 },
725 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
726 CPU_RDRND_FLAGS, 0, 0 },
727 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
728 CPU_F16C_FLAGS, 0, 0 },
729 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
730 CPU_BMI2_FLAGS, 0, 0 },
731 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
732 CPU_FMA_FLAGS, 0, 0 },
733 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
734 CPU_FMA4_FLAGS, 0, 0 },
735 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
736 CPU_XOP_FLAGS, 0, 0 },
737 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
738 CPU_LWP_FLAGS, 0, 0 },
739 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
740 CPU_MOVBE_FLAGS, 0, 0 },
741 { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
742 CPU_CX16_FLAGS, 0, 0 },
743 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
744 CPU_EPT_FLAGS, 0, 0 },
745 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
746 CPU_LZCNT_FLAGS, 0, 0 },
747 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
748 CPU_HLE_FLAGS, 0, 0 },
749 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
750 CPU_RTM_FLAGS, 0, 0 },
751 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
752 CPU_INVPCID_FLAGS, 0, 0 },
753 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
754 CPU_CLFLUSH_FLAGS, 0, 0 },
755 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
756 CPU_NOP_FLAGS, 0, 0 },
757 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
758 CPU_SYSCALL_FLAGS, 0, 0 },
759 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
760 CPU_RDTSCP_FLAGS, 0, 0 },
761 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
762 CPU_3DNOW_FLAGS, 0, 0 },
763 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
764 CPU_3DNOWA_FLAGS, 0, 0 },
765 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
766 CPU_PADLOCK_FLAGS, 0, 0 },
767 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
768 CPU_SVME_FLAGS, 1, 0 },
769 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
770 CPU_SVME_FLAGS, 0, 0 },
771 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
772 CPU_SSE4A_FLAGS, 0, 0 },
773 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
774 CPU_ABM_FLAGS, 0, 0 },
775 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
776 CPU_BMI_FLAGS, 0, 0 },
777 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
778 CPU_TBM_FLAGS, 0, 0 },
779 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
780 CPU_ADX_FLAGS, 0, 0 },
781 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
782 CPU_RDSEED_FLAGS, 0, 0 },
783 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
784 CPU_PRFCHW_FLAGS, 0, 0 },
788 /* Like s_lcomm_internal in gas/read.c but the alignment string
789 is allowed to be optional. */
792 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
799 && *input_line_pointer == ',')
801 align = parse_align (needs_align - 1);
803 if (align == (addressT) -1)
818 bss_alloc (symbolP, size, align);
823 pe_lcomm (int needs_align)
825 s_comm_internal (needs_align * 2, pe_lcomm_internal);
829 const pseudo_typeS md_pseudo_table[] =
831 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
832 {"align", s_align_bytes, 0},
834 {"align", s_align_ptwo, 0},
836 {"arch", set_cpu_arch, 0},
840 {"lcomm", pe_lcomm, 1},
842 {"ffloat", float_cons, 'f'},
843 {"dfloat", float_cons, 'd'},
844 {"tfloat", float_cons, 'x'},
846 {"slong", signed_cons, 4},
847 {"noopt", s_ignore, 0},
848 {"optim", s_ignore, 0},
849 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
850 {"code16", set_code_flag, CODE_16BIT},
851 {"code32", set_code_flag, CODE_32BIT},
852 {"code64", set_code_flag, CODE_64BIT},
853 {"intel_syntax", set_intel_syntax, 1},
854 {"att_syntax", set_intel_syntax, 0},
855 {"intel_mnemonic", set_intel_mnemonic, 1},
856 {"att_mnemonic", set_intel_mnemonic, 0},
857 {"allow_index_reg", set_allow_index_reg, 1},
858 {"disallow_index_reg", set_allow_index_reg, 0},
859 {"sse_check", set_check, 0},
860 {"operand_check", set_check, 1},
861 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
862 {"largecomm", handle_large_common, 0},
864 {"file", (void (*) (int)) dwarf2_directive_file, 0},
865 {"loc", dwarf2_directive_loc, 0},
866 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
869 {"secrel32", pe_directive_secrel, 0},
874 /* For interface with expression (). */
875 extern char *input_line_pointer;
877 /* Hash table for instruction mnemonic lookup. */
878 static struct hash_control *op_hash;
880 /* Hash table for register lookup. */
881 static struct hash_control *reg_hash;
884 i386_align_code (fragS *fragP, int count)
886 /* Various efficient no-op patterns for aligning code labels.
887 Note: Don't try to assemble the instructions in the comments.
888 0L and 0w are not legal. */
889 static const char f32_1[] =
891 static const char f32_2[] =
892 {0x66,0x90}; /* xchg %ax,%ax */
893 static const char f32_3[] =
894 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
895 static const char f32_4[] =
896 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
897 static const char f32_5[] =
899 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
900 static const char f32_6[] =
901 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
902 static const char f32_7[] =
903 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
904 static const char f32_8[] =
906 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
907 static const char f32_9[] =
908 {0x89,0xf6, /* movl %esi,%esi */
909 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
910 static const char f32_10[] =
911 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
912 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
913 static const char f32_11[] =
914 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
915 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
916 static const char f32_12[] =
917 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
918 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
919 static const char f32_13[] =
920 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
921 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
922 static const char f32_14[] =
923 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
924 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
925 static const char f16_3[] =
926 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
927 static const char f16_4[] =
928 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
929 static const char f16_5[] =
931 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
932 static const char f16_6[] =
933 {0x89,0xf6, /* mov %si,%si */
934 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
935 static const char f16_7[] =
936 {0x8d,0x74,0x00, /* lea 0(%si),%si */
937 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
938 static const char f16_8[] =
939 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
940 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
941 static const char jump_31[] =
942 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
943 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
944 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
945 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
946 static const char *const f32_patt[] = {
947 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
948 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
950 static const char *const f16_patt[] = {
951 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
954 static const char alt_3[] =
956 /* nopl 0(%[re]ax) */
957 static const char alt_4[] =
958 {0x0f,0x1f,0x40,0x00};
959 /* nopl 0(%[re]ax,%[re]ax,1) */
960 static const char alt_5[] =
961 {0x0f,0x1f,0x44,0x00,0x00};
962 /* nopw 0(%[re]ax,%[re]ax,1) */
963 static const char alt_6[] =
964 {0x66,0x0f,0x1f,0x44,0x00,0x00};
965 /* nopl 0L(%[re]ax) */
966 static const char alt_7[] =
967 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
968 /* nopl 0L(%[re]ax,%[re]ax,1) */
969 static const char alt_8[] =
970 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
971 /* nopw 0L(%[re]ax,%[re]ax,1) */
972 static const char alt_9[] =
973 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
974 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
975 static const char alt_10[] =
976 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
978 nopw %cs:0L(%[re]ax,%[re]ax,1) */
979 static const char alt_long_11[] =
981 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
984 nopw %cs:0L(%[re]ax,%[re]ax,1) */
985 static const char alt_long_12[] =
988 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
992 nopw %cs:0L(%[re]ax,%[re]ax,1) */
993 static const char alt_long_13[] =
997 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1002 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1003 static const char alt_long_14[] =
1008 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1014 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1015 static const char alt_long_15[] =
1021 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1022 /* nopl 0(%[re]ax,%[re]ax,1)
1023 nopw 0(%[re]ax,%[re]ax,1) */
1024 static const char alt_short_11[] =
1025 {0x0f,0x1f,0x44,0x00,0x00,
1026 0x66,0x0f,0x1f,0x44,0x00,0x00};
1027 /* nopw 0(%[re]ax,%[re]ax,1)
1028 nopw 0(%[re]ax,%[re]ax,1) */
1029 static const char alt_short_12[] =
1030 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1031 0x66,0x0f,0x1f,0x44,0x00,0x00};
1032 /* nopw 0(%[re]ax,%[re]ax,1)
1034 static const char alt_short_13[] =
1035 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1036 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1039 static const char alt_short_14[] =
1040 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1041 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1043 nopl 0L(%[re]ax,%[re]ax,1) */
1044 static const char alt_short_15[] =
1045 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1046 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1047 static const char *const alt_short_patt[] = {
1048 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1049 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1050 alt_short_14, alt_short_15
1052 static const char *const alt_long_patt[] = {
1053 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1054 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1055 alt_long_14, alt_long_15
1058 /* Only align for at least a positive non-zero boundary. */
1059 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1062 /* We need to decide which NOP sequence to use for 32bit and
1063 64bit. When -mtune= is used:
1065 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1066 PROCESSOR_GENERIC32, f32_patt will be used.
1067 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1068 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1069 PROCESSOR_GENERIC64, alt_long_patt will be used.
1070 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8 and
1071 PROCESSOR_AMDFAM10, PROCESSOR_BD and PROCESSOR_BT, alt_short_patt
1074 When -mtune= isn't used, alt_long_patt will be used if
1075 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1078 When -march= or .arch is used, we can't use anything beyond
1079 cpu_arch_isa_flags. */
1081 if (flag_code == CODE_16BIT)
1085 memcpy (fragP->fr_literal + fragP->fr_fix,
1087 /* Adjust jump offset. */
1088 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1091 memcpy (fragP->fr_literal + fragP->fr_fix,
1092 f16_patt[count - 1], count);
1096 const char *const *patt = NULL;
1098 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1100 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1101 switch (cpu_arch_tune)
1103 case PROCESSOR_UNKNOWN:
1104 /* We use cpu_arch_isa_flags to check if we SHOULD
1105 optimize with nops. */
1106 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1107 patt = alt_long_patt;
1111 case PROCESSOR_PENTIUM4:
1112 case PROCESSOR_NOCONA:
1113 case PROCESSOR_CORE:
1114 case PROCESSOR_CORE2:
1115 case PROCESSOR_COREI7:
1116 case PROCESSOR_L1OM:
1117 case PROCESSOR_K1OM:
1118 case PROCESSOR_GENERIC64:
1119 patt = alt_long_patt;
1122 case PROCESSOR_ATHLON:
1124 case PROCESSOR_AMDFAM10:
1127 patt = alt_short_patt;
1129 case PROCESSOR_I386:
1130 case PROCESSOR_I486:
1131 case PROCESSOR_PENTIUM:
1132 case PROCESSOR_PENTIUMPRO:
1133 case PROCESSOR_GENERIC32:
1140 switch (fragP->tc_frag_data.tune)
1142 case PROCESSOR_UNKNOWN:
1143 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1144 PROCESSOR_UNKNOWN. */
1148 case PROCESSOR_I386:
1149 case PROCESSOR_I486:
1150 case PROCESSOR_PENTIUM:
1152 case PROCESSOR_ATHLON:
1154 case PROCESSOR_AMDFAM10:
1157 case PROCESSOR_GENERIC32:
1158 /* We use cpu_arch_isa_flags to check if we CAN optimize
1160 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1161 patt = alt_short_patt;
1165 case PROCESSOR_PENTIUMPRO:
1166 case PROCESSOR_PENTIUM4:
1167 case PROCESSOR_NOCONA:
1168 case PROCESSOR_CORE:
1169 case PROCESSOR_CORE2:
1170 case PROCESSOR_COREI7:
1171 case PROCESSOR_L1OM:
1172 case PROCESSOR_K1OM:
1173 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1174 patt = alt_long_patt;
1178 case PROCESSOR_GENERIC64:
1179 patt = alt_long_patt;
1184 if (patt == f32_patt)
1186 /* If the padding is less than 15 bytes, we use the normal
1187 ones. Otherwise, we use a jump instruction and adjust
1191 /* For 64bit, the limit is 3 bytes. */
1192 if (flag_code == CODE_64BIT
1193 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1198 memcpy (fragP->fr_literal + fragP->fr_fix,
1199 patt[count - 1], count);
1202 memcpy (fragP->fr_literal + fragP->fr_fix,
1204 /* Adjust jump offset. */
1205 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1210 /* Maximum length of an instruction is 15 byte. If the
1211 padding is greater than 15 bytes and we don't use jump,
1212 we have to break it into smaller pieces. */
1213 int padding = count;
1214 while (padding > 15)
1217 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1222 memcpy (fragP->fr_literal + fragP->fr_fix,
1223 patt [padding - 1], padding);
1226 fragP->fr_var = count;
1230 operand_type_all_zero (const union i386_operand_type *x)
1232 switch (ARRAY_SIZE(x->array))
1241 return !x->array[0];
1248 operand_type_set (union i386_operand_type *x, unsigned int v)
1250 switch (ARRAY_SIZE(x->array))
1265 operand_type_equal (const union i386_operand_type *x,
1266 const union i386_operand_type *y)
1268 switch (ARRAY_SIZE(x->array))
1271 if (x->array[2] != y->array[2])
1274 if (x->array[1] != y->array[1])
1277 return x->array[0] == y->array[0];
1285 cpu_flags_all_zero (const union i386_cpu_flags *x)
1287 switch (ARRAY_SIZE(x->array))
1296 return !x->array[0];
1303 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1305 switch (ARRAY_SIZE(x->array))
1320 cpu_flags_equal (const union i386_cpu_flags *x,
1321 const union i386_cpu_flags *y)
1323 switch (ARRAY_SIZE(x->array))
1326 if (x->array[2] != y->array[2])
1329 if (x->array[1] != y->array[1])
1332 return x->array[0] == y->array[0];
1340 cpu_flags_check_cpu64 (i386_cpu_flags f)
1342 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1343 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1346 static INLINE i386_cpu_flags
1347 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1349 switch (ARRAY_SIZE (x.array))
1352 x.array [2] &= y.array [2];
1354 x.array [1] &= y.array [1];
1356 x.array [0] &= y.array [0];
1364 static INLINE i386_cpu_flags
1365 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1367 switch (ARRAY_SIZE (x.array))
1370 x.array [2] |= y.array [2];
1372 x.array [1] |= y.array [1];
1374 x.array [0] |= y.array [0];
1382 static INLINE i386_cpu_flags
1383 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1385 switch (ARRAY_SIZE (x.array))
1388 x.array [2] &= ~y.array [2];
1390 x.array [1] &= ~y.array [1];
1392 x.array [0] &= ~y.array [0];
1400 #define CPU_FLAGS_ARCH_MATCH 0x1
1401 #define CPU_FLAGS_64BIT_MATCH 0x2
1402 #define CPU_FLAGS_AES_MATCH 0x4
1403 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1404 #define CPU_FLAGS_AVX_MATCH 0x10
1406 #define CPU_FLAGS_32BIT_MATCH \
1407 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1408 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1409 #define CPU_FLAGS_PERFECT_MATCH \
1410 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1412 /* Return CPU flags match bits. */
1415 cpu_flags_match (const insn_template *t)
1417 i386_cpu_flags x = t->cpu_flags;
1418 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1420 x.bitfield.cpu64 = 0;
1421 x.bitfield.cpuno64 = 0;
1423 if (cpu_flags_all_zero (&x))
1425 /* This instruction is available on all archs. */
1426 match |= CPU_FLAGS_32BIT_MATCH;
1430 /* This instruction is available only on some archs. */
1431 i386_cpu_flags cpu = cpu_arch_flags;
1433 cpu.bitfield.cpu64 = 0;
1434 cpu.bitfield.cpuno64 = 0;
1435 cpu = cpu_flags_and (x, cpu);
1436 if (!cpu_flags_all_zero (&cpu))
1438 if (x.bitfield.cpuavx)
1440 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1441 if (cpu.bitfield.cpuavx)
1443 /* Check SSE2AVX. */
1444 if (!t->opcode_modifier.sse2avx|| sse2avx)
1446 match |= (CPU_FLAGS_ARCH_MATCH
1447 | CPU_FLAGS_AVX_MATCH);
1449 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1450 match |= CPU_FLAGS_AES_MATCH;
1452 if (!x.bitfield.cpupclmul
1453 || cpu.bitfield.cpupclmul)
1454 match |= CPU_FLAGS_PCLMUL_MATCH;
1458 match |= CPU_FLAGS_ARCH_MATCH;
1461 match |= CPU_FLAGS_32BIT_MATCH;
1467 static INLINE i386_operand_type
1468 operand_type_and (i386_operand_type x, i386_operand_type y)
1470 switch (ARRAY_SIZE (x.array))
1473 x.array [2] &= y.array [2];
1475 x.array [1] &= y.array [1];
1477 x.array [0] &= y.array [0];
1485 static INLINE i386_operand_type
1486 operand_type_or (i386_operand_type x, i386_operand_type y)
1488 switch (ARRAY_SIZE (x.array))
1491 x.array [2] |= y.array [2];
1493 x.array [1] |= y.array [1];
1495 x.array [0] |= y.array [0];
1503 static INLINE i386_operand_type
1504 operand_type_xor (i386_operand_type x, i386_operand_type y)
1506 switch (ARRAY_SIZE (x.array))
1509 x.array [2] ^= y.array [2];
1511 x.array [1] ^= y.array [1];
1513 x.array [0] ^= y.array [0];
1521 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1522 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1523 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1524 static const i386_operand_type inoutportreg
1525 = OPERAND_TYPE_INOUTPORTREG;
1526 static const i386_operand_type reg16_inoutportreg
1527 = OPERAND_TYPE_REG16_INOUTPORTREG;
1528 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1529 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1530 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1531 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1532 static const i386_operand_type anydisp
1533 = OPERAND_TYPE_ANYDISP;
1534 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1535 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1536 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1537 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1538 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1539 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1540 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1541 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1542 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1543 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1544 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1545 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1556 operand_type_check (i386_operand_type t, enum operand_type c)
1561 return (t.bitfield.reg8
1564 || t.bitfield.reg64);
1567 return (t.bitfield.imm8
1571 || t.bitfield.imm32s
1572 || t.bitfield.imm64);
1575 return (t.bitfield.disp8
1576 || t.bitfield.disp16
1577 || t.bitfield.disp32
1578 || t.bitfield.disp32s
1579 || t.bitfield.disp64);
1582 return (t.bitfield.disp8
1583 || t.bitfield.disp16
1584 || t.bitfield.disp32
1585 || t.bitfield.disp32s
1586 || t.bitfield.disp64
1587 || t.bitfield.baseindex);
1596 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1597 operand J for instruction template T. */
1600 match_reg_size (const insn_template *t, unsigned int j)
1602 return !((i.types[j].bitfield.byte
1603 && !t->operand_types[j].bitfield.byte)
1604 || (i.types[j].bitfield.word
1605 && !t->operand_types[j].bitfield.word)
1606 || (i.types[j].bitfield.dword
1607 && !t->operand_types[j].bitfield.dword)
1608 || (i.types[j].bitfield.qword
1609 && !t->operand_types[j].bitfield.qword));
1612 /* Return 1 if there is no conflict in any size on operand J for
1613 instruction template T. */
1616 match_mem_size (const insn_template *t, unsigned int j)
1618 return (match_reg_size (t, j)
1619 && !((i.types[j].bitfield.unspecified
1620 && !t->operand_types[j].bitfield.unspecified)
1621 || (i.types[j].bitfield.fword
1622 && !t->operand_types[j].bitfield.fword)
1623 || (i.types[j].bitfield.tbyte
1624 && !t->operand_types[j].bitfield.tbyte)
1625 || (i.types[j].bitfield.xmmword
1626 && !t->operand_types[j].bitfield.xmmword)
1627 || (i.types[j].bitfield.ymmword
1628 && !t->operand_types[j].bitfield.ymmword)));
1631 /* Return 1 if there is no size conflict on any operands for
1632 instruction template T. */
1635 operand_size_match (const insn_template *t)
1640 /* Don't check jump instructions. */
1641 if (t->opcode_modifier.jump
1642 || t->opcode_modifier.jumpbyte
1643 || t->opcode_modifier.jumpdword
1644 || t->opcode_modifier.jumpintersegment)
1647 /* Check memory and accumulator operand size. */
1648 for (j = 0; j < i.operands; j++)
1650 if (t->operand_types[j].bitfield.anysize)
1653 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1659 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1668 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1671 i.error = operand_size_mismatch;
1675 /* Check reverse. */
1676 gas_assert (i.operands == 2);
1679 for (j = 0; j < 2; j++)
1681 if (t->operand_types[j].bitfield.acc
1682 && !match_reg_size (t, j ? 0 : 1))
1685 if (i.types[j].bitfield.mem
1686 && !match_mem_size (t, j ? 0 : 1))
1694 operand_type_match (i386_operand_type overlap,
1695 i386_operand_type given)
1697 i386_operand_type temp = overlap;
1699 temp.bitfield.jumpabsolute = 0;
1700 temp.bitfield.unspecified = 0;
1701 temp.bitfield.byte = 0;
1702 temp.bitfield.word = 0;
1703 temp.bitfield.dword = 0;
1704 temp.bitfield.fword = 0;
1705 temp.bitfield.qword = 0;
1706 temp.bitfield.tbyte = 0;
1707 temp.bitfield.xmmword = 0;
1708 temp.bitfield.ymmword = 0;
1709 if (operand_type_all_zero (&temp))
1712 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1713 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1717 i.error = operand_type_mismatch;
1721 /* If given types g0 and g1 are registers they must be of the same type
1722 unless the expected operand type register overlap is null.
1723 Note that Acc in a template matches every size of reg. */
1726 operand_type_register_match (i386_operand_type m0,
1727 i386_operand_type g0,
1728 i386_operand_type t0,
1729 i386_operand_type m1,
1730 i386_operand_type g1,
1731 i386_operand_type t1)
1733 if (!operand_type_check (g0, reg))
1736 if (!operand_type_check (g1, reg))
1739 if (g0.bitfield.reg8 == g1.bitfield.reg8
1740 && g0.bitfield.reg16 == g1.bitfield.reg16
1741 && g0.bitfield.reg32 == g1.bitfield.reg32
1742 && g0.bitfield.reg64 == g1.bitfield.reg64)
1745 if (m0.bitfield.acc)
1747 t0.bitfield.reg8 = 1;
1748 t0.bitfield.reg16 = 1;
1749 t0.bitfield.reg32 = 1;
1750 t0.bitfield.reg64 = 1;
1753 if (m1.bitfield.acc)
1755 t1.bitfield.reg8 = 1;
1756 t1.bitfield.reg16 = 1;
1757 t1.bitfield.reg32 = 1;
1758 t1.bitfield.reg64 = 1;
1761 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1762 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1763 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1764 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1767 i.error = register_type_mismatch;
1772 static INLINE unsigned int
1773 register_number (const reg_entry *r)
1775 unsigned int nr = r->reg_num;
1777 if (r->reg_flags & RegRex)
1783 static INLINE unsigned int
1784 mode_from_disp_size (i386_operand_type t)
1786 if (t.bitfield.disp8)
1788 else if (t.bitfield.disp16
1789 || t.bitfield.disp32
1790 || t.bitfield.disp32s)
1797 fits_in_signed_byte (offsetT num)
1799 return (num >= -128) && (num <= 127);
1803 fits_in_unsigned_byte (offsetT num)
1805 return (num & 0xff) == num;
1809 fits_in_unsigned_word (offsetT num)
1811 return (num & 0xffff) == num;
1815 fits_in_signed_word (offsetT num)
1817 return (-32768 <= num) && (num <= 32767);
1821 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1826 return (!(((offsetT) -1 << 31) & num)
1827 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1829 } /* fits_in_signed_long() */
1832 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1837 return (num & (((offsetT) 2 << 31) - 1)) == num;
1839 } /* fits_in_unsigned_long() */
1842 fits_in_imm4 (offsetT num)
1844 return (num & 0xf) == num;
1847 static i386_operand_type
1848 smallest_imm_type (offsetT num)
1850 i386_operand_type t;
1852 operand_type_set (&t, 0);
1853 t.bitfield.imm64 = 1;
1855 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1857 /* This code is disabled on the 486 because all the Imm1 forms
1858 in the opcode table are slower on the i486. They're the
1859 versions with the implicitly specified single-position
1860 displacement, which has another syntax if you really want to
1862 t.bitfield.imm1 = 1;
1863 t.bitfield.imm8 = 1;
1864 t.bitfield.imm8s = 1;
1865 t.bitfield.imm16 = 1;
1866 t.bitfield.imm32 = 1;
1867 t.bitfield.imm32s = 1;
1869 else if (fits_in_signed_byte (num))
1871 t.bitfield.imm8 = 1;
1872 t.bitfield.imm8s = 1;
1873 t.bitfield.imm16 = 1;
1874 t.bitfield.imm32 = 1;
1875 t.bitfield.imm32s = 1;
1877 else if (fits_in_unsigned_byte (num))
1879 t.bitfield.imm8 = 1;
1880 t.bitfield.imm16 = 1;
1881 t.bitfield.imm32 = 1;
1882 t.bitfield.imm32s = 1;
1884 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1886 t.bitfield.imm16 = 1;
1887 t.bitfield.imm32 = 1;
1888 t.bitfield.imm32s = 1;
1890 else if (fits_in_signed_long (num))
1892 t.bitfield.imm32 = 1;
1893 t.bitfield.imm32s = 1;
1895 else if (fits_in_unsigned_long (num))
1896 t.bitfield.imm32 = 1;
1902 offset_in_range (offsetT val, int size)
1908 case 1: mask = ((addressT) 1 << 8) - 1; break;
1909 case 2: mask = ((addressT) 1 << 16) - 1; break;
1910 case 4: mask = ((addressT) 2 << 31) - 1; break;
1912 case 8: mask = ((addressT) 2 << 63) - 1; break;
1918 /* If BFD64, sign extend val for 32bit address mode. */
1919 if (flag_code != CODE_64BIT
1920 || i.prefix[ADDR_PREFIX])
1921 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1922 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1925 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1927 char buf1[40], buf2[40];
1929 sprint_value (buf1, val);
1930 sprint_value (buf2, val & mask);
1931 as_warn (_("%s shortened to %s"), buf1, buf2);
1945 a. PREFIX_EXIST if attempting to add a prefix where one from the
1946 same class already exists.
1947 b. PREFIX_LOCK if lock prefix is added.
1948 c. PREFIX_REP if rep/repne prefix is added.
1949 d. PREFIX_OTHER if other prefix is added.
1952 static enum PREFIX_GROUP
1953 add_prefix (unsigned int prefix)
1955 enum PREFIX_GROUP ret = PREFIX_OTHER;
1958 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1959 && flag_code == CODE_64BIT)
1961 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1962 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1963 && (prefix & (REX_R | REX_X | REX_B))))
1974 case CS_PREFIX_OPCODE:
1975 case DS_PREFIX_OPCODE:
1976 case ES_PREFIX_OPCODE:
1977 case FS_PREFIX_OPCODE:
1978 case GS_PREFIX_OPCODE:
1979 case SS_PREFIX_OPCODE:
1983 case REPNE_PREFIX_OPCODE:
1984 case REPE_PREFIX_OPCODE:
1989 case LOCK_PREFIX_OPCODE:
1998 case ADDR_PREFIX_OPCODE:
2002 case DATA_PREFIX_OPCODE:
2006 if (i.prefix[q] != 0)
2014 i.prefix[q] |= prefix;
2017 as_bad (_("same type of prefix used twice"));
2023 update_code_flag (int value, int check)
2025 PRINTF_LIKE ((*as_error));
2027 flag_code = (enum flag_code) value;
2028 if (flag_code == CODE_64BIT)
2030 cpu_arch_flags.bitfield.cpu64 = 1;
2031 cpu_arch_flags.bitfield.cpuno64 = 0;
2035 cpu_arch_flags.bitfield.cpu64 = 0;
2036 cpu_arch_flags.bitfield.cpuno64 = 1;
2038 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm )
2041 as_error = as_fatal;
2044 (*as_error) (_("64bit mode not supported on `%s'."),
2045 cpu_arch_name ? cpu_arch_name : default_arch);
2047 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2050 as_error = as_fatal;
2053 (*as_error) (_("32bit mode not supported on `%s'."),
2054 cpu_arch_name ? cpu_arch_name : default_arch);
2056 stackop_size = '\0';
2060 set_code_flag (int value)
2062 update_code_flag (value, 0);
2066 set_16bit_gcc_code_flag (int new_code_flag)
2068 flag_code = (enum flag_code) new_code_flag;
2069 if (flag_code != CODE_16BIT)
2071 cpu_arch_flags.bitfield.cpu64 = 0;
2072 cpu_arch_flags.bitfield.cpuno64 = 1;
2073 stackop_size = LONG_MNEM_SUFFIX;
2077 set_intel_syntax (int syntax_flag)
2079 /* Find out if register prefixing is specified. */
2080 int ask_naked_reg = 0;
2083 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2085 char *string = input_line_pointer;
2086 int e = get_symbol_end ();
2088 if (strcmp (string, "prefix") == 0)
2090 else if (strcmp (string, "noprefix") == 0)
2093 as_bad (_("bad argument to syntax directive."));
2094 *input_line_pointer = e;
2096 demand_empty_rest_of_line ();
2098 intel_syntax = syntax_flag;
2100 if (ask_naked_reg == 0)
2101 allow_naked_reg = (intel_syntax
2102 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2104 allow_naked_reg = (ask_naked_reg < 0);
2106 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2108 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2109 identifier_chars['$'] = intel_syntax ? '$' : 0;
2110 register_prefix = allow_naked_reg ? "" : "%";
2114 set_intel_mnemonic (int mnemonic_flag)
2116 intel_mnemonic = mnemonic_flag;
2120 set_allow_index_reg (int flag)
2122 allow_index_reg = flag;
2126 set_check (int what)
2128 enum check_kind *kind;
2133 kind = &operand_check;
2144 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2146 char *string = input_line_pointer;
2147 int e = get_symbol_end ();
2149 if (strcmp (string, "none") == 0)
2151 else if (strcmp (string, "warning") == 0)
2152 *kind = check_warning;
2153 else if (strcmp (string, "error") == 0)
2154 *kind = check_error;
2156 as_bad (_("bad argument to %s_check directive."), str);
2157 *input_line_pointer = e;
2160 as_bad (_("missing argument for %s_check directive"), str);
2162 demand_empty_rest_of_line ();
2166 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2167 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2169 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2170 static const char *arch;
2172 /* Intel LIOM is only supported on ELF. */
2178 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2179 use default_arch. */
2180 arch = cpu_arch_name;
2182 arch = default_arch;
2185 /* If we are targeting Intel L1OM, we must enable it. */
2186 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2187 || new_flag.bitfield.cpul1om)
2190 /* If we are targeting Intel K1OM, we must enable it. */
2191 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2192 || new_flag.bitfield.cpuk1om)
2195 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2200 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2204 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2206 char *string = input_line_pointer;
2207 int e = get_symbol_end ();
2209 i386_cpu_flags flags;
2211 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2213 if (strcmp (string, cpu_arch[j].name) == 0)
2215 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2219 cpu_arch_name = cpu_arch[j].name;
2220 cpu_sub_arch_name = NULL;
2221 cpu_arch_flags = cpu_arch[j].flags;
2222 if (flag_code == CODE_64BIT)
2224 cpu_arch_flags.bitfield.cpu64 = 1;
2225 cpu_arch_flags.bitfield.cpuno64 = 0;
2229 cpu_arch_flags.bitfield.cpu64 = 0;
2230 cpu_arch_flags.bitfield.cpuno64 = 1;
2232 cpu_arch_isa = cpu_arch[j].type;
2233 cpu_arch_isa_flags = cpu_arch[j].flags;
2234 if (!cpu_arch_tune_set)
2236 cpu_arch_tune = cpu_arch_isa;
2237 cpu_arch_tune_flags = cpu_arch_isa_flags;
2242 if (!cpu_arch[j].negated)
2243 flags = cpu_flags_or (cpu_arch_flags,
2246 flags = cpu_flags_and_not (cpu_arch_flags,
2248 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2250 if (cpu_sub_arch_name)
2252 char *name = cpu_sub_arch_name;
2253 cpu_sub_arch_name = concat (name,
2255 (const char *) NULL);
2259 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2260 cpu_arch_flags = flags;
2261 cpu_arch_isa_flags = flags;
2263 *input_line_pointer = e;
2264 demand_empty_rest_of_line ();
2268 if (j >= ARRAY_SIZE (cpu_arch))
2269 as_bad (_("no such architecture: `%s'"), string);
2271 *input_line_pointer = e;
2274 as_bad (_("missing cpu architecture"));
2276 no_cond_jump_promotion = 0;
2277 if (*input_line_pointer == ','
2278 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2280 char *string = ++input_line_pointer;
2281 int e = get_symbol_end ();
2283 if (strcmp (string, "nojumps") == 0)
2284 no_cond_jump_promotion = 1;
2285 else if (strcmp (string, "jumps") == 0)
2288 as_bad (_("no such architecture modifier: `%s'"), string);
2290 *input_line_pointer = e;
2293 demand_empty_rest_of_line ();
2296 enum bfd_architecture
2299 if (cpu_arch_isa == PROCESSOR_L1OM)
2301 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2302 || flag_code != CODE_64BIT)
2303 as_fatal (_("Intel L1OM is 64bit ELF only"));
2304 return bfd_arch_l1om;
2306 else if (cpu_arch_isa == PROCESSOR_K1OM)
2308 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2309 || flag_code != CODE_64BIT)
2310 as_fatal (_("Intel K1OM is 64bit ELF only"));
2311 return bfd_arch_k1om;
2314 return bfd_arch_i386;
2320 if (!strncmp (default_arch, "x86_64", 6))
2322 if (cpu_arch_isa == PROCESSOR_L1OM)
2324 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2325 || default_arch[6] != '\0')
2326 as_fatal (_("Intel L1OM is 64bit ELF only"));
2327 return bfd_mach_l1om;
2329 else if (cpu_arch_isa == PROCESSOR_K1OM)
2331 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2332 || default_arch[6] != '\0')
2333 as_fatal (_("Intel K1OM is 64bit ELF only"));
2334 return bfd_mach_k1om;
2336 else if (default_arch[6] == '\0')
2337 return bfd_mach_x86_64;
2339 return bfd_mach_x64_32;
2341 else if (!strcmp (default_arch, "i386"))
2342 return bfd_mach_i386_i386;
2344 as_fatal (_("unknown architecture"));
2350 const char *hash_err;
2352 /* Initialize op_hash hash table. */
2353 op_hash = hash_new ();
2356 const insn_template *optab;
2357 templates *core_optab;
2359 /* Setup for loop. */
2361 core_optab = (templates *) xmalloc (sizeof (templates));
2362 core_optab->start = optab;
2367 if (optab->name == NULL
2368 || strcmp (optab->name, (optab - 1)->name) != 0)
2370 /* different name --> ship out current template list;
2371 add to hash table; & begin anew. */
2372 core_optab->end = optab;
2373 hash_err = hash_insert (op_hash,
2375 (void *) core_optab);
2378 as_fatal (_("can't hash %s: %s"),
2382 if (optab->name == NULL)
2384 core_optab = (templates *) xmalloc (sizeof (templates));
2385 core_optab->start = optab;
2390 /* Initialize reg_hash hash table. */
2391 reg_hash = hash_new ();
2393 const reg_entry *regtab;
2394 unsigned int regtab_size = i386_regtab_size;
2396 for (regtab = i386_regtab; regtab_size--; regtab++)
2398 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2400 as_fatal (_("can't hash %s: %s"),
2406 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2411 for (c = 0; c < 256; c++)
2416 mnemonic_chars[c] = c;
2417 register_chars[c] = c;
2418 operand_chars[c] = c;
2420 else if (ISLOWER (c))
2422 mnemonic_chars[c] = c;
2423 register_chars[c] = c;
2424 operand_chars[c] = c;
2426 else if (ISUPPER (c))
2428 mnemonic_chars[c] = TOLOWER (c);
2429 register_chars[c] = mnemonic_chars[c];
2430 operand_chars[c] = c;
2433 if (ISALPHA (c) || ISDIGIT (c))
2434 identifier_chars[c] = c;
2437 identifier_chars[c] = c;
2438 operand_chars[c] = c;
2443 identifier_chars['@'] = '@';
2446 identifier_chars['?'] = '?';
2447 operand_chars['?'] = '?';
2449 digit_chars['-'] = '-';
2450 mnemonic_chars['_'] = '_';
2451 mnemonic_chars['-'] = '-';
2452 mnemonic_chars['.'] = '.';
2453 identifier_chars['_'] = '_';
2454 identifier_chars['.'] = '.';
2456 for (p = operand_special_chars; *p != '\0'; p++)
2457 operand_chars[(unsigned char) *p] = *p;
2460 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2463 record_alignment (text_section, 2);
2464 record_alignment (data_section, 2);
2465 record_alignment (bss_section, 2);
2469 if (flag_code == CODE_64BIT)
2471 #if defined (OBJ_COFF) && defined (TE_PE)
2472 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2475 x86_dwarf2_return_column = 16;
2477 x86_cie_data_alignment = -8;
2481 x86_dwarf2_return_column = 8;
2482 x86_cie_data_alignment = -4;
2487 i386_print_statistics (FILE *file)
2489 hash_print_statistics (file, "i386 opcode", op_hash);
2490 hash_print_statistics (file, "i386 register", reg_hash);
2495 /* Debugging routines for md_assemble. */
2496 static void pte (insn_template *);
2497 static void pt (i386_operand_type);
2498 static void pe (expressionS *);
2499 static void ps (symbolS *);
2502 pi (char *line, i386_insn *x)
2506 fprintf (stdout, "%s: template ", line);
2508 fprintf (stdout, " address: base %s index %s scale %x\n",
2509 x->base_reg ? x->base_reg->reg_name : "none",
2510 x->index_reg ? x->index_reg->reg_name : "none",
2511 x->log2_scale_factor);
2512 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2513 x->rm.mode, x->rm.reg, x->rm.regmem);
2514 fprintf (stdout, " sib: base %x index %x scale %x\n",
2515 x->sib.base, x->sib.index, x->sib.scale);
2516 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2517 (x->rex & REX_W) != 0,
2518 (x->rex & REX_R) != 0,
2519 (x->rex & REX_X) != 0,
2520 (x->rex & REX_B) != 0);
2521 for (j = 0; j < x->operands; j++)
2523 fprintf (stdout, " #%d: ", j + 1);
2525 fprintf (stdout, "\n");
2526 if (x->types[j].bitfield.reg8
2527 || x->types[j].bitfield.reg16
2528 || x->types[j].bitfield.reg32
2529 || x->types[j].bitfield.reg64
2530 || x->types[j].bitfield.regmmx
2531 || x->types[j].bitfield.regxmm
2532 || x->types[j].bitfield.regymm
2533 || x->types[j].bitfield.sreg2
2534 || x->types[j].bitfield.sreg3
2535 || x->types[j].bitfield.control
2536 || x->types[j].bitfield.debug
2537 || x->types[j].bitfield.test)
2538 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2539 if (operand_type_check (x->types[j], imm))
2541 if (operand_type_check (x->types[j], disp))
2542 pe (x->op[j].disps);
2547 pte (insn_template *t)
2550 fprintf (stdout, " %d operands ", t->operands);
2551 fprintf (stdout, "opcode %x ", t->base_opcode);
2552 if (t->extension_opcode != None)
2553 fprintf (stdout, "ext %x ", t->extension_opcode);
2554 if (t->opcode_modifier.d)
2555 fprintf (stdout, "D");
2556 if (t->opcode_modifier.w)
2557 fprintf (stdout, "W");
2558 fprintf (stdout, "\n");
2559 for (j = 0; j < t->operands; j++)
2561 fprintf (stdout, " #%d type ", j + 1);
2562 pt (t->operand_types[j]);
2563 fprintf (stdout, "\n");
2570 fprintf (stdout, " operation %d\n", e->X_op);
2571 fprintf (stdout, " add_number %ld (%lx)\n",
2572 (long) e->X_add_number, (long) e->X_add_number);
2573 if (e->X_add_symbol)
2575 fprintf (stdout, " add_symbol ");
2576 ps (e->X_add_symbol);
2577 fprintf (stdout, "\n");
2581 fprintf (stdout, " op_symbol ");
2582 ps (e->X_op_symbol);
2583 fprintf (stdout, "\n");
2590 fprintf (stdout, "%s type %s%s",
2592 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2593 segment_name (S_GET_SEGMENT (s)));
2596 static struct type_name
2598 i386_operand_type mask;
2601 const type_names[] =
2603 { OPERAND_TYPE_REG8, "r8" },
2604 { OPERAND_TYPE_REG16, "r16" },
2605 { OPERAND_TYPE_REG32, "r32" },
2606 { OPERAND_TYPE_REG64, "r64" },
2607 { OPERAND_TYPE_IMM8, "i8" },
2608 { OPERAND_TYPE_IMM8, "i8s" },
2609 { OPERAND_TYPE_IMM16, "i16" },
2610 { OPERAND_TYPE_IMM32, "i32" },
2611 { OPERAND_TYPE_IMM32S, "i32s" },
2612 { OPERAND_TYPE_IMM64, "i64" },
2613 { OPERAND_TYPE_IMM1, "i1" },
2614 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2615 { OPERAND_TYPE_DISP8, "d8" },
2616 { OPERAND_TYPE_DISP16, "d16" },
2617 { OPERAND_TYPE_DISP32, "d32" },
2618 { OPERAND_TYPE_DISP32S, "d32s" },
2619 { OPERAND_TYPE_DISP64, "d64" },
2620 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2621 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2622 { OPERAND_TYPE_CONTROL, "control reg" },
2623 { OPERAND_TYPE_TEST, "test reg" },
2624 { OPERAND_TYPE_DEBUG, "debug reg" },
2625 { OPERAND_TYPE_FLOATREG, "FReg" },
2626 { OPERAND_TYPE_FLOATACC, "FAcc" },
2627 { OPERAND_TYPE_SREG2, "SReg2" },
2628 { OPERAND_TYPE_SREG3, "SReg3" },
2629 { OPERAND_TYPE_ACC, "Acc" },
2630 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2631 { OPERAND_TYPE_REGMMX, "rMMX" },
2632 { OPERAND_TYPE_REGXMM, "rXMM" },
2633 { OPERAND_TYPE_REGYMM, "rYMM" },
2634 { OPERAND_TYPE_ESSEG, "es" },
2638 pt (i386_operand_type t)
2641 i386_operand_type a;
2643 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2645 a = operand_type_and (t, type_names[j].mask);
2646 if (!operand_type_all_zero (&a))
2647 fprintf (stdout, "%s, ", type_names[j].name);
2652 #endif /* DEBUG386 */
2654 static bfd_reloc_code_real_type
2655 reloc (unsigned int size,
2658 bfd_reloc_code_real_type other)
2660 if (other != NO_RELOC)
2662 reloc_howto_type *rel;
2667 case BFD_RELOC_X86_64_GOT32:
2668 return BFD_RELOC_X86_64_GOT64;
2670 case BFD_RELOC_X86_64_PLTOFF64:
2671 return BFD_RELOC_X86_64_PLTOFF64;
2673 case BFD_RELOC_X86_64_GOTPC32:
2674 other = BFD_RELOC_X86_64_GOTPC64;
2676 case BFD_RELOC_X86_64_GOTPCREL:
2677 other = BFD_RELOC_X86_64_GOTPCREL64;
2679 case BFD_RELOC_X86_64_TPOFF32:
2680 other = BFD_RELOC_X86_64_TPOFF64;
2682 case BFD_RELOC_X86_64_DTPOFF32:
2683 other = BFD_RELOC_X86_64_DTPOFF64;
2689 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2690 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2693 rel = bfd_reloc_type_lookup (stdoutput, other);
2695 as_bad (_("unknown relocation (%u)"), other);
2696 else if (size != bfd_get_reloc_size (rel))
2697 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2698 bfd_get_reloc_size (rel),
2700 else if (pcrel && !rel->pc_relative)
2701 as_bad (_("non-pc-relative relocation for pc-relative field"));
2702 else if ((rel->complain_on_overflow == complain_overflow_signed
2704 || (rel->complain_on_overflow == complain_overflow_unsigned
2706 as_bad (_("relocated field and relocation type differ in signedness"));
2715 as_bad (_("there are no unsigned pc-relative relocations"));
2718 case 1: return BFD_RELOC_8_PCREL;
2719 case 2: return BFD_RELOC_16_PCREL;
2720 case 4: return BFD_RELOC_32_PCREL;
2721 case 8: return BFD_RELOC_64_PCREL;
2723 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2730 case 4: return BFD_RELOC_X86_64_32S;
2735 case 1: return BFD_RELOC_8;
2736 case 2: return BFD_RELOC_16;
2737 case 4: return BFD_RELOC_32;
2738 case 8: return BFD_RELOC_64;
2740 as_bad (_("cannot do %s %u byte relocation"),
2741 sign > 0 ? "signed" : "unsigned", size);
2747 /* Here we decide which fixups can be adjusted to make them relative to
2748 the beginning of the section instead of the symbol. Basically we need
2749 to make sure that the dynamic relocations are done correctly, so in
2750 some cases we force the original symbol to be used. */
2753 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2755 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2759 /* Don't adjust pc-relative references to merge sections in 64-bit
2761 if (use_rela_relocations
2762 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2766 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2767 and changed later by validate_fix. */
2768 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2769 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2772 /* adjust_reloc_syms doesn't know about the GOT. */
2773 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2774 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2775 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2776 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2777 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2778 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2779 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2780 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2781 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2782 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2783 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2784 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2785 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2786 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2787 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2788 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2789 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2790 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2791 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2792 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2793 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2794 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2795 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2796 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2797 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2798 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2799 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2800 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2807 intel_float_operand (const char *mnemonic)
2809 /* Note that the value returned is meaningful only for opcodes with (memory)
2810 operands, hence the code here is free to improperly handle opcodes that
2811 have no operands (for better performance and smaller code). */
2813 if (mnemonic[0] != 'f')
2814 return 0; /* non-math */
2816 switch (mnemonic[1])
2818 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2819 the fs segment override prefix not currently handled because no
2820 call path can make opcodes without operands get here */
2822 return 2 /* integer op */;
2824 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2825 return 3; /* fldcw/fldenv */
2828 if (mnemonic[2] != 'o' /* fnop */)
2829 return 3; /* non-waiting control op */
2832 if (mnemonic[2] == 's')
2833 return 3; /* frstor/frstpm */
2836 if (mnemonic[2] == 'a')
2837 return 3; /* fsave */
2838 if (mnemonic[2] == 't')
2840 switch (mnemonic[3])
2842 case 'c': /* fstcw */
2843 case 'd': /* fstdw */
2844 case 'e': /* fstenv */
2845 case 's': /* fsts[gw] */
2851 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2852 return 0; /* fxsave/fxrstor are not really math ops */
2859 /* Build the VEX prefix. */
2862 build_vex_prefix (const insn_template *t)
2864 unsigned int register_specifier;
2865 unsigned int implied_prefix;
2866 unsigned int vector_length;
2868 /* Check register specifier. */
2869 if (i.vex.register_specifier)
2870 register_specifier = ~register_number (i.vex.register_specifier) & 0xf;
2872 register_specifier = 0xf;
2874 /* Use 2-byte VEX prefix by swappping destination and source
2877 && i.operands == i.reg_operands
2878 && i.tm.opcode_modifier.vexopcode == VEX0F
2879 && i.tm.opcode_modifier.s
2882 unsigned int xchg = i.operands - 1;
2883 union i386_op temp_op;
2884 i386_operand_type temp_type;
2886 temp_type = i.types[xchg];
2887 i.types[xchg] = i.types[0];
2888 i.types[0] = temp_type;
2889 temp_op = i.op[xchg];
2890 i.op[xchg] = i.op[0];
2893 gas_assert (i.rm.mode == 3);
2897 i.rm.regmem = i.rm.reg;
2900 /* Use the next insn. */
2904 if (i.tm.opcode_modifier.vex == VEXScalar)
2905 vector_length = avxscalar;
2907 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2909 switch ((i.tm.base_opcode >> 8) & 0xff)
2914 case DATA_PREFIX_OPCODE:
2917 case REPE_PREFIX_OPCODE:
2920 case REPNE_PREFIX_OPCODE:
2927 /* Use 2-byte VEX prefix if possible. */
2928 if (i.tm.opcode_modifier.vexopcode == VEX0F
2929 && i.tm.opcode_modifier.vexw != VEXW1
2930 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2932 /* 2-byte VEX prefix. */
2936 i.vex.bytes[0] = 0xc5;
2938 /* Check the REX.R bit. */
2939 r = (i.rex & REX_R) ? 0 : 1;
2940 i.vex.bytes[1] = (r << 7
2941 | register_specifier << 3
2942 | vector_length << 2
2947 /* 3-byte VEX prefix. */
2952 switch (i.tm.opcode_modifier.vexopcode)
2956 i.vex.bytes[0] = 0xc4;
2960 i.vex.bytes[0] = 0xc4;
2964 i.vex.bytes[0] = 0xc4;
2968 i.vex.bytes[0] = 0x8f;
2972 i.vex.bytes[0] = 0x8f;
2976 i.vex.bytes[0] = 0x8f;
2982 /* The high 3 bits of the second VEX byte are 1's compliment
2983 of RXB bits from REX. */
2984 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2986 /* Check the REX.W bit. */
2987 w = (i.rex & REX_W) ? 1 : 0;
2988 if (i.tm.opcode_modifier.vexw)
2993 if (i.tm.opcode_modifier.vexw == VEXW1)
2997 i.vex.bytes[2] = (w << 7
2998 | register_specifier << 3
2999 | vector_length << 2
3005 process_immext (void)
3009 if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
3012 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3013 with an opcode suffix which is coded in the same place as an
3014 8-bit immediate field would be.
3015 Here we check those operands and remove them afterwards. */
3018 for (x = 0; x < i.operands; x++)
3019 if (register_number (i.op[x].regs) != x)
3020 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3021 register_prefix, i.op[x].regs->reg_name, x + 1,
3027 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3028 which is coded in the same place as an 8-bit immediate field
3029 would be. Here we fake an 8-bit immediate operand from the
3030 opcode suffix stored in tm.extension_opcode.
3032 AVX instructions also use this encoding, for some of
3033 3 argument instructions. */
3035 gas_assert (i.imm_operands == 0
3037 || (i.tm.opcode_modifier.vex
3038 && i.operands <= 4)));
3040 exp = &im_expressions[i.imm_operands++];
3041 i.op[i.operands].imms = exp;
3042 i.types[i.operands] = imm8;
3044 exp->X_op = O_constant;
3045 exp->X_add_number = i.tm.extension_opcode;
3046 i.tm.extension_opcode = None;
3053 switch (i.tm.opcode_modifier.hleprefixok)
3058 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3059 as_bad (_("invalid instruction `%s' after `xacquire'"),
3062 as_bad (_("invalid instruction `%s' after `xrelease'"),
3066 if (i.prefix[LOCK_PREFIX])
3068 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3069 as_bad (_("missing `lock' with `xacquire'"));
3071 as_bad (_("missing `lock' with `xrelease'"));
3075 case HLEPrefixRelease:
3076 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3078 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3082 if (i.mem_operands == 0
3083 || !operand_type_check (i.types[i.operands - 1], anymem))
3085 as_bad (_("memory destination needed for instruction `%s'"
3086 " after `xrelease'"), i.tm.name);
3093 /* This is the guts of the machine-dependent assembler. LINE points to a
3094 machine dependent instruction. This function is supposed to emit
3095 the frags/bytes it assembles to. */
3098 md_assemble (char *line)
3101 char mnemonic[MAX_MNEM_SIZE];
3102 const insn_template *t;
3104 /* Initialize globals. */
3105 memset (&i, '\0', sizeof (i));
3106 for (j = 0; j < MAX_OPERANDS; j++)
3107 i.reloc[j] = NO_RELOC;
3108 memset (disp_expressions, '\0', sizeof (disp_expressions));
3109 memset (im_expressions, '\0', sizeof (im_expressions));
3110 save_stack_p = save_stack;
3112 /* First parse an instruction mnemonic & call i386_operand for the operands.
3113 We assume that the scrubber has arranged it so that line[0] is the valid
3114 start of a (possibly prefixed) mnemonic. */
3116 line = parse_insn (line, mnemonic);
3120 line = parse_operands (line, mnemonic);
3125 /* Now we've parsed the mnemonic into a set of templates, and have the
3126 operands at hand. */
3128 /* All intel opcodes have reversed operands except for "bound" and
3129 "enter". We also don't reverse intersegment "jmp" and "call"
3130 instructions with 2 immediate operands so that the immediate segment
3131 precedes the offset, as it does when in AT&T mode. */
3134 && (strcmp (mnemonic, "bound") != 0)
3135 && (strcmp (mnemonic, "invlpga") != 0)
3136 && !(operand_type_check (i.types[0], imm)
3137 && operand_type_check (i.types[1], imm)))
3140 /* The order of the immediates should be reversed
3141 for 2 immediates extrq and insertq instructions */
3142 if (i.imm_operands == 2
3143 && (strcmp (mnemonic, "extrq") == 0
3144 || strcmp (mnemonic, "insertq") == 0))
3145 swap_2_operands (0, 1);
3150 /* Don't optimize displacement for movabs since it only takes 64bit
3153 && i.disp_encoding != disp_encoding_32bit
3154 && (flag_code != CODE_64BIT
3155 || strcmp (mnemonic, "movabs") != 0))
3158 /* Next, we find a template that matches the given insn,
3159 making sure the overlap of the given operands types is consistent
3160 with the template operand types. */
3162 if (!(t = match_template ()))
3165 if (sse_check != check_none
3166 && !i.tm.opcode_modifier.noavx
3167 && (i.tm.cpu_flags.bitfield.cpusse
3168 || i.tm.cpu_flags.bitfield.cpusse2
3169 || i.tm.cpu_flags.bitfield.cpusse3
3170 || i.tm.cpu_flags.bitfield.cpussse3
3171 || i.tm.cpu_flags.bitfield.cpusse4_1
3172 || i.tm.cpu_flags.bitfield.cpusse4_2))
3174 (sse_check == check_warning
3176 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3179 /* Zap movzx and movsx suffix. The suffix has been set from
3180 "word ptr" or "byte ptr" on the source operand in Intel syntax
3181 or extracted from mnemonic in AT&T syntax. But we'll use
3182 the destination register to choose the suffix for encoding. */
3183 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3185 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3186 there is no suffix, the default will be byte extension. */
3187 if (i.reg_operands != 2
3190 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3195 if (i.tm.opcode_modifier.fwait)
3196 if (!add_prefix (FWAIT_OPCODE))
3199 /* Check for lock without a lockable instruction. Destination operand
3200 must be memory unless it is xchg (0x86). */
3201 if (i.prefix[LOCK_PREFIX]
3202 && (!i.tm.opcode_modifier.islockable
3203 || i.mem_operands == 0
3204 || (i.tm.base_opcode != 0x86
3205 && !operand_type_check (i.types[i.operands - 1], anymem))))
3207 as_bad (_("expecting lockable instruction after `lock'"));
3211 /* Check if HLE prefix is OK. */
3212 if (i.have_hle && !check_hle ())
3215 /* Check string instruction segment overrides. */
3216 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3218 if (!check_string ())
3220 i.disp_operands = 0;
3223 if (!process_suffix ())
3226 /* Update operand types. */
3227 for (j = 0; j < i.operands; j++)
3228 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3230 /* Make still unresolved immediate matches conform to size of immediate
3231 given in i.suffix. */
3232 if (!finalize_imm ())
3235 if (i.types[0].bitfield.imm1)
3236 i.imm_operands = 0; /* kludge for shift insns. */
3238 /* We only need to check those implicit registers for instructions
3239 with 3 operands or less. */
3240 if (i.operands <= 3)
3241 for (j = 0; j < i.operands; j++)
3242 if (i.types[j].bitfield.inoutportreg
3243 || i.types[j].bitfield.shiftcount
3244 || i.types[j].bitfield.acc
3245 || i.types[j].bitfield.floatacc)
3248 /* ImmExt should be processed after SSE2AVX. */
3249 if (!i.tm.opcode_modifier.sse2avx
3250 && i.tm.opcode_modifier.immext)
3253 /* For insns with operands there are more diddles to do to the opcode. */
3256 if (!process_operands ())
3259 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3261 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3262 as_warn (_("translating to `%sp'"), i.tm.name);
3265 if (i.tm.opcode_modifier.vex)
3266 build_vex_prefix (t);
3268 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3269 instructions may define INT_OPCODE as well, so avoid this corner
3270 case for those instructions that use MODRM. */
3271 if (i.tm.base_opcode == INT_OPCODE
3272 && !i.tm.opcode_modifier.modrm
3273 && i.op[0].imms->X_add_number == 3)
3275 i.tm.base_opcode = INT3_OPCODE;
3279 if ((i.tm.opcode_modifier.jump
3280 || i.tm.opcode_modifier.jumpbyte
3281 || i.tm.opcode_modifier.jumpdword)
3282 && i.op[0].disps->X_op == O_constant)
3284 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3285 the absolute address given by the constant. Since ix86 jumps and
3286 calls are pc relative, we need to generate a reloc. */
3287 i.op[0].disps->X_add_symbol = &abs_symbol;
3288 i.op[0].disps->X_op = O_symbol;
3291 if (i.tm.opcode_modifier.rex64)
3294 /* For 8 bit registers we need an empty rex prefix. Also if the
3295 instruction already has a prefix, we need to convert old
3296 registers to new ones. */
3298 if ((i.types[0].bitfield.reg8
3299 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3300 || (i.types[1].bitfield.reg8
3301 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3302 || ((i.types[0].bitfield.reg8
3303 || i.types[1].bitfield.reg8)
3308 i.rex |= REX_OPCODE;
3309 for (x = 0; x < 2; x++)
3311 /* Look for 8 bit operand that uses old registers. */
3312 if (i.types[x].bitfield.reg8
3313 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3315 /* In case it is "hi" register, give up. */
3316 if (i.op[x].regs->reg_num > 3)
3317 as_bad (_("can't encode register '%s%s' in an "
3318 "instruction requiring REX prefix."),
3319 register_prefix, i.op[x].regs->reg_name);
3321 /* Otherwise it is equivalent to the extended register.
3322 Since the encoding doesn't change this is merely
3323 cosmetic cleanup for debug output. */
3325 i.op[x].regs = i.op[x].regs + 8;
3331 add_prefix (REX_OPCODE | i.rex);
3333 /* We are ready to output the insn. */
3338 parse_insn (char *line, char *mnemonic)
3341 char *token_start = l;
3344 const insn_template *t;
3347 /* Non-zero if we found a prefix only acceptable with string insns. */
3348 const char *expecting_string_instruction = NULL;
3353 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3358 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3360 as_bad (_("no such instruction: `%s'"), token_start);
3365 if (!is_space_char (*l)
3366 && *l != END_OF_INSN
3368 || (*l != PREFIX_SEPARATOR
3371 as_bad (_("invalid character %s in mnemonic"),
3372 output_invalid (*l));
3375 if (token_start == l)
3377 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3378 as_bad (_("expecting prefix; got nothing"));
3380 as_bad (_("expecting mnemonic; got nothing"));
3384 /* Look up instruction (or prefix) via hash table. */
3385 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3387 if (*l != END_OF_INSN
3388 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3389 && current_templates
3390 && current_templates->start->opcode_modifier.isprefix)
3392 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3394 as_bad ((flag_code != CODE_64BIT
3395 ? _("`%s' is only supported in 64-bit mode")
3396 : _("`%s' is not supported in 64-bit mode")),
3397 current_templates->start->name);
3400 /* If we are in 16-bit mode, do not allow addr16 or data16.
3401 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3402 if ((current_templates->start->opcode_modifier.size16
3403 || current_templates->start->opcode_modifier.size32)
3404 && flag_code != CODE_64BIT
3405 && (current_templates->start->opcode_modifier.size32
3406 ^ (flag_code == CODE_16BIT)))
3408 as_bad (_("redundant %s prefix"),
3409 current_templates->start->name);
3412 /* Add prefix, checking for repeated prefixes. */
3413 switch (add_prefix (current_templates->start->base_opcode))
3418 if (current_templates->start->cpu_flags.bitfield.cpuhle)
3421 expecting_string_instruction = current_templates->start->name;
3426 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3433 if (!current_templates)
3435 /* Check if we should swap operand or force 32bit displacement in
3437 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3439 else if (mnem_p - 3 == dot_p
3442 i.disp_encoding = disp_encoding_8bit;
3443 else if (mnem_p - 4 == dot_p
3447 i.disp_encoding = disp_encoding_32bit;
3452 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3455 if (!current_templates)
3458 /* See if we can get a match by trimming off a suffix. */
3461 case WORD_MNEM_SUFFIX:
3462 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3463 i.suffix = SHORT_MNEM_SUFFIX;
3465 case BYTE_MNEM_SUFFIX:
3466 case QWORD_MNEM_SUFFIX:
3467 i.suffix = mnem_p[-1];
3469 current_templates = (const templates *) hash_find (op_hash,
3472 case SHORT_MNEM_SUFFIX:
3473 case LONG_MNEM_SUFFIX:
3476 i.suffix = mnem_p[-1];
3478 current_templates = (const templates *) hash_find (op_hash,
3487 if (intel_float_operand (mnemonic) == 1)
3488 i.suffix = SHORT_MNEM_SUFFIX;
3490 i.suffix = LONG_MNEM_SUFFIX;
3492 current_templates = (const templates *) hash_find (op_hash,
3497 if (!current_templates)
3499 as_bad (_("no such instruction: `%s'"), token_start);
3504 if (current_templates->start->opcode_modifier.jump
3505 || current_templates->start->opcode_modifier.jumpbyte)
3507 /* Check for a branch hint. We allow ",pt" and ",pn" for
3508 predict taken and predict not taken respectively.
3509 I'm not sure that branch hints actually do anything on loop
3510 and jcxz insns (JumpByte) for current Pentium4 chips. They
3511 may work in the future and it doesn't hurt to accept them
3513 if (l[0] == ',' && l[1] == 'p')
3517 if (!add_prefix (DS_PREFIX_OPCODE))
3521 else if (l[2] == 'n')
3523 if (!add_prefix (CS_PREFIX_OPCODE))
3529 /* Any other comma loses. */
3532 as_bad (_("invalid character %s in mnemonic"),
3533 output_invalid (*l));
3537 /* Check if instruction is supported on specified architecture. */
3539 for (t = current_templates->start; t < current_templates->end; ++t)
3541 supported |= cpu_flags_match (t);
3542 if (supported == CPU_FLAGS_PERFECT_MATCH)
3546 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3548 as_bad (flag_code == CODE_64BIT
3549 ? _("`%s' is not supported in 64-bit mode")
3550 : _("`%s' is only supported in 64-bit mode"),
3551 current_templates->start->name);
3554 if (supported != CPU_FLAGS_PERFECT_MATCH)
3556 as_bad (_("`%s' is not supported on `%s%s'"),
3557 current_templates->start->name,
3558 cpu_arch_name ? cpu_arch_name : default_arch,
3559 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3564 if (!cpu_arch_flags.bitfield.cpui386
3565 && (flag_code != CODE_16BIT))
3567 as_warn (_("use .code16 to ensure correct addressing mode"));
3570 /* Check for rep/repne without a string (or other allowed) instruction. */
3571 if (expecting_string_instruction)
3573 static templates override;
3575 for (t = current_templates->start; t < current_templates->end; ++t)
3576 if (t->opcode_modifier.repprefixok)
3578 if (t >= current_templates->end)
3580 as_bad (_("expecting string instruction after `%s'"),
3581 expecting_string_instruction);
3584 for (override.start = t; t < current_templates->end; ++t)
3585 if (!t->opcode_modifier.repprefixok)
3588 current_templates = &override;
3595 parse_operands (char *l, const char *mnemonic)
3599 /* 1 if operand is pending after ','. */
3600 unsigned int expecting_operand = 0;
3602 /* Non-zero if operand parens not balanced. */
3603 unsigned int paren_not_balanced;
3605 while (*l != END_OF_INSN)
3607 /* Skip optional white space before operand. */
3608 if (is_space_char (*l))
3610 if (!is_operand_char (*l) && *l != END_OF_INSN)
3612 as_bad (_("invalid character %s before operand %d"),
3613 output_invalid (*l),
3617 token_start = l; /* after white space */
3618 paren_not_balanced = 0;
3619 while (paren_not_balanced || *l != ',')
3621 if (*l == END_OF_INSN)
3623 if (paren_not_balanced)
3626 as_bad (_("unbalanced parenthesis in operand %d."),
3629 as_bad (_("unbalanced brackets in operand %d."),
3634 break; /* we are done */
3636 else if (!is_operand_char (*l) && !is_space_char (*l))
3638 as_bad (_("invalid character %s in operand %d"),
3639 output_invalid (*l),
3646 ++paren_not_balanced;
3648 --paren_not_balanced;
3653 ++paren_not_balanced;
3655 --paren_not_balanced;
3659 if (l != token_start)
3660 { /* Yes, we've read in another operand. */
3661 unsigned int operand_ok;
3662 this_operand = i.operands++;
3663 i.types[this_operand].bitfield.unspecified = 1;
3664 if (i.operands > MAX_OPERANDS)
3666 as_bad (_("spurious operands; (%d operands/instruction max)"),
3670 /* Now parse operand adding info to 'i' as we go along. */
3671 END_STRING_AND_SAVE (l);
3675 i386_intel_operand (token_start,
3676 intel_float_operand (mnemonic));
3678 operand_ok = i386_att_operand (token_start);
3680 RESTORE_END_STRING (l);
3686 if (expecting_operand)
3688 expecting_operand_after_comma:
3689 as_bad (_("expecting operand after ','; got nothing"));
3694 as_bad (_("expecting operand before ','; got nothing"));
3699 /* Now *l must be either ',' or END_OF_INSN. */
3702 if (*++l == END_OF_INSN)
3704 /* Just skip it, if it's \n complain. */
3705 goto expecting_operand_after_comma;
3707 expecting_operand = 1;
3714 swap_2_operands (int xchg1, int xchg2)
3716 union i386_op temp_op;
3717 i386_operand_type temp_type;
3718 enum bfd_reloc_code_real temp_reloc;
3720 temp_type = i.types[xchg2];
3721 i.types[xchg2] = i.types[xchg1];
3722 i.types[xchg1] = temp_type;
3723 temp_op = i.op[xchg2];
3724 i.op[xchg2] = i.op[xchg1];
3725 i.op[xchg1] = temp_op;
3726 temp_reloc = i.reloc[xchg2];
3727 i.reloc[xchg2] = i.reloc[xchg1];
3728 i.reloc[xchg1] = temp_reloc;
3732 swap_operands (void)
3738 swap_2_operands (1, i.operands - 2);
3741 swap_2_operands (0, i.operands - 1);
3747 if (i.mem_operands == 2)
3749 const seg_entry *temp_seg;
3750 temp_seg = i.seg[0];
3751 i.seg[0] = i.seg[1];
3752 i.seg[1] = temp_seg;
3756 /* Try to ensure constant immediates are represented in the smallest
3761 char guess_suffix = 0;
3765 guess_suffix = i.suffix;
3766 else if (i.reg_operands)
3768 /* Figure out a suffix from the last register operand specified.
3769 We can't do this properly yet, ie. excluding InOutPortReg,
3770 but the following works for instructions with immediates.
3771 In any case, we can't set i.suffix yet. */
3772 for (op = i.operands; --op >= 0;)
3773 if (i.types[op].bitfield.reg8)
3775 guess_suffix = BYTE_MNEM_SUFFIX;
3778 else if (i.types[op].bitfield.reg16)
3780 guess_suffix = WORD_MNEM_SUFFIX;
3783 else if (i.types[op].bitfield.reg32)
3785 guess_suffix = LONG_MNEM_SUFFIX;
3788 else if (i.types[op].bitfield.reg64)
3790 guess_suffix = QWORD_MNEM_SUFFIX;
3794 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3795 guess_suffix = WORD_MNEM_SUFFIX;
3797 for (op = i.operands; --op >= 0;)
3798 if (operand_type_check (i.types[op], imm))
3800 switch (i.op[op].imms->X_op)
3803 /* If a suffix is given, this operand may be shortened. */
3804 switch (guess_suffix)
3806 case LONG_MNEM_SUFFIX:
3807 i.types[op].bitfield.imm32 = 1;
3808 i.types[op].bitfield.imm64 = 1;
3810 case WORD_MNEM_SUFFIX:
3811 i.types[op].bitfield.imm16 = 1;
3812 i.types[op].bitfield.imm32 = 1;
3813 i.types[op].bitfield.imm32s = 1;
3814 i.types[op].bitfield.imm64 = 1;
3816 case BYTE_MNEM_SUFFIX:
3817 i.types[op].bitfield.imm8 = 1;
3818 i.types[op].bitfield.imm8s = 1;
3819 i.types[op].bitfield.imm16 = 1;
3820 i.types[op].bitfield.imm32 = 1;
3821 i.types[op].bitfield.imm32s = 1;
3822 i.types[op].bitfield.imm64 = 1;
3826 /* If this operand is at most 16 bits, convert it
3827 to a signed 16 bit number before trying to see
3828 whether it will fit in an even smaller size.
3829 This allows a 16-bit operand such as $0xffe0 to
3830 be recognised as within Imm8S range. */
3831 if ((i.types[op].bitfield.imm16)
3832 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3834 i.op[op].imms->X_add_number =
3835 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3837 if ((i.types[op].bitfield.imm32)
3838 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3841 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3842 ^ ((offsetT) 1 << 31))
3843 - ((offsetT) 1 << 31));
3846 = operand_type_or (i.types[op],
3847 smallest_imm_type (i.op[op].imms->X_add_number));
3849 /* We must avoid matching of Imm32 templates when 64bit
3850 only immediate is available. */
3851 if (guess_suffix == QWORD_MNEM_SUFFIX)
3852 i.types[op].bitfield.imm32 = 0;
3859 /* Symbols and expressions. */
3861 /* Convert symbolic operand to proper sizes for matching, but don't
3862 prevent matching a set of insns that only supports sizes other
3863 than those matching the insn suffix. */
3865 i386_operand_type mask, allowed;
3866 const insn_template *t;
3868 operand_type_set (&mask, 0);
3869 operand_type_set (&allowed, 0);
3871 for (t = current_templates->start;
3872 t < current_templates->end;
3874 allowed = operand_type_or (allowed,
3875 t->operand_types[op]);
3876 switch (guess_suffix)
3878 case QWORD_MNEM_SUFFIX:
3879 mask.bitfield.imm64 = 1;
3880 mask.bitfield.imm32s = 1;
3882 case LONG_MNEM_SUFFIX:
3883 mask.bitfield.imm32 = 1;
3885 case WORD_MNEM_SUFFIX:
3886 mask.bitfield.imm16 = 1;
3888 case BYTE_MNEM_SUFFIX:
3889 mask.bitfield.imm8 = 1;
3894 allowed = operand_type_and (mask, allowed);
3895 if (!operand_type_all_zero (&allowed))
3896 i.types[op] = operand_type_and (i.types[op], mask);
3903 /* Try to use the smallest displacement type too. */
3905 optimize_disp (void)
3909 for (op = i.operands; --op >= 0;)
3910 if (operand_type_check (i.types[op], disp))
3912 if (i.op[op].disps->X_op == O_constant)
3914 offsetT op_disp = i.op[op].disps->X_add_number;
3916 if (i.types[op].bitfield.disp16
3917 && (op_disp & ~(offsetT) 0xffff) == 0)
3919 /* If this operand is at most 16 bits, convert
3920 to a signed 16 bit number and don't use 64bit
3922 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3923 i.types[op].bitfield.disp64 = 0;
3925 if (i.types[op].bitfield.disp32
3926 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3928 /* If this operand is at most 32 bits, convert
3929 to a signed 32 bit number and don't use 64bit
3931 op_disp &= (((offsetT) 2 << 31) - 1);
3932 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3933 i.types[op].bitfield.disp64 = 0;
3935 if (!op_disp && i.types[op].bitfield.baseindex)
3937 i.types[op].bitfield.disp8 = 0;
3938 i.types[op].bitfield.disp16 = 0;
3939 i.types[op].bitfield.disp32 = 0;
3940 i.types[op].bitfield.disp32s = 0;
3941 i.types[op].bitfield.disp64 = 0;
3945 else if (flag_code == CODE_64BIT)
3947 if (fits_in_signed_long (op_disp))
3949 i.types[op].bitfield.disp64 = 0;
3950 i.types[op].bitfield.disp32s = 1;
3952 if (i.prefix[ADDR_PREFIX]
3953 && fits_in_unsigned_long (op_disp))
3954 i.types[op].bitfield.disp32 = 1;
3956 if ((i.types[op].bitfield.disp32
3957 || i.types[op].bitfield.disp32s
3958 || i.types[op].bitfield.disp16)
3959 && fits_in_signed_byte (op_disp))
3960 i.types[op].bitfield.disp8 = 1;
3962 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3963 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3965 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3966 i.op[op].disps, 0, i.reloc[op]);
3967 i.types[op].bitfield.disp8 = 0;
3968 i.types[op].bitfield.disp16 = 0;
3969 i.types[op].bitfield.disp32 = 0;
3970 i.types[op].bitfield.disp32s = 0;
3971 i.types[op].bitfield.disp64 = 0;
3974 /* We only support 64bit displacement on constants. */
3975 i.types[op].bitfield.disp64 = 0;
3979 /* Check if operands are valid for the instruction. */
3982 check_VecOperands (const insn_template *t)
3984 /* Without VSIB byte, we can't have a vector register for index. */
3985 if (!t->opcode_modifier.vecsib
3987 && (i.index_reg->reg_type.bitfield.regxmm
3988 || i.index_reg->reg_type.bitfield.regymm))
3990 i.error = unsupported_vector_index_register;
3994 /* For VSIB byte, we need a vector register for index, and all vector
3995 registers must be distinct. */
3996 if (t->opcode_modifier.vecsib)
3999 || !((t->opcode_modifier.vecsib == VecSIB128
4000 && i.index_reg->reg_type.bitfield.regxmm)
4001 || (t->opcode_modifier.vecsib == VecSIB256
4002 && i.index_reg->reg_type.bitfield.regymm)))
4004 i.error = invalid_vsib_address;
4008 gas_assert (i.reg_operands == 2);
4009 gas_assert (i.types[0].bitfield.regxmm
4010 || i.types[0].bitfield.regymm);
4011 gas_assert (i.types[2].bitfield.regxmm
4012 || i.types[2].bitfield.regymm);
4014 if (operand_check == check_none)
4016 if (register_number (i.op[0].regs) != register_number (i.index_reg)
4017 && register_number (i.op[2].regs) != register_number (i.index_reg)
4018 && register_number (i.op[0].regs) != register_number (i.op[2].regs))
4020 if (operand_check == check_error)
4022 i.error = invalid_vector_register_set;
4025 as_warn (_("mask, index, and destination registers should be distinct"));
4031 /* Check if operands are valid for the instruction. Update VEX
4035 VEX_check_operands (const insn_template *t)
4037 if (!t->opcode_modifier.vex)
4040 /* Only check VEX_Imm4, which must be the first operand. */
4041 if (t->operand_types[0].bitfield.vec_imm4)
4043 if (i.op[0].imms->X_op != O_constant
4044 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4050 /* Turn off Imm8 so that update_imm won't complain. */
4051 i.types[0] = vec_imm4;
4057 static const insn_template *
4058 match_template (void)
4060 /* Points to template once we've found it. */
4061 const insn_template *t;
4062 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4063 i386_operand_type overlap4;
4064 unsigned int found_reverse_match;
4065 i386_opcode_modifier suffix_check;
4066 i386_operand_type operand_types [MAX_OPERANDS];
4067 int addr_prefix_disp;
4069 unsigned int found_cpu_match;
4070 unsigned int check_register;
4071 enum i386_error specific_error = 0;
4073 #if MAX_OPERANDS != 5
4074 # error "MAX_OPERANDS must be 5."
4077 found_reverse_match = 0;
4078 addr_prefix_disp = -1;
4080 memset (&suffix_check, 0, sizeof (suffix_check));
4081 if (i.suffix == BYTE_MNEM_SUFFIX)
4082 suffix_check.no_bsuf = 1;
4083 else if (i.suffix == WORD_MNEM_SUFFIX)
4084 suffix_check.no_wsuf = 1;
4085 else if (i.suffix == SHORT_MNEM_SUFFIX)
4086 suffix_check.no_ssuf = 1;
4087 else if (i.suffix == LONG_MNEM_SUFFIX)
4088 suffix_check.no_lsuf = 1;
4089 else if (i.suffix == QWORD_MNEM_SUFFIX)
4090 suffix_check.no_qsuf = 1;
4091 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4092 suffix_check.no_ldsuf = 1;
4094 /* Must have right number of operands. */
4095 i.error = number_of_operands_mismatch;
4097 for (t = current_templates->start; t < current_templates->end; t++)
4099 addr_prefix_disp = -1;
4101 if (i.operands != t->operands)
4104 /* Check processor support. */
4105 i.error = unsupported;
4106 found_cpu_match = (cpu_flags_match (t)
4107 == CPU_FLAGS_PERFECT_MATCH);
4108 if (!found_cpu_match)
4111 /* Check old gcc support. */
4112 i.error = old_gcc_only;
4113 if (!old_gcc && t->opcode_modifier.oldgcc)
4116 /* Check AT&T mnemonic. */
4117 i.error = unsupported_with_intel_mnemonic;
4118 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4121 /* Check AT&T/Intel syntax. */
4122 i.error = unsupported_syntax;
4123 if ((intel_syntax && t->opcode_modifier.attsyntax)
4124 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4127 /* Check the suffix, except for some instructions in intel mode. */
4128 i.error = invalid_instruction_suffix;
4129 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4130 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4131 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4132 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4133 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4134 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4135 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4138 if (!operand_size_match (t))
4141 for (j = 0; j < MAX_OPERANDS; j++)
4142 operand_types[j] = t->operand_types[j];
4144 /* In general, don't allow 64-bit operands in 32-bit mode. */
4145 if (i.suffix == QWORD_MNEM_SUFFIX
4146 && flag_code != CODE_64BIT
4148 ? (!t->opcode_modifier.ignoresize
4149 && !intel_float_operand (t->name))
4150 : intel_float_operand (t->name) != 2)
4151 && ((!operand_types[0].bitfield.regmmx
4152 && !operand_types[0].bitfield.regxmm
4153 && !operand_types[0].bitfield.regymm)
4154 || (!operand_types[t->operands > 1].bitfield.regmmx
4155 && !!operand_types[t->operands > 1].bitfield.regxmm
4156 && !!operand_types[t->operands > 1].bitfield.regymm))
4157 && (t->base_opcode != 0x0fc7
4158 || t->extension_opcode != 1 /* cmpxchg8b */))
4161 /* In general, don't allow 32-bit operands on pre-386. */
4162 else if (i.suffix == LONG_MNEM_SUFFIX
4163 && !cpu_arch_flags.bitfield.cpui386
4165 ? (!t->opcode_modifier.ignoresize
4166 && !intel_float_operand (t->name))
4167 : intel_float_operand (t->name) != 2)
4168 && ((!operand_types[0].bitfield.regmmx
4169 && !operand_types[0].bitfield.regxmm)
4170 || (!operand_types[t->operands > 1].bitfield.regmmx
4171 && !!operand_types[t->operands > 1].bitfield.regxmm)))
4174 /* Do not verify operands when there are none. */
4178 /* We've found a match; break out of loop. */
4182 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4183 into Disp32/Disp16/Disp32 operand. */
4184 if (i.prefix[ADDR_PREFIX] != 0)
4186 /* There should be only one Disp operand. */
4190 for (j = 0; j < MAX_OPERANDS; j++)
4192 if (operand_types[j].bitfield.disp16)
4194 addr_prefix_disp = j;
4195 operand_types[j].bitfield.disp32 = 1;
4196 operand_types[j].bitfield.disp16 = 0;
4202 for (j = 0; j < MAX_OPERANDS; j++)
4204 if (operand_types[j].bitfield.disp32)
4206 addr_prefix_disp = j;
4207 operand_types[j].bitfield.disp32 = 0;
4208 operand_types[j].bitfield.disp16 = 1;
4214 for (j = 0; j < MAX_OPERANDS; j++)
4216 if (operand_types[j].bitfield.disp64)
4218 addr_prefix_disp = j;
4219 operand_types[j].bitfield.disp64 = 0;
4220 operand_types[j].bitfield.disp32 = 1;
4228 /* We check register size if needed. */
4229 check_register = t->opcode_modifier.checkregsize;
4230 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4231 switch (t->operands)
4234 if (!operand_type_match (overlap0, i.types[0]))
4238 /* xchg %eax, %eax is a special case. It is an aliase for nop
4239 only in 32bit mode and we can use opcode 0x90. In 64bit
4240 mode, we can't use 0x90 for xchg %eax, %eax since it should
4241 zero-extend %eax to %rax. */
4242 if (flag_code == CODE_64BIT
4243 && t->base_opcode == 0x90
4244 && operand_type_equal (&i.types [0], &acc32)
4245 && operand_type_equal (&i.types [1], &acc32))
4249 /* If we swap operand in encoding, we either match
4250 the next one or reverse direction of operands. */
4251 if (t->opcode_modifier.s)
4253 else if (t->opcode_modifier.d)
4258 /* If we swap operand in encoding, we match the next one. */
4259 if (i.swap_operand && t->opcode_modifier.s)
4263 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4264 if (!operand_type_match (overlap0, i.types[0])
4265 || !operand_type_match (overlap1, i.types[1])
4267 && !operand_type_register_match (overlap0, i.types[0],
4269 overlap1, i.types[1],
4272 /* Check if other direction is valid ... */
4273 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4277 /* Try reversing direction of operands. */
4278 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4279 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4280 if (!operand_type_match (overlap0, i.types[0])
4281 || !operand_type_match (overlap1, i.types[1])
4283 && !operand_type_register_match (overlap0,
4290 /* Does not match either direction. */
4293 /* found_reverse_match holds which of D or FloatDR
4295 if (t->opcode_modifier.d)
4296 found_reverse_match = Opcode_D;
4297 else if (t->opcode_modifier.floatd)
4298 found_reverse_match = Opcode_FloatD;
4300 found_reverse_match = 0;
4301 if (t->opcode_modifier.floatr)
4302 found_reverse_match |= Opcode_FloatR;
4306 /* Found a forward 2 operand match here. */
4307 switch (t->operands)
4310 overlap4 = operand_type_and (i.types[4],
4313 overlap3 = operand_type_and (i.types[3],
4316 overlap2 = operand_type_and (i.types[2],
4321 switch (t->operands)
4324 if (!operand_type_match (overlap4, i.types[4])
4325 || !operand_type_register_match (overlap3,
4333 if (!operand_type_match (overlap3, i.types[3])
4335 && !operand_type_register_match (overlap2,
4343 /* Here we make use of the fact that there are no
4344 reverse match 3 operand instructions, and all 3
4345 operand instructions only need to be checked for
4346 register consistency between operands 2 and 3. */
4347 if (!operand_type_match (overlap2, i.types[2])
4349 && !operand_type_register_match (overlap1,
4359 /* Found either forward/reverse 2, 3 or 4 operand match here:
4360 slip through to break. */
4362 if (!found_cpu_match)
4364 found_reverse_match = 0;
4368 /* Check if vector and VEX operands are valid. */
4369 if (check_VecOperands (t) || VEX_check_operands (t))
4371 specific_error = i.error;
4375 /* We've found a match; break out of loop. */
4379 if (t == current_templates->end)
4381 /* We found no match. */
4382 const char *err_msg;
4383 switch (specific_error ? specific_error : i.error)
4387 case operand_size_mismatch:
4388 err_msg = _("operand size mismatch");
4390 case operand_type_mismatch:
4391 err_msg = _("operand type mismatch");
4393 case register_type_mismatch:
4394 err_msg = _("register type mismatch");
4396 case number_of_operands_mismatch:
4397 err_msg = _("number of operands mismatch");
4399 case invalid_instruction_suffix:
4400 err_msg = _("invalid instruction suffix");
4403 err_msg = _("constant doesn't fit in 4 bits");
4406 err_msg = _("only supported with old gcc");
4408 case unsupported_with_intel_mnemonic:
4409 err_msg = _("unsupported with Intel mnemonic");
4411 case unsupported_syntax:
4412 err_msg = _("unsupported syntax");
4415 as_bad (_("unsupported instruction `%s'"),
4416 current_templates->start->name);
4418 case invalid_vsib_address:
4419 err_msg = _("invalid VSIB address");
4421 case invalid_vector_register_set:
4422 err_msg = _("mask, index, and destination registers must be distinct");
4424 case unsupported_vector_index_register:
4425 err_msg = _("unsupported vector index register");
4428 as_bad (_("%s for `%s'"), err_msg,
4429 current_templates->start->name);
4433 if (!quiet_warnings)
4436 && (i.types[0].bitfield.jumpabsolute
4437 != operand_types[0].bitfield.jumpabsolute))
4439 as_warn (_("indirect %s without `*'"), t->name);
4442 if (t->opcode_modifier.isprefix
4443 && t->opcode_modifier.ignoresize)
4445 /* Warn them that a data or address size prefix doesn't
4446 affect assembly of the next line of code. */
4447 as_warn (_("stand-alone `%s' prefix"), t->name);
4451 /* Copy the template we found. */
4454 if (addr_prefix_disp != -1)
4455 i.tm.operand_types[addr_prefix_disp]
4456 = operand_types[addr_prefix_disp];
4458 if (found_reverse_match)
4460 /* If we found a reverse match we must alter the opcode
4461 direction bit. found_reverse_match holds bits to change
4462 (different for int & float insns). */
4464 i.tm.base_opcode ^= found_reverse_match;
4466 i.tm.operand_types[0] = operand_types[1];
4467 i.tm.operand_types[1] = operand_types[0];
4476 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4477 if (i.tm.operand_types[mem_op].bitfield.esseg)
4479 if (i.seg[0] != NULL && i.seg[0] != &es)
4481 as_bad (_("`%s' operand %d must use `%ses' segment"),
4487 /* There's only ever one segment override allowed per instruction.
4488 This instruction possibly has a legal segment override on the
4489 second operand, so copy the segment to where non-string
4490 instructions store it, allowing common code. */
4491 i.seg[0] = i.seg[1];
4493 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4495 if (i.seg[1] != NULL && i.seg[1] != &es)
4497 as_bad (_("`%s' operand %d must use `%ses' segment"),
4508 process_suffix (void)
4510 /* If matched instruction specifies an explicit instruction mnemonic
4512 if (i.tm.opcode_modifier.size16)
4513 i.suffix = WORD_MNEM_SUFFIX;
4514 else if (i.tm.opcode_modifier.size32)
4515 i.suffix = LONG_MNEM_SUFFIX;
4516 else if (i.tm.opcode_modifier.size64)
4517 i.suffix = QWORD_MNEM_SUFFIX;
4518 else if (i.reg_operands)
4520 /* If there's no instruction mnemonic suffix we try to invent one
4521 based on register operands. */
4524 /* We take i.suffix from the last register operand specified,
4525 Destination register type is more significant than source
4526 register type. crc32 in SSE4.2 prefers source register
4528 if (i.tm.base_opcode == 0xf20f38f1)
4530 if (i.types[0].bitfield.reg16)
4531 i.suffix = WORD_MNEM_SUFFIX;
4532 else if (i.types[0].bitfield.reg32)
4533 i.suffix = LONG_MNEM_SUFFIX;
4534 else if (i.types[0].bitfield.reg64)
4535 i.suffix = QWORD_MNEM_SUFFIX;
4537 else if (i.tm.base_opcode == 0xf20f38f0)
4539 if (i.types[0].bitfield.reg8)
4540 i.suffix = BYTE_MNEM_SUFFIX;
4547 if (i.tm.base_opcode == 0xf20f38f1
4548 || i.tm.base_opcode == 0xf20f38f0)
4550 /* We have to know the operand size for crc32. */
4551 as_bad (_("ambiguous memory operand size for `%s`"),
4556 for (op = i.operands; --op >= 0;)
4557 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4559 if (i.types[op].bitfield.reg8)
4561 i.suffix = BYTE_MNEM_SUFFIX;
4564 else if (i.types[op].bitfield.reg16)
4566 i.suffix = WORD_MNEM_SUFFIX;
4569 else if (i.types[op].bitfield.reg32)
4571 i.suffix = LONG_MNEM_SUFFIX;
4574 else if (i.types[op].bitfield.reg64)
4576 i.suffix = QWORD_MNEM_SUFFIX;
4582 else if (i.suffix == BYTE_MNEM_SUFFIX)
4585 && i.tm.opcode_modifier.ignoresize
4586 && i.tm.opcode_modifier.no_bsuf)
4588 else if (!check_byte_reg ())
4591 else if (i.suffix == LONG_MNEM_SUFFIX)
4594 && i.tm.opcode_modifier.ignoresize
4595 && i.tm.opcode_modifier.no_lsuf)
4597 else if (!check_long_reg ())
4600 else if (i.suffix == QWORD_MNEM_SUFFIX)
4603 && i.tm.opcode_modifier.ignoresize
4604 && i.tm.opcode_modifier.no_qsuf)
4606 else if (!check_qword_reg ())
4609 else if (i.suffix == WORD_MNEM_SUFFIX)
4612 && i.tm.opcode_modifier.ignoresize
4613 && i.tm.opcode_modifier.no_wsuf)
4615 else if (!check_word_reg ())
4618 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4619 || i.suffix == YMMWORD_MNEM_SUFFIX)
4621 /* Skip if the instruction has x/y suffix. match_template
4622 should check if it is a valid suffix. */
4624 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4625 /* Do nothing if the instruction is going to ignore the prefix. */
4630 else if (i.tm.opcode_modifier.defaultsize
4632 /* exclude fldenv/frstor/fsave/fstenv */
4633 && i.tm.opcode_modifier.no_ssuf)
4635 i.suffix = stackop_size;
4637 else if (intel_syntax
4639 && (i.tm.operand_types[0].bitfield.jumpabsolute
4640 || i.tm.opcode_modifier.jumpbyte
4641 || i.tm.opcode_modifier.jumpintersegment
4642 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4643 && i.tm.extension_opcode <= 3)))
4648 if (!i.tm.opcode_modifier.no_qsuf)
4650 i.suffix = QWORD_MNEM_SUFFIX;
4654 if (!i.tm.opcode_modifier.no_lsuf)
4655 i.suffix = LONG_MNEM_SUFFIX;
4658 if (!i.tm.opcode_modifier.no_wsuf)
4659 i.suffix = WORD_MNEM_SUFFIX;
4668 if (i.tm.opcode_modifier.w)
4670 as_bad (_("no instruction mnemonic suffix given and "
4671 "no register operands; can't size instruction"));
4677 unsigned int suffixes;
4679 suffixes = !i.tm.opcode_modifier.no_bsuf;
4680 if (!i.tm.opcode_modifier.no_wsuf)
4682 if (!i.tm.opcode_modifier.no_lsuf)
4684 if (!i.tm.opcode_modifier.no_ldsuf)
4686 if (!i.tm.opcode_modifier.no_ssuf)
4688 if (!i.tm.opcode_modifier.no_qsuf)
4691 /* There are more than suffix matches. */
4692 if (i.tm.opcode_modifier.w
4693 || ((suffixes & (suffixes - 1))
4694 && !i.tm.opcode_modifier.defaultsize
4695 && !i.tm.opcode_modifier.ignoresize))
4697 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4703 /* Change the opcode based on the operand size given by i.suffix;
4704 We don't need to change things for byte insns. */
4707 && i.suffix != BYTE_MNEM_SUFFIX
4708 && i.suffix != XMMWORD_MNEM_SUFFIX
4709 && i.suffix != YMMWORD_MNEM_SUFFIX)
4711 /* It's not a byte, select word/dword operation. */
4712 if (i.tm.opcode_modifier.w)
4714 if (i.tm.opcode_modifier.shortform)
4715 i.tm.base_opcode |= 8;
4717 i.tm.base_opcode |= 1;
4720 /* Now select between word & dword operations via the operand
4721 size prefix, except for instructions that will ignore this
4723 if (i.tm.opcode_modifier.addrprefixop0)
4725 /* The address size override prefix changes the size of the
4727 if ((flag_code == CODE_32BIT
4728 && i.op->regs[0].reg_type.bitfield.reg16)
4729 || (flag_code != CODE_32BIT
4730 && i.op->regs[0].reg_type.bitfield.reg32))
4731 if (!add_prefix (ADDR_PREFIX_OPCODE))
4734 else if (i.suffix != QWORD_MNEM_SUFFIX
4735 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4736 && !i.tm.opcode_modifier.ignoresize
4737 && !i.tm.opcode_modifier.floatmf
4738 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4739 || (flag_code == CODE_64BIT
4740 && i.tm.opcode_modifier.jumpbyte)))
4742 unsigned int prefix = DATA_PREFIX_OPCODE;
4744 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4745 prefix = ADDR_PREFIX_OPCODE;
4747 if (!add_prefix (prefix))
4751 /* Set mode64 for an operand. */
4752 if (i.suffix == QWORD_MNEM_SUFFIX
4753 && flag_code == CODE_64BIT
4754 && !i.tm.opcode_modifier.norex64)
4756 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4757 need rex64. cmpxchg8b is also a special case. */
4758 if (! (i.operands == 2
4759 && i.tm.base_opcode == 0x90
4760 && i.tm.extension_opcode == None
4761 && operand_type_equal (&i.types [0], &acc64)
4762 && operand_type_equal (&i.types [1], &acc64))
4763 && ! (i.operands == 1
4764 && i.tm.base_opcode == 0xfc7
4765 && i.tm.extension_opcode == 1
4766 && !operand_type_check (i.types [0], reg)
4767 && operand_type_check (i.types [0], anymem)))
4771 /* Size floating point instruction. */
4772 if (i.suffix == LONG_MNEM_SUFFIX)
4773 if (i.tm.opcode_modifier.floatmf)
4774 i.tm.base_opcode ^= 4;
4781 check_byte_reg (void)
4785 for (op = i.operands; --op >= 0;)
4787 /* If this is an eight bit register, it's OK. If it's the 16 or
4788 32 bit version of an eight bit register, we will just use the
4789 low portion, and that's OK too. */
4790 if (i.types[op].bitfield.reg8)
4793 /* I/O port address operands are OK too. */
4794 if (i.tm.operand_types[op].bitfield.inoutportreg)
4797 /* crc32 doesn't generate this warning. */
4798 if (i.tm.base_opcode == 0xf20f38f0)
4801 if ((i.types[op].bitfield.reg16
4802 || i.types[op].bitfield.reg32
4803 || i.types[op].bitfield.reg64)
4804 && i.op[op].regs->reg_num < 4
4805 /* Prohibit these changes in 64bit mode, since the lowering
4806 would be more complicated. */
4807 && flag_code != CODE_64BIT)
4809 #if REGISTER_WARNINGS
4810 if (!quiet_warnings)
4811 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4813 (i.op[op].regs + (i.types[op].bitfield.reg16
4814 ? REGNAM_AL - REGNAM_AX
4815 : REGNAM_AL - REGNAM_EAX))->reg_name,
4817 i.op[op].regs->reg_name,
4822 /* Any other register is bad. */
4823 if (i.types[op].bitfield.reg16
4824 || i.types[op].bitfield.reg32
4825 || i.types[op].bitfield.reg64
4826 || i.types[op].bitfield.regmmx
4827 || i.types[op].bitfield.regxmm
4828 || i.types[op].bitfield.regymm
4829 || i.types[op].bitfield.sreg2
4830 || i.types[op].bitfield.sreg3
4831 || i.types[op].bitfield.control
4832 || i.types[op].bitfield.debug
4833 || i.types[op].bitfield.test
4834 || i.types[op].bitfield.floatreg
4835 || i.types[op].bitfield.floatacc)
4837 as_bad (_("`%s%s' not allowed with `%s%c'"),
4839 i.op[op].regs->reg_name,
4849 check_long_reg (void)
4853 for (op = i.operands; --op >= 0;)
4854 /* Reject eight bit registers, except where the template requires
4855 them. (eg. movzb) */
4856 if (i.types[op].bitfield.reg8
4857 && (i.tm.operand_types[op].bitfield.reg16
4858 || i.tm.operand_types[op].bitfield.reg32
4859 || i.tm.operand_types[op].bitfield.acc))
4861 as_bad (_("`%s%s' not allowed with `%s%c'"),
4863 i.op[op].regs->reg_name,
4868 /* Warn if the e prefix on a general reg is missing. */
4869 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4870 && i.types[op].bitfield.reg16
4871 && (i.tm.operand_types[op].bitfield.reg32
4872 || i.tm.operand_types[op].bitfield.acc))
4874 /* Prohibit these changes in the 64bit mode, since the
4875 lowering is more complicated. */
4876 if (flag_code == CODE_64BIT)
4878 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4879 register_prefix, i.op[op].regs->reg_name,
4883 #if REGISTER_WARNINGS
4885 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4887 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4889 i.op[op].regs->reg_name,
4893 /* Warn if the r prefix on a general reg is missing. */
4894 else if (i.types[op].bitfield.reg64
4895 && (i.tm.operand_types[op].bitfield.reg32
4896 || i.tm.operand_types[op].bitfield.acc))
4899 && i.tm.opcode_modifier.toqword
4900 && !i.types[0].bitfield.regxmm)
4902 /* Convert to QWORD. We want REX byte. */
4903 i.suffix = QWORD_MNEM_SUFFIX;
4907 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4908 register_prefix, i.op[op].regs->reg_name,
4917 check_qword_reg (void)
4921 for (op = i.operands; --op >= 0; )
4922 /* Reject eight bit registers, except where the template requires
4923 them. (eg. movzb) */
4924 if (i.types[op].bitfield.reg8
4925 && (i.tm.operand_types[op].bitfield.reg16
4926 || i.tm.operand_types[op].bitfield.reg32
4927 || i.tm.operand_types[op].bitfield.acc))
4929 as_bad (_("`%s%s' not allowed with `%s%c'"),
4931 i.op[op].regs->reg_name,
4936 /* Warn if the e prefix on a general reg is missing. */
4937 else if ((i.types[op].bitfield.reg16
4938 || i.types[op].bitfield.reg32)
4939 && (i.tm.operand_types[op].bitfield.reg32
4940 || i.tm.operand_types[op].bitfield.acc))
4942 /* Prohibit these changes in the 64bit mode, since the
4943 lowering is more complicated. */
4945 && i.tm.opcode_modifier.todword
4946 && !i.types[0].bitfield.regxmm)
4948 /* Convert to DWORD. We don't want REX byte. */
4949 i.suffix = LONG_MNEM_SUFFIX;
4953 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4954 register_prefix, i.op[op].regs->reg_name,
4963 check_word_reg (void)
4966 for (op = i.operands; --op >= 0;)
4967 /* Reject eight bit registers, except where the template requires
4968 them. (eg. movzb) */
4969 if (i.types[op].bitfield.reg8
4970 && (i.tm.operand_types[op].bitfield.reg16
4971 || i.tm.operand_types[op].bitfield.reg32
4972 || i.tm.operand_types[op].bitfield.acc))
4974 as_bad (_("`%s%s' not allowed with `%s%c'"),
4976 i.op[op].regs->reg_name,
4981 /* Warn if the e prefix on a general reg is present. */
4982 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4983 && i.types[op].bitfield.reg32
4984 && (i.tm.operand_types[op].bitfield.reg16
4985 || i.tm.operand_types[op].bitfield.acc))
4987 /* Prohibit these changes in the 64bit mode, since the
4988 lowering is more complicated. */
4989 if (flag_code == CODE_64BIT)
4991 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4992 register_prefix, i.op[op].regs->reg_name,
4997 #if REGISTER_WARNINGS
4998 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5000 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
5002 i.op[op].regs->reg_name,
5010 update_imm (unsigned int j)
5012 i386_operand_type overlap = i.types[j];
5013 if ((overlap.bitfield.imm8
5014 || overlap.bitfield.imm8s
5015 || overlap.bitfield.imm16
5016 || overlap.bitfield.imm32
5017 || overlap.bitfield.imm32s
5018 || overlap.bitfield.imm64)
5019 && !operand_type_equal (&overlap, &imm8)
5020 && !operand_type_equal (&overlap, &imm8s)
5021 && !operand_type_equal (&overlap, &imm16)
5022 && !operand_type_equal (&overlap, &imm32)
5023 && !operand_type_equal (&overlap, &imm32s)
5024 && !operand_type_equal (&overlap, &imm64))
5028 i386_operand_type temp;
5030 operand_type_set (&temp, 0);
5031 if (i.suffix == BYTE_MNEM_SUFFIX)
5033 temp.bitfield.imm8 = overlap.bitfield.imm8;
5034 temp.bitfield.imm8s = overlap.bitfield.imm8s;
5036 else if (i.suffix == WORD_MNEM_SUFFIX)
5037 temp.bitfield.imm16 = overlap.bitfield.imm16;
5038 else if (i.suffix == QWORD_MNEM_SUFFIX)
5040 temp.bitfield.imm64 = overlap.bitfield.imm64;
5041 temp.bitfield.imm32s = overlap.bitfield.imm32s;
5044 temp.bitfield.imm32 = overlap.bitfield.imm32;
5047 else if (operand_type_equal (&overlap, &imm16_32_32s)
5048 || operand_type_equal (&overlap, &imm16_32)
5049 || operand_type_equal (&overlap, &imm16_32s))
5051 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5056 if (!operand_type_equal (&overlap, &imm8)
5057 && !operand_type_equal (&overlap, &imm8s)
5058 && !operand_type_equal (&overlap, &imm16)
5059 && !operand_type_equal (&overlap, &imm32)
5060 && !operand_type_equal (&overlap, &imm32s)
5061 && !operand_type_equal (&overlap, &imm64))
5063 as_bad (_("no instruction mnemonic suffix given; "
5064 "can't determine immediate size"));
5068 i.types[j] = overlap;
5078 /* Update the first 2 immediate operands. */
5079 n = i.operands > 2 ? 2 : i.operands;
5082 for (j = 0; j < n; j++)
5083 if (update_imm (j) == 0)
5086 /* The 3rd operand can't be immediate operand. */
5087 gas_assert (operand_type_check (i.types[2], imm) == 0);
5094 bad_implicit_operand (int xmm)
5096 const char *ireg = xmm ? "xmm0" : "ymm0";
5099 as_bad (_("the last operand of `%s' must be `%s%s'"),
5100 i.tm.name, register_prefix, ireg);
5102 as_bad (_("the first operand of `%s' must be `%s%s'"),
5103 i.tm.name, register_prefix, ireg);
5108 process_operands (void)
5110 /* Default segment register this instruction will use for memory
5111 accesses. 0 means unknown. This is only for optimizing out
5112 unnecessary segment overrides. */
5113 const seg_entry *default_seg = 0;
5115 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5117 unsigned int dupl = i.operands;
5118 unsigned int dest = dupl - 1;
5121 /* The destination must be an xmm register. */
5122 gas_assert (i.reg_operands
5123 && MAX_OPERANDS > dupl
5124 && operand_type_equal (&i.types[dest], ®xmm));
5126 if (i.tm.opcode_modifier.firstxmm0)
5128 /* The first operand is implicit and must be xmm0. */
5129 gas_assert (operand_type_equal (&i.types[0], ®xmm));
5130 if (register_number (i.op[0].regs) != 0)
5131 return bad_implicit_operand (1);
5133 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5135 /* Keep xmm0 for instructions with VEX prefix and 3
5141 /* We remove the first xmm0 and keep the number of
5142 operands unchanged, which in fact duplicates the
5144 for (j = 1; j < i.operands; j++)
5146 i.op[j - 1] = i.op[j];
5147 i.types[j - 1] = i.types[j];
5148 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5152 else if (i.tm.opcode_modifier.implicit1stxmm0)
5154 gas_assert ((MAX_OPERANDS - 1) > dupl
5155 && (i.tm.opcode_modifier.vexsources
5158 /* Add the implicit xmm0 for instructions with VEX prefix
5160 for (j = i.operands; j > 0; j--)
5162 i.op[j] = i.op[j - 1];
5163 i.types[j] = i.types[j - 1];
5164 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5167 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5168 i.types[0] = regxmm;
5169 i.tm.operand_types[0] = regxmm;
5172 i.reg_operands += 2;
5177 i.op[dupl] = i.op[dest];
5178 i.types[dupl] = i.types[dest];
5179 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5188 i.op[dupl] = i.op[dest];
5189 i.types[dupl] = i.types[dest];
5190 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5193 if (i.tm.opcode_modifier.immext)
5196 else if (i.tm.opcode_modifier.firstxmm0)
5200 /* The first operand is implicit and must be xmm0/ymm0. */
5201 gas_assert (i.reg_operands
5202 && (operand_type_equal (&i.types[0], ®xmm)
5203 || operand_type_equal (&i.types[0], ®ymm)));
5204 if (register_number (i.op[0].regs) != 0)
5205 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5207 for (j = 1; j < i.operands; j++)
5209 i.op[j - 1] = i.op[j];
5210 i.types[j - 1] = i.types[j];
5212 /* We need to adjust fields in i.tm since they are used by
5213 build_modrm_byte. */
5214 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5221 else if (i.tm.opcode_modifier.regkludge)
5223 /* The imul $imm, %reg instruction is converted into
5224 imul $imm, %reg, %reg, and the clr %reg instruction
5225 is converted into xor %reg, %reg. */
5227 unsigned int first_reg_op;
5229 if (operand_type_check (i.types[0], reg))
5233 /* Pretend we saw the extra register operand. */
5234 gas_assert (i.reg_operands == 1
5235 && i.op[first_reg_op + 1].regs == 0);
5236 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5237 i.types[first_reg_op + 1] = i.types[first_reg_op];
5242 if (i.tm.opcode_modifier.shortform)
5244 if (i.types[0].bitfield.sreg2
5245 || i.types[0].bitfield.sreg3)
5247 if (i.tm.base_opcode == POP_SEG_SHORT
5248 && i.op[0].regs->reg_num == 1)
5250 as_bad (_("you can't `pop %scs'"), register_prefix);
5253 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5254 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5259 /* The register or float register operand is in operand
5263 if (i.types[0].bitfield.floatreg
5264 || operand_type_check (i.types[0], reg))
5268 /* Register goes in low 3 bits of opcode. */
5269 i.tm.base_opcode |= i.op[op].regs->reg_num;
5270 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5272 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5274 /* Warn about some common errors, but press on regardless.
5275 The first case can be generated by gcc (<= 2.8.1). */
5276 if (i.operands == 2)
5278 /* Reversed arguments on faddp, fsubp, etc. */
5279 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5280 register_prefix, i.op[!intel_syntax].regs->reg_name,
5281 register_prefix, i.op[intel_syntax].regs->reg_name);
5285 /* Extraneous `l' suffix on fp insn. */
5286 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5287 register_prefix, i.op[0].regs->reg_name);
5292 else if (i.tm.opcode_modifier.modrm)
5294 /* The opcode is completed (modulo i.tm.extension_opcode which
5295 must be put into the modrm byte). Now, we make the modrm and
5296 index base bytes based on all the info we've collected. */
5298 default_seg = build_modrm_byte ();
5300 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5304 else if (i.tm.opcode_modifier.isstring)
5306 /* For the string instructions that allow a segment override
5307 on one of their operands, the default segment is ds. */
5311 if (i.tm.base_opcode == 0x8d /* lea */
5314 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5316 /* If a segment was explicitly specified, and the specified segment
5317 is not the default, use an opcode prefix to select it. If we
5318 never figured out what the default segment is, then default_seg
5319 will be zero at this point, and the specified segment prefix will
5321 if ((i.seg[0]) && (i.seg[0] != default_seg))
5323 if (!add_prefix (i.seg[0]->seg_prefix))
5329 static const seg_entry *
5330 build_modrm_byte (void)
5332 const seg_entry *default_seg = 0;
5333 unsigned int source, dest;
5336 /* The first operand of instructions with VEX prefix and 3 sources
5337 must be VEX_Imm4. */
5338 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5341 unsigned int nds, reg_slot;
5344 if (i.tm.opcode_modifier.veximmext
5345 && i.tm.opcode_modifier.immext)
5347 dest = i.operands - 2;
5348 gas_assert (dest == 3);
5351 dest = i.operands - 1;
5354 /* There are 2 kinds of instructions:
5355 1. 5 operands: 4 register operands or 3 register operands
5356 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5357 VexW0 or VexW1. The destination must be either XMM or YMM
5359 2. 4 operands: 4 register operands or 3 register operands
5360 plus 1 memory operand, VexXDS, and VexImmExt */
5361 gas_assert ((i.reg_operands == 4
5362 || (i.reg_operands == 3 && i.mem_operands == 1))
5363 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5364 && (i.tm.opcode_modifier.veximmext
5365 || (i.imm_operands == 1
5366 && i.types[0].bitfield.vec_imm4
5367 && (i.tm.opcode_modifier.vexw == VEXW0
5368 || i.tm.opcode_modifier.vexw == VEXW1)
5369 && (operand_type_equal (&i.tm.operand_types[dest], ®xmm)
5370 || operand_type_equal (&i.tm.operand_types[dest], ®ymm)))));
5372 if (i.imm_operands == 0)
5374 /* When there is no immediate operand, generate an 8bit
5375 immediate operand to encode the first operand. */
5376 exp = &im_expressions[i.imm_operands++];
5377 i.op[i.operands].imms = exp;
5378 i.types[i.operands] = imm8;
5380 /* If VexW1 is set, the first operand is the source and
5381 the second operand is encoded in the immediate operand. */
5382 if (i.tm.opcode_modifier.vexw == VEXW1)
5393 /* FMA swaps REG and NDS. */
5394 if (i.tm.cpu_flags.bitfield.cpufma)
5402 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5404 || operand_type_equal (&i.tm.operand_types[reg_slot],
5406 exp->X_op = O_constant;
5407 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
5411 unsigned int imm_slot;
5413 if (i.tm.opcode_modifier.vexw == VEXW0)
5415 /* If VexW0 is set, the third operand is the source and
5416 the second operand is encoded in the immediate
5423 /* VexW1 is set, the second operand is the source and
5424 the third operand is encoded in the immediate
5430 if (i.tm.opcode_modifier.immext)
5432 /* When ImmExt is set, the immdiate byte is the last
5434 imm_slot = i.operands - 1;
5442 /* Turn on Imm8 so that output_imm will generate it. */
5443 i.types[imm_slot].bitfield.imm8 = 1;
5446 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5448 || operand_type_equal (&i.tm.operand_types[reg_slot],
5450 i.op[imm_slot].imms->X_add_number
5451 |= register_number (i.op[reg_slot].regs) << 4;
5454 gas_assert (operand_type_equal (&i.tm.operand_types[nds], ®xmm)
5455 || operand_type_equal (&i.tm.operand_types[nds],
5457 i.vex.register_specifier = i.op[nds].regs;
5462 /* i.reg_operands MUST be the number of real register operands;
5463 implicit registers do not count. If there are 3 register
5464 operands, it must be a instruction with VexNDS. For a
5465 instruction with VexNDD, the destination register is encoded
5466 in VEX prefix. If there are 4 register operands, it must be
5467 a instruction with VEX prefix and 3 sources. */
5468 if (i.mem_operands == 0
5469 && ((i.reg_operands == 2
5470 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5471 || (i.reg_operands == 3
5472 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5473 || (i.reg_operands == 4 && vex_3_sources)))
5481 /* When there are 3 operands, one of them may be immediate,
5482 which may be the first or the last operand. Otherwise,
5483 the first operand must be shift count register (cl) or it
5484 is an instruction with VexNDS. */
5485 gas_assert (i.imm_operands == 1
5486 || (i.imm_operands == 0
5487 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5488 || i.types[0].bitfield.shiftcount)));
5489 if (operand_type_check (i.types[0], imm)
5490 || i.types[0].bitfield.shiftcount)
5496 /* When there are 4 operands, the first two must be 8bit
5497 immediate operands. The source operand will be the 3rd
5500 For instructions with VexNDS, if the first operand
5501 an imm8, the source operand is the 2nd one. If the last
5502 operand is imm8, the source operand is the first one. */
5503 gas_assert ((i.imm_operands == 2
5504 && i.types[0].bitfield.imm8
5505 && i.types[1].bitfield.imm8)
5506 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5507 && i.imm_operands == 1
5508 && (i.types[0].bitfield.imm8
5509 || i.types[i.operands - 1].bitfield.imm8)));
5510 if (i.imm_operands == 2)
5514 if (i.types[0].bitfield.imm8)
5530 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5532 /* For instructions with VexNDS, the register-only
5533 source operand must be 32/64bit integer, XMM or
5534 YMM register. It is encoded in VEX prefix. We
5535 need to clear RegMem bit before calling
5536 operand_type_equal. */
5538 i386_operand_type op;
5541 /* Check register-only source operand when two source
5542 operands are swapped. */
5543 if (!i.tm.operand_types[source].bitfield.baseindex
5544 && i.tm.operand_types[dest].bitfield.baseindex)
5552 op = i.tm.operand_types[vvvv];
5553 op.bitfield.regmem = 0;
5554 if ((dest + 1) >= i.operands
5555 || (op.bitfield.reg32 != 1
5556 && !op.bitfield.reg64 != 1
5557 && !operand_type_equal (&op, ®xmm)
5558 && !operand_type_equal (&op, ®ymm)))
5560 i.vex.register_specifier = i.op[vvvv].regs;
5566 /* One of the register operands will be encoded in the i.tm.reg
5567 field, the other in the combined i.tm.mode and i.tm.regmem
5568 fields. If no form of this instruction supports a memory
5569 destination operand, then we assume the source operand may
5570 sometimes be a memory operand and so we need to store the
5571 destination in the i.rm.reg field. */
5572 if (!i.tm.operand_types[dest].bitfield.regmem
5573 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5575 i.rm.reg = i.op[dest].regs->reg_num;
5576 i.rm.regmem = i.op[source].regs->reg_num;
5577 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5579 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5584 i.rm.reg = i.op[source].regs->reg_num;
5585 i.rm.regmem = i.op[dest].regs->reg_num;
5586 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5588 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5591 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5593 if (!i.types[0].bitfield.control
5594 && !i.types[1].bitfield.control)
5596 i.rex &= ~(REX_R | REX_B);
5597 add_prefix (LOCK_PREFIX_OPCODE);
5601 { /* If it's not 2 reg operands... */
5606 unsigned int fake_zero_displacement = 0;
5609 for (op = 0; op < i.operands; op++)
5610 if (operand_type_check (i.types[op], anymem))
5612 gas_assert (op < i.operands);
5614 if (i.tm.opcode_modifier.vecsib)
5616 if (i.index_reg->reg_num == RegEiz
5617 || i.index_reg->reg_num == RegRiz)
5620 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5623 i.sib.base = NO_BASE_REGISTER;
5624 i.sib.scale = i.log2_scale_factor;
5625 i.types[op].bitfield.disp8 = 0;
5626 i.types[op].bitfield.disp16 = 0;
5627 i.types[op].bitfield.disp64 = 0;
5628 if (flag_code != CODE_64BIT)
5630 /* Must be 32 bit */
5631 i.types[op].bitfield.disp32 = 1;
5632 i.types[op].bitfield.disp32s = 0;
5636 i.types[op].bitfield.disp32 = 0;
5637 i.types[op].bitfield.disp32s = 1;
5640 i.sib.index = i.index_reg->reg_num;
5641 if ((i.index_reg->reg_flags & RegRex) != 0)
5647 if (i.base_reg == 0)
5650 if (!i.disp_operands)
5652 fake_zero_displacement = 1;
5653 /* Instructions with VSIB byte need 32bit displacement
5654 if there is no base register. */
5655 if (i.tm.opcode_modifier.vecsib)
5656 i.types[op].bitfield.disp32 = 1;
5658 if (i.index_reg == 0)
5660 gas_assert (!i.tm.opcode_modifier.vecsib);
5661 /* Operand is just <disp> */
5662 if (flag_code == CODE_64BIT)
5664 /* 64bit mode overwrites the 32bit absolute
5665 addressing by RIP relative addressing and
5666 absolute addressing is encoded by one of the
5667 redundant SIB forms. */
5668 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5669 i.sib.base = NO_BASE_REGISTER;
5670 i.sib.index = NO_INDEX_REGISTER;
5671 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5672 ? disp32s : disp32);
5674 else if ((flag_code == CODE_16BIT)
5675 ^ (i.prefix[ADDR_PREFIX] != 0))
5677 i.rm.regmem = NO_BASE_REGISTER_16;
5678 i.types[op] = disp16;
5682 i.rm.regmem = NO_BASE_REGISTER;
5683 i.types[op] = disp32;
5686 else if (!i.tm.opcode_modifier.vecsib)
5688 /* !i.base_reg && i.index_reg */
5689 if (i.index_reg->reg_num == RegEiz
5690 || i.index_reg->reg_num == RegRiz)
5691 i.sib.index = NO_INDEX_REGISTER;
5693 i.sib.index = i.index_reg->reg_num;
5694 i.sib.base = NO_BASE_REGISTER;
5695 i.sib.scale = i.log2_scale_factor;
5696 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5697 i.types[op].bitfield.disp8 = 0;
5698 i.types[op].bitfield.disp16 = 0;
5699 i.types[op].bitfield.disp64 = 0;
5700 if (flag_code != CODE_64BIT)
5702 /* Must be 32 bit */
5703 i.types[op].bitfield.disp32 = 1;
5704 i.types[op].bitfield.disp32s = 0;
5708 i.types[op].bitfield.disp32 = 0;
5709 i.types[op].bitfield.disp32s = 1;
5711 if ((i.index_reg->reg_flags & RegRex) != 0)
5715 /* RIP addressing for 64bit mode. */
5716 else if (i.base_reg->reg_num == RegRip ||
5717 i.base_reg->reg_num == RegEip)
5719 gas_assert (!i.tm.opcode_modifier.vecsib);
5720 i.rm.regmem = NO_BASE_REGISTER;
5721 i.types[op].bitfield.disp8 = 0;
5722 i.types[op].bitfield.disp16 = 0;
5723 i.types[op].bitfield.disp32 = 0;
5724 i.types[op].bitfield.disp32s = 1;
5725 i.types[op].bitfield.disp64 = 0;
5726 i.flags[op] |= Operand_PCrel;
5727 if (! i.disp_operands)
5728 fake_zero_displacement = 1;
5730 else if (i.base_reg->reg_type.bitfield.reg16)
5732 gas_assert (!i.tm.opcode_modifier.vecsib);
5733 switch (i.base_reg->reg_num)
5736 if (i.index_reg == 0)
5738 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5739 i.rm.regmem = i.index_reg->reg_num - 6;
5743 if (i.index_reg == 0)
5746 if (operand_type_check (i.types[op], disp) == 0)
5748 /* fake (%bp) into 0(%bp) */
5749 i.types[op].bitfield.disp8 = 1;
5750 fake_zero_displacement = 1;
5753 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5754 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5756 default: /* (%si) -> 4 or (%di) -> 5 */
5757 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5759 i.rm.mode = mode_from_disp_size (i.types[op]);
5761 else /* i.base_reg and 32/64 bit mode */
5763 if (flag_code == CODE_64BIT
5764 && operand_type_check (i.types[op], disp))
5766 i386_operand_type temp;
5767 operand_type_set (&temp, 0);
5768 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5770 if (i.prefix[ADDR_PREFIX] == 0)
5771 i.types[op].bitfield.disp32s = 1;
5773 i.types[op].bitfield.disp32 = 1;
5776 if (!i.tm.opcode_modifier.vecsib)
5777 i.rm.regmem = i.base_reg->reg_num;
5778 if ((i.base_reg->reg_flags & RegRex) != 0)
5780 i.sib.base = i.base_reg->reg_num;
5781 /* x86-64 ignores REX prefix bit here to avoid decoder
5783 if (!(i.base_reg->reg_flags & RegRex)
5784 && (i.base_reg->reg_num == EBP_REG_NUM
5785 || i.base_reg->reg_num == ESP_REG_NUM))
5787 if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
5789 fake_zero_displacement = 1;
5790 i.types[op].bitfield.disp8 = 1;
5792 i.sib.scale = i.log2_scale_factor;
5793 if (i.index_reg == 0)
5795 gas_assert (!i.tm.opcode_modifier.vecsib);
5796 /* <disp>(%esp) becomes two byte modrm with no index
5797 register. We've already stored the code for esp
5798 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5799 Any base register besides %esp will not use the
5800 extra modrm byte. */
5801 i.sib.index = NO_INDEX_REGISTER;
5803 else if (!i.tm.opcode_modifier.vecsib)
5805 if (i.index_reg->reg_num == RegEiz
5806 || i.index_reg->reg_num == RegRiz)
5807 i.sib.index = NO_INDEX_REGISTER;
5809 i.sib.index = i.index_reg->reg_num;
5810 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5811 if ((i.index_reg->reg_flags & RegRex) != 0)
5816 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5817 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5821 if (!fake_zero_displacement
5825 fake_zero_displacement = 1;
5826 if (i.disp_encoding == disp_encoding_8bit)
5827 i.types[op].bitfield.disp8 = 1;
5829 i.types[op].bitfield.disp32 = 1;
5831 i.rm.mode = mode_from_disp_size (i.types[op]);
5835 if (fake_zero_displacement)
5837 /* Fakes a zero displacement assuming that i.types[op]
5838 holds the correct displacement size. */
5841 gas_assert (i.op[op].disps == 0);
5842 exp = &disp_expressions[i.disp_operands++];
5843 i.op[op].disps = exp;
5844 exp->X_op = O_constant;
5845 exp->X_add_number = 0;
5846 exp->X_add_symbol = (symbolS *) 0;
5847 exp->X_op_symbol = (symbolS *) 0;
5855 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5857 if (operand_type_check (i.types[0], imm))
5858 i.vex.register_specifier = NULL;
5861 /* VEX.vvvv encodes one of the sources when the first
5862 operand is not an immediate. */
5863 if (i.tm.opcode_modifier.vexw == VEXW0)
5864 i.vex.register_specifier = i.op[0].regs;
5866 i.vex.register_specifier = i.op[1].regs;
5869 /* Destination is a XMM register encoded in the ModRM.reg
5871 i.rm.reg = i.op[2].regs->reg_num;
5872 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5875 /* ModRM.rm and VEX.B encodes the other source. */
5876 if (!i.mem_operands)
5880 if (i.tm.opcode_modifier.vexw == VEXW0)
5881 i.rm.regmem = i.op[1].regs->reg_num;
5883 i.rm.regmem = i.op[0].regs->reg_num;
5885 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5889 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5891 i.vex.register_specifier = i.op[2].regs;
5892 if (!i.mem_operands)
5895 i.rm.regmem = i.op[1].regs->reg_num;
5896 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5900 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5901 (if any) based on i.tm.extension_opcode. Again, we must be
5902 careful to make sure that segment/control/debug/test/MMX
5903 registers are coded into the i.rm.reg field. */
5904 else if (i.reg_operands)
5907 unsigned int vex_reg = ~0;
5909 for (op = 0; op < i.operands; op++)
5910 if (i.types[op].bitfield.reg8
5911 || i.types[op].bitfield.reg16
5912 || i.types[op].bitfield.reg32
5913 || i.types[op].bitfield.reg64
5914 || i.types[op].bitfield.regmmx
5915 || i.types[op].bitfield.regxmm
5916 || i.types[op].bitfield.regymm
5917 || i.types[op].bitfield.sreg2
5918 || i.types[op].bitfield.sreg3
5919 || i.types[op].bitfield.control
5920 || i.types[op].bitfield.debug
5921 || i.types[op].bitfield.test)
5926 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5928 /* For instructions with VexNDS, the register-only
5929 source operand is encoded in VEX prefix. */
5930 gas_assert (mem != (unsigned int) ~0);
5935 gas_assert (op < i.operands);
5939 /* Check register-only source operand when two source
5940 operands are swapped. */
5941 if (!i.tm.operand_types[op].bitfield.baseindex
5942 && i.tm.operand_types[op + 1].bitfield.baseindex)
5946 gas_assert (mem == (vex_reg + 1)
5947 && op < i.operands);
5952 gas_assert (vex_reg < i.operands);
5956 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5958 /* For instructions with VexNDD, the register destination
5959 is encoded in VEX prefix. */
5960 if (i.mem_operands == 0)
5962 /* There is no memory operand. */
5963 gas_assert ((op + 2) == i.operands);
5968 /* There are only 2 operands. */
5969 gas_assert (op < 2 && i.operands == 2);
5974 gas_assert (op < i.operands);
5976 if (vex_reg != (unsigned int) ~0)
5978 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5980 if (type->bitfield.reg32 != 1
5981 && type->bitfield.reg64 != 1
5982 && !operand_type_equal (type, ®xmm)
5983 && !operand_type_equal (type, ®ymm))
5986 i.vex.register_specifier = i.op[vex_reg].regs;
5989 /* Don't set OP operand twice. */
5992 /* If there is an extension opcode to put here, the
5993 register number must be put into the regmem field. */
5994 if (i.tm.extension_opcode != None)
5996 i.rm.regmem = i.op[op].regs->reg_num;
5997 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6002 i.rm.reg = i.op[op].regs->reg_num;
6003 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6008 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6009 must set it to 3 to indicate this is a register operand
6010 in the regmem field. */
6011 if (!i.mem_operands)
6015 /* Fill in i.rm.reg field with extension opcode (if any). */
6016 if (i.tm.extension_opcode != None)
6017 i.rm.reg = i.tm.extension_opcode;
6023 output_branch (void)
6029 relax_substateT subtype;
6033 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
6034 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
6037 if (i.prefix[DATA_PREFIX] != 0)
6043 /* Pentium4 branch hints. */
6044 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6045 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6050 if (i.prefix[REX_PREFIX] != 0)
6056 if (i.prefixes != 0 && !intel_syntax)
6057 as_warn (_("skipping prefixes on this instruction"));
6059 /* It's always a symbol; End frag & setup for relax.
6060 Make sure there is enough room in this frag for the largest
6061 instruction we may generate in md_convert_frag. This is 2
6062 bytes for the opcode and room for the prefix and largest
6064 frag_grow (prefix + 2 + 4);
6065 /* Prefix and 1 opcode byte go in fr_fix. */
6066 p = frag_more (prefix + 1);
6067 if (i.prefix[DATA_PREFIX] != 0)
6068 *p++ = DATA_PREFIX_OPCODE;
6069 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6070 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6071 *p++ = i.prefix[SEG_PREFIX];
6072 if (i.prefix[REX_PREFIX] != 0)
6073 *p++ = i.prefix[REX_PREFIX];
6074 *p = i.tm.base_opcode;
6076 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6077 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6078 else if (cpu_arch_flags.bitfield.cpui386)
6079 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6081 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6084 sym = i.op[0].disps->X_add_symbol;
6085 off = i.op[0].disps->X_add_number;
6087 if (i.op[0].disps->X_op != O_constant
6088 && i.op[0].disps->X_op != O_symbol)
6090 /* Handle complex expressions. */
6091 sym = make_expr_symbol (i.op[0].disps);
6095 /* 1 possible extra opcode + 4 byte displacement go in var part.
6096 Pass reloc in fr_var. */
6097 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
6107 if (i.tm.opcode_modifier.jumpbyte)
6109 /* This is a loop or jecxz type instruction. */
6111 if (i.prefix[ADDR_PREFIX] != 0)
6113 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6116 /* Pentium4 branch hints. */
6117 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6118 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6120 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6129 if (flag_code == CODE_16BIT)
6132 if (i.prefix[DATA_PREFIX] != 0)
6134 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6144 if (i.prefix[REX_PREFIX] != 0)
6146 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6150 if (i.prefixes != 0 && !intel_syntax)
6151 as_warn (_("skipping prefixes on this instruction"));
6153 p = frag_more (i.tm.opcode_length + size);
6154 switch (i.tm.opcode_length)
6157 *p++ = i.tm.base_opcode >> 8;
6159 *p++ = i.tm.base_opcode;
6165 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6166 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6168 /* All jumps handled here are signed, but don't use a signed limit
6169 check for 32 and 16 bit jumps as we want to allow wrap around at
6170 4G and 64k respectively. */
6172 fixP->fx_signed = 1;
6176 output_interseg_jump (void)
6184 if (flag_code == CODE_16BIT)
6188 if (i.prefix[DATA_PREFIX] != 0)
6194 if (i.prefix[REX_PREFIX] != 0)
6204 if (i.prefixes != 0 && !intel_syntax)
6205 as_warn (_("skipping prefixes on this instruction"));
6207 /* 1 opcode; 2 segment; offset */
6208 p = frag_more (prefix + 1 + 2 + size);
6210 if (i.prefix[DATA_PREFIX] != 0)
6211 *p++ = DATA_PREFIX_OPCODE;
6213 if (i.prefix[REX_PREFIX] != 0)
6214 *p++ = i.prefix[REX_PREFIX];
6216 *p++ = i.tm.base_opcode;
6217 if (i.op[1].imms->X_op == O_constant)
6219 offsetT n = i.op[1].imms->X_add_number;
6222 && !fits_in_unsigned_word (n)
6223 && !fits_in_signed_word (n))
6225 as_bad (_("16-bit jump out of range"));
6228 md_number_to_chars (p, n, size);
6231 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6232 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6233 if (i.op[0].imms->X_op != O_constant)
6234 as_bad (_("can't handle non absolute segment in `%s'"),
6236 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6242 fragS *insn_start_frag;
6243 offsetT insn_start_off;
6245 /* Tie dwarf2 debug info to the address at the start of the insn.
6246 We can't do this after the insn has been output as the current
6247 frag may have been closed off. eg. by frag_var. */
6248 dwarf2_emit_insn (0);
6250 insn_start_frag = frag_now;
6251 insn_start_off = frag_now_fix ();
6254 if (i.tm.opcode_modifier.jump)
6256 else if (i.tm.opcode_modifier.jumpbyte
6257 || i.tm.opcode_modifier.jumpdword)
6259 else if (i.tm.opcode_modifier.jumpintersegment)
6260 output_interseg_jump ();
6263 /* Output normal instructions here. */
6267 unsigned int prefix;
6269 /* Since the VEX prefix contains the implicit prefix, we don't
6270 need the explicit prefix. */
6271 if (!i.tm.opcode_modifier.vex)
6273 switch (i.tm.opcode_length)
6276 if (i.tm.base_opcode & 0xff000000)
6278 prefix = (i.tm.base_opcode >> 24) & 0xff;
6283 if ((i.tm.base_opcode & 0xff0000) != 0)
6285 prefix = (i.tm.base_opcode >> 16) & 0xff;
6286 if (i.tm.cpu_flags.bitfield.cpupadlock)
6289 if (prefix != REPE_PREFIX_OPCODE
6290 || (i.prefix[REP_PREFIX]
6291 != REPE_PREFIX_OPCODE))
6292 add_prefix (prefix);
6295 add_prefix (prefix);
6304 /* The prefix bytes. */
6305 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6307 FRAG_APPEND_1_CHAR (*q);
6311 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6316 /* REX byte is encoded in VEX prefix. */
6320 FRAG_APPEND_1_CHAR (*q);
6323 /* There should be no other prefixes for instructions
6328 /* Now the VEX prefix. */
6329 p = frag_more (i.vex.length);
6330 for (j = 0; j < i.vex.length; j++)
6331 p[j] = i.vex.bytes[j];
6334 /* Now the opcode; be careful about word order here! */
6335 if (i.tm.opcode_length == 1)
6337 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6341 switch (i.tm.opcode_length)
6345 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6355 /* Put out high byte first: can't use md_number_to_chars! */
6356 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6357 *p = i.tm.base_opcode & 0xff;
6360 /* Now the modrm byte and sib byte (if present). */
6361 if (i.tm.opcode_modifier.modrm)
6363 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6366 /* If i.rm.regmem == ESP (4)
6367 && i.rm.mode != (Register mode)
6369 ==> need second modrm byte. */
6370 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6372 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6373 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6375 | i.sib.scale << 6));
6378 if (i.disp_operands)
6379 output_disp (insn_start_frag, insn_start_off);
6382 output_imm (insn_start_frag, insn_start_off);
6388 pi ("" /*line*/, &i);
6390 #endif /* DEBUG386 */
6393 /* Return the size of the displacement operand N. */
6396 disp_size (unsigned int n)
6399 if (i.types[n].bitfield.disp64)
6401 else if (i.types[n].bitfield.disp8)
6403 else if (i.types[n].bitfield.disp16)
6408 /* Return the size of the immediate operand N. */
6411 imm_size (unsigned int n)
6414 if (i.types[n].bitfield.imm64)
6416 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6418 else if (i.types[n].bitfield.imm16)
6424 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6429 for (n = 0; n < i.operands; n++)
6431 if (operand_type_check (i.types[n], disp))
6433 if (i.op[n].disps->X_op == O_constant)
6435 int size = disp_size (n);
6438 val = offset_in_range (i.op[n].disps->X_add_number,
6440 p = frag_more (size);
6441 md_number_to_chars (p, val, size);
6445 enum bfd_reloc_code_real reloc_type;
6446 int size = disp_size (n);
6447 int sign = i.types[n].bitfield.disp32s;
6448 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6450 /* We can't have 8 bit displacement here. */
6451 gas_assert (!i.types[n].bitfield.disp8);
6453 /* The PC relative address is computed relative
6454 to the instruction boundary, so in case immediate
6455 fields follows, we need to adjust the value. */
6456 if (pcrel && i.imm_operands)
6461 for (n1 = 0; n1 < i.operands; n1++)
6462 if (operand_type_check (i.types[n1], imm))
6464 /* Only one immediate is allowed for PC
6465 relative address. */
6466 gas_assert (sz == 0);
6468 i.op[n].disps->X_add_number -= sz;
6470 /* We should find the immediate. */
6471 gas_assert (sz != 0);
6474 p = frag_more (size);
6475 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6477 && GOT_symbol == i.op[n].disps->X_add_symbol
6478 && (((reloc_type == BFD_RELOC_32
6479 || reloc_type == BFD_RELOC_X86_64_32S
6480 || (reloc_type == BFD_RELOC_64
6482 && (i.op[n].disps->X_op == O_symbol
6483 || (i.op[n].disps->X_op == O_add
6484 && ((symbol_get_value_expression
6485 (i.op[n].disps->X_op_symbol)->X_op)
6487 || reloc_type == BFD_RELOC_32_PCREL))
6491 if (insn_start_frag == frag_now)
6492 add = (p - frag_now->fr_literal) - insn_start_off;
6497 add = insn_start_frag->fr_fix - insn_start_off;
6498 for (fr = insn_start_frag->fr_next;
6499 fr && fr != frag_now; fr = fr->fr_next)
6501 add += p - frag_now->fr_literal;
6506 reloc_type = BFD_RELOC_386_GOTPC;
6507 i.op[n].imms->X_add_number += add;
6509 else if (reloc_type == BFD_RELOC_64)
6510 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6512 /* Don't do the adjustment for x86-64, as there
6513 the pcrel addressing is relative to the _next_
6514 insn, and that is taken care of in other code. */
6515 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6517 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6518 i.op[n].disps, pcrel, reloc_type);
6525 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6530 for (n = 0; n < i.operands; n++)
6532 if (operand_type_check (i.types[n], imm))
6534 if (i.op[n].imms->X_op == O_constant)
6536 int size = imm_size (n);
6539 val = offset_in_range (i.op[n].imms->X_add_number,
6541 p = frag_more (size);
6542 md_number_to_chars (p, val, size);
6546 /* Not absolute_section.
6547 Need a 32-bit fixup (don't support 8bit
6548 non-absolute imms). Try to support other
6550 enum bfd_reloc_code_real reloc_type;
6551 int size = imm_size (n);
6554 if (i.types[n].bitfield.imm32s
6555 && (i.suffix == QWORD_MNEM_SUFFIX
6556 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6561 p = frag_more (size);
6562 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6564 /* This is tough to explain. We end up with this one if we
6565 * have operands that look like
6566 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6567 * obtain the absolute address of the GOT, and it is strongly
6568 * preferable from a performance point of view to avoid using
6569 * a runtime relocation for this. The actual sequence of
6570 * instructions often look something like:
6575 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6577 * The call and pop essentially return the absolute address
6578 * of the label .L66 and store it in %ebx. The linker itself
6579 * will ultimately change the first operand of the addl so
6580 * that %ebx points to the GOT, but to keep things simple, the
6581 * .o file must have this operand set so that it generates not
6582 * the absolute address of .L66, but the absolute address of
6583 * itself. This allows the linker itself simply treat a GOTPC
6584 * relocation as asking for a pcrel offset to the GOT to be
6585 * added in, and the addend of the relocation is stored in the
6586 * operand field for the instruction itself.
6588 * Our job here is to fix the operand so that it would add
6589 * the correct offset so that %ebx would point to itself. The
6590 * thing that is tricky is that .-.L66 will point to the
6591 * beginning of the instruction, so we need to further modify
6592 * the operand so that it will point to itself. There are
6593 * other cases where you have something like:
6595 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6597 * and here no correction would be required. Internally in
6598 * the assembler we treat operands of this form as not being
6599 * pcrel since the '.' is explicitly mentioned, and I wonder
6600 * whether it would simplify matters to do it this way. Who
6601 * knows. In earlier versions of the PIC patches, the
6602 * pcrel_adjust field was used to store the correction, but
6603 * since the expression is not pcrel, I felt it would be
6604 * confusing to do it this way. */
6606 if ((reloc_type == BFD_RELOC_32
6607 || reloc_type == BFD_RELOC_X86_64_32S
6608 || reloc_type == BFD_RELOC_64)
6610 && GOT_symbol == i.op[n].imms->X_add_symbol
6611 && (i.op[n].imms->X_op == O_symbol
6612 || (i.op[n].imms->X_op == O_add
6613 && ((symbol_get_value_expression
6614 (i.op[n].imms->X_op_symbol)->X_op)
6619 if (insn_start_frag == frag_now)
6620 add = (p - frag_now->fr_literal) - insn_start_off;
6625 add = insn_start_frag->fr_fix - insn_start_off;
6626 for (fr = insn_start_frag->fr_next;
6627 fr && fr != frag_now; fr = fr->fr_next)
6629 add += p - frag_now->fr_literal;
6633 reloc_type = BFD_RELOC_386_GOTPC;
6635 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6637 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6638 i.op[n].imms->X_add_number += add;
6640 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6641 i.op[n].imms, 0, reloc_type);
6647 /* x86_cons_fix_new is called via the expression parsing code when a
6648 reloc is needed. We use this hook to get the correct .got reloc. */
6649 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6650 static int cons_sign = -1;
6653 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6656 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6658 got_reloc = NO_RELOC;
6661 if (exp->X_op == O_secrel)
6663 exp->X_op = O_symbol;
6664 r = BFD_RELOC_32_SECREL;
6668 fix_new_exp (frag, off, len, exp, 0, r);
6671 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
6672 purpose of the `.dc.a' internal pseudo-op. */
6675 x86_address_bytes (void)
6677 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
6679 return stdoutput->arch_info->bits_per_address / 8;
6682 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6684 # define lex_got(reloc, adjust, types) NULL
6686 /* Parse operands of the form
6687 <symbol>@GOTOFF+<nnn>
6688 and similar .plt or .got references.
6690 If we find one, set up the correct relocation in RELOC and copy the
6691 input string, minus the `@GOTOFF' into a malloc'd buffer for
6692 parsing by the calling routine. Return this buffer, and if ADJUST
6693 is non-null set it to the length of the string we removed from the
6694 input line. Otherwise return NULL. */
6696 lex_got (enum bfd_reloc_code_real *rel,
6698 i386_operand_type *types)
6700 /* Some of the relocations depend on the size of what field is to
6701 be relocated. But in our callers i386_immediate and i386_displacement
6702 we don't yet know the operand size (this will be set by insn
6703 matching). Hence we record the word32 relocation here,
6704 and adjust the reloc according to the real size in reloc(). */
6705 static const struct {
6708 const enum bfd_reloc_code_real rel[2];
6709 const i386_operand_type types64;
6711 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6712 BFD_RELOC_X86_64_PLTOFF64 },
6713 OPERAND_TYPE_IMM64 },
6714 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6715 BFD_RELOC_X86_64_PLT32 },
6716 OPERAND_TYPE_IMM32_32S_DISP32 },
6717 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6718 BFD_RELOC_X86_64_GOTPLT64 },
6719 OPERAND_TYPE_IMM64_DISP64 },
6720 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6721 BFD_RELOC_X86_64_GOTOFF64 },
6722 OPERAND_TYPE_IMM64_DISP64 },
6723 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6724 BFD_RELOC_X86_64_GOTPCREL },
6725 OPERAND_TYPE_IMM32_32S_DISP32 },
6726 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6727 BFD_RELOC_X86_64_TLSGD },
6728 OPERAND_TYPE_IMM32_32S_DISP32 },
6729 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6730 _dummy_first_bfd_reloc_code_real },
6731 OPERAND_TYPE_NONE },
6732 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6733 BFD_RELOC_X86_64_TLSLD },
6734 OPERAND_TYPE_IMM32_32S_DISP32 },
6735 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6736 BFD_RELOC_X86_64_GOTTPOFF },
6737 OPERAND_TYPE_IMM32_32S_DISP32 },
6738 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6739 BFD_RELOC_X86_64_TPOFF32 },
6740 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6741 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6742 _dummy_first_bfd_reloc_code_real },
6743 OPERAND_TYPE_NONE },
6744 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6745 BFD_RELOC_X86_64_DTPOFF32 },
6746 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6747 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6748 _dummy_first_bfd_reloc_code_real },
6749 OPERAND_TYPE_NONE },
6750 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6751 _dummy_first_bfd_reloc_code_real },
6752 OPERAND_TYPE_NONE },
6753 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6754 BFD_RELOC_X86_64_GOT32 },
6755 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6756 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6757 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6758 OPERAND_TYPE_IMM32_32S_DISP32 },
6759 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6760 BFD_RELOC_X86_64_TLSDESC_CALL },
6761 OPERAND_TYPE_IMM32_32S_DISP32 },
6766 #if defined (OBJ_MAYBE_ELF)
6771 for (cp = input_line_pointer; *cp != '@'; cp++)
6772 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6775 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6777 int len = gotrel[j].len;
6778 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6780 if (gotrel[j].rel[object_64bit] != 0)
6783 char *tmpbuf, *past_reloc;
6785 *rel = gotrel[j].rel[object_64bit];
6791 if (flag_code != CODE_64BIT)
6793 types->bitfield.imm32 = 1;
6794 types->bitfield.disp32 = 1;
6797 *types = gotrel[j].types64;
6800 if (GOT_symbol == NULL)
6801 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6803 /* The length of the first part of our input line. */
6804 first = cp - input_line_pointer;
6806 /* The second part goes from after the reloc token until
6807 (and including) an end_of_line char or comma. */
6808 past_reloc = cp + 1 + len;
6810 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6812 second = cp + 1 - past_reloc;
6814 /* Allocate and copy string. The trailing NUL shouldn't
6815 be necessary, but be safe. */
6816 tmpbuf = (char *) xmalloc (first + second + 2);
6817 memcpy (tmpbuf, input_line_pointer, first);
6818 if (second != 0 && *past_reloc != ' ')
6819 /* Replace the relocation token with ' ', so that
6820 errors like foo@GOTOFF1 will be detected. */
6821 tmpbuf[first++] = ' ';
6822 memcpy (tmpbuf + first, past_reloc, second);
6823 tmpbuf[first + second] = '\0';
6827 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6828 gotrel[j].str, 1 << (5 + object_64bit));
6833 /* Might be a symbol version string. Don't as_bad here. */
6842 /* Parse operands of the form
6843 <symbol>@SECREL32+<nnn>
6845 If we find one, set up the correct relocation in RELOC and copy the
6846 input string, minus the `@SECREL32' into a malloc'd buffer for
6847 parsing by the calling routine. Return this buffer, and if ADJUST
6848 is non-null set it to the length of the string we removed from the
6849 input line. Otherwise return NULL.
6851 This function is copied from the ELF version above adjusted for PE targets. */
6854 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
6855 int *adjust ATTRIBUTE_UNUSED,
6856 i386_operand_type *types ATTRIBUTE_UNUSED)
6862 const enum bfd_reloc_code_real rel[2];
6863 const i386_operand_type types64;
6867 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
6868 BFD_RELOC_32_SECREL },
6869 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6875 for (cp = input_line_pointer; *cp != '@'; cp++)
6876 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6879 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6881 int len = gotrel[j].len;
6883 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6885 if (gotrel[j].rel[object_64bit] != 0)
6888 char *tmpbuf, *past_reloc;
6890 *rel = gotrel[j].rel[object_64bit];
6896 if (flag_code != CODE_64BIT)
6898 types->bitfield.imm32 = 1;
6899 types->bitfield.disp32 = 1;
6902 *types = gotrel[j].types64;
6905 /* The length of the first part of our input line. */
6906 first = cp - input_line_pointer;
6908 /* The second part goes from after the reloc token until
6909 (and including) an end_of_line char or comma. */
6910 past_reloc = cp + 1 + len;
6912 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6914 second = cp + 1 - past_reloc;
6916 /* Allocate and copy string. The trailing NUL shouldn't
6917 be necessary, but be safe. */
6918 tmpbuf = (char *) xmalloc (first + second + 2);
6919 memcpy (tmpbuf, input_line_pointer, first);
6920 if (second != 0 && *past_reloc != ' ')
6921 /* Replace the relocation token with ' ', so that
6922 errors like foo@SECLREL321 will be detected. */
6923 tmpbuf[first++] = ' ';
6924 memcpy (tmpbuf + first, past_reloc, second);
6925 tmpbuf[first + second] = '\0';
6929 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6930 gotrel[j].str, 1 << (5 + object_64bit));
6935 /* Might be a symbol version string. Don't as_bad here. */
6942 x86_cons (expressionS *exp, int size)
6944 intel_syntax = -intel_syntax;
6947 if (size == 4 || (object_64bit && size == 8))
6949 /* Handle @GOTOFF and the like in an expression. */
6951 char *gotfree_input_line;
6954 save = input_line_pointer;
6955 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6956 if (gotfree_input_line)
6957 input_line_pointer = gotfree_input_line;
6961 if (gotfree_input_line)
6963 /* expression () has merrily parsed up to the end of line,
6964 or a comma - in the wrong buffer. Transfer how far
6965 input_line_pointer has moved to the right buffer. */
6966 input_line_pointer = (save
6967 + (input_line_pointer - gotfree_input_line)
6969 free (gotfree_input_line);
6970 if (exp->X_op == O_constant
6971 || exp->X_op == O_absent
6972 || exp->X_op == O_illegal
6973 || exp->X_op == O_register
6974 || exp->X_op == O_big)
6976 char c = *input_line_pointer;
6977 *input_line_pointer = 0;
6978 as_bad (_("missing or invalid expression `%s'"), save);
6979 *input_line_pointer = c;
6986 intel_syntax = -intel_syntax;
6989 i386_intel_simplify (exp);
6993 signed_cons (int size)
6995 if (flag_code == CODE_64BIT)
7003 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
7010 if (exp.X_op == O_symbol)
7011 exp.X_op = O_secrel;
7013 emit_expr (&exp, 4);
7015 while (*input_line_pointer++ == ',');
7017 input_line_pointer--;
7018 demand_empty_rest_of_line ();
7023 i386_immediate (char *imm_start)
7025 char *save_input_line_pointer;
7026 char *gotfree_input_line;
7029 i386_operand_type types;
7031 operand_type_set (&types, ~0);
7033 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
7035 as_bad (_("at most %d immediate operands are allowed"),
7036 MAX_IMMEDIATE_OPERANDS);
7040 exp = &im_expressions[i.imm_operands++];
7041 i.op[this_operand].imms = exp;
7043 if (is_space_char (*imm_start))
7046 save_input_line_pointer = input_line_pointer;
7047 input_line_pointer = imm_start;
7049 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7050 if (gotfree_input_line)
7051 input_line_pointer = gotfree_input_line;
7053 exp_seg = expression (exp);
7056 if (*input_line_pointer)
7057 as_bad (_("junk `%s' after expression"), input_line_pointer);
7059 input_line_pointer = save_input_line_pointer;
7060 if (gotfree_input_line)
7062 free (gotfree_input_line);
7064 if (exp->X_op == O_constant || exp->X_op == O_register)
7065 exp->X_op = O_illegal;
7068 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
7072 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7073 i386_operand_type types, const char *imm_start)
7075 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
7078 as_bad (_("missing or invalid immediate expression `%s'"),
7082 else if (exp->X_op == O_constant)
7084 /* Size it properly later. */
7085 i.types[this_operand].bitfield.imm64 = 1;
7086 /* If not 64bit, sign extend val. */
7087 if (flag_code != CODE_64BIT
7088 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
7090 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
7092 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7093 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
7094 && exp_seg != absolute_section
7095 && exp_seg != text_section
7096 && exp_seg != data_section
7097 && exp_seg != bss_section
7098 && exp_seg != undefined_section
7099 && !bfd_is_com_section (exp_seg))
7101 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7105 else if (!intel_syntax && exp->X_op == O_register)
7108 as_bad (_("illegal immediate register operand %s"), imm_start);
7113 /* This is an address. The size of the address will be
7114 determined later, depending on destination register,
7115 suffix, or the default for the section. */
7116 i.types[this_operand].bitfield.imm8 = 1;
7117 i.types[this_operand].bitfield.imm16 = 1;
7118 i.types[this_operand].bitfield.imm32 = 1;
7119 i.types[this_operand].bitfield.imm32s = 1;
7120 i.types[this_operand].bitfield.imm64 = 1;
7121 i.types[this_operand] = operand_type_and (i.types[this_operand],
7129 i386_scale (char *scale)
7132 char *save = input_line_pointer;
7134 input_line_pointer = scale;
7135 val = get_absolute_expression ();
7140 i.log2_scale_factor = 0;
7143 i.log2_scale_factor = 1;
7146 i.log2_scale_factor = 2;
7149 i.log2_scale_factor = 3;
7153 char sep = *input_line_pointer;
7155 *input_line_pointer = '\0';
7156 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7158 *input_line_pointer = sep;
7159 input_line_pointer = save;
7163 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7165 as_warn (_("scale factor of %d without an index register"),
7166 1 << i.log2_scale_factor);
7167 i.log2_scale_factor = 0;
7169 scale = input_line_pointer;
7170 input_line_pointer = save;
7175 i386_displacement (char *disp_start, char *disp_end)
7179 char *save_input_line_pointer;
7180 char *gotfree_input_line;
7182 i386_operand_type bigdisp, types = anydisp;
7185 if (i.disp_operands == MAX_MEMORY_OPERANDS)
7187 as_bad (_("at most %d displacement operands are allowed"),
7188 MAX_MEMORY_OPERANDS);
7192 operand_type_set (&bigdisp, 0);
7193 if ((i.types[this_operand].bitfield.jumpabsolute)
7194 || (!current_templates->start->opcode_modifier.jump
7195 && !current_templates->start->opcode_modifier.jumpdword))
7197 bigdisp.bitfield.disp32 = 1;
7198 override = (i.prefix[ADDR_PREFIX] != 0);
7199 if (flag_code == CODE_64BIT)
7203 bigdisp.bitfield.disp32s = 1;
7204 bigdisp.bitfield.disp64 = 1;
7207 else if ((flag_code == CODE_16BIT) ^ override)
7209 bigdisp.bitfield.disp32 = 0;
7210 bigdisp.bitfield.disp16 = 1;
7215 /* For PC-relative branches, the width of the displacement
7216 is dependent upon data size, not address size. */
7217 override = (i.prefix[DATA_PREFIX] != 0);
7218 if (flag_code == CODE_64BIT)
7220 if (override || i.suffix == WORD_MNEM_SUFFIX)
7221 bigdisp.bitfield.disp16 = 1;
7224 bigdisp.bitfield.disp32 = 1;
7225 bigdisp.bitfield.disp32s = 1;
7231 override = (i.suffix == (flag_code != CODE_16BIT
7233 : LONG_MNEM_SUFFIX));
7234 bigdisp.bitfield.disp32 = 1;
7235 if ((flag_code == CODE_16BIT) ^ override)
7237 bigdisp.bitfield.disp32 = 0;
7238 bigdisp.bitfield.disp16 = 1;
7242 i.types[this_operand] = operand_type_or (i.types[this_operand],
7245 exp = &disp_expressions[i.disp_operands];
7246 i.op[this_operand].disps = exp;
7248 save_input_line_pointer = input_line_pointer;
7249 input_line_pointer = disp_start;
7250 END_STRING_AND_SAVE (disp_end);
7252 #ifndef GCC_ASM_O_HACK
7253 #define GCC_ASM_O_HACK 0
7256 END_STRING_AND_SAVE (disp_end + 1);
7257 if (i.types[this_operand].bitfield.baseIndex
7258 && displacement_string_end[-1] == '+')
7260 /* This hack is to avoid a warning when using the "o"
7261 constraint within gcc asm statements.
7264 #define _set_tssldt_desc(n,addr,limit,type) \
7265 __asm__ __volatile__ ( \
7267 "movw %w1,2+%0\n\t" \
7269 "movb %b1,4+%0\n\t" \
7270 "movb %4,5+%0\n\t" \
7271 "movb $0,6+%0\n\t" \
7272 "movb %h1,7+%0\n\t" \
7274 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7276 This works great except that the output assembler ends
7277 up looking a bit weird if it turns out that there is
7278 no offset. You end up producing code that looks like:
7291 So here we provide the missing zero. */
7293 *displacement_string_end = '0';
7296 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7297 if (gotfree_input_line)
7298 input_line_pointer = gotfree_input_line;
7300 exp_seg = expression (exp);
7303 if (*input_line_pointer)
7304 as_bad (_("junk `%s' after expression"), input_line_pointer);
7306 RESTORE_END_STRING (disp_end + 1);
7308 input_line_pointer = save_input_line_pointer;
7309 if (gotfree_input_line)
7311 free (gotfree_input_line);
7313 if (exp->X_op == O_constant || exp->X_op == O_register)
7314 exp->X_op = O_illegal;
7317 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7319 RESTORE_END_STRING (disp_end);
7325 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7326 i386_operand_type types, const char *disp_start)
7328 i386_operand_type bigdisp;
7331 /* We do this to make sure that the section symbol is in
7332 the symbol table. We will ultimately change the relocation
7333 to be relative to the beginning of the section. */
7334 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7335 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7336 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7338 if (exp->X_op != O_symbol)
7341 if (S_IS_LOCAL (exp->X_add_symbol)
7342 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7343 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7344 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7345 exp->X_op = O_subtract;
7346 exp->X_op_symbol = GOT_symbol;
7347 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7348 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7349 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7350 i.reloc[this_operand] = BFD_RELOC_64;
7352 i.reloc[this_operand] = BFD_RELOC_32;
7355 else if (exp->X_op == O_absent
7356 || exp->X_op == O_illegal
7357 || exp->X_op == O_big)
7360 as_bad (_("missing or invalid displacement expression `%s'"),
7365 else if (flag_code == CODE_64BIT
7366 && !i.prefix[ADDR_PREFIX]
7367 && exp->X_op == O_constant)
7369 /* Since displacement is signed extended to 64bit, don't allow
7370 disp32 and turn off disp32s if they are out of range. */
7371 i.types[this_operand].bitfield.disp32 = 0;
7372 if (!fits_in_signed_long (exp->X_add_number))
7374 i.types[this_operand].bitfield.disp32s = 0;
7375 if (i.types[this_operand].bitfield.baseindex)
7377 as_bad (_("0x%lx out range of signed 32bit displacement"),
7378 (long) exp->X_add_number);
7384 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7385 else if (exp->X_op != O_constant
7386 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7387 && exp_seg != absolute_section
7388 && exp_seg != text_section
7389 && exp_seg != data_section
7390 && exp_seg != bss_section
7391 && exp_seg != undefined_section
7392 && !bfd_is_com_section (exp_seg))
7394 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7399 /* Check if this is a displacement only operand. */
7400 bigdisp = i.types[this_operand];
7401 bigdisp.bitfield.disp8 = 0;
7402 bigdisp.bitfield.disp16 = 0;
7403 bigdisp.bitfield.disp32 = 0;
7404 bigdisp.bitfield.disp32s = 0;
7405 bigdisp.bitfield.disp64 = 0;
7406 if (operand_type_all_zero (&bigdisp))
7407 i.types[this_operand] = operand_type_and (i.types[this_operand],
7413 /* Make sure the memory operand we've been dealt is valid.
7414 Return 1 on success, 0 on a failure. */
7417 i386_index_check (const char *operand_string)
7420 const char *kind = "base/index";
7421 #if INFER_ADDR_PREFIX
7427 if (current_templates->start->opcode_modifier.isstring
7428 && !current_templates->start->opcode_modifier.immext
7429 && (current_templates->end[-1].opcode_modifier.isstring
7432 /* Memory operands of string insns are special in that they only allow
7433 a single register (rDI, rSI, or rBX) as their memory address. */
7434 unsigned int expected;
7436 kind = "string address";
7438 if (current_templates->start->opcode_modifier.w)
7440 i386_operand_type type = current_templates->end[-1].operand_types[0];
7442 if (!type.bitfield.baseindex
7443 || ((!i.mem_operands != !intel_syntax)
7444 && current_templates->end[-1].operand_types[1]
7445 .bitfield.baseindex))
7446 type = current_templates->end[-1].operand_types[1];
7447 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7450 expected = 3 /* rBX */;
7452 if (!i.base_reg || i.index_reg
7453 || operand_type_check (i.types[this_operand], disp))
7455 else if (!(flag_code == CODE_64BIT
7456 ? i.prefix[ADDR_PREFIX]
7457 ? i.base_reg->reg_type.bitfield.reg32
7458 : i.base_reg->reg_type.bitfield.reg64
7459 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7460 ? i.base_reg->reg_type.bitfield.reg32
7461 : i.base_reg->reg_type.bitfield.reg16))
7463 else if (register_number (i.base_reg) != expected)
7470 for (j = 0; j < i386_regtab_size; ++j)
7471 if ((flag_code == CODE_64BIT
7472 ? i.prefix[ADDR_PREFIX]
7473 ? i386_regtab[j].reg_type.bitfield.reg32
7474 : i386_regtab[j].reg_type.bitfield.reg64
7475 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7476 ? i386_regtab[j].reg_type.bitfield.reg32
7477 : i386_regtab[j].reg_type.bitfield.reg16)
7478 && register_number(i386_regtab + j) == expected)
7480 gas_assert (j < i386_regtab_size);
7481 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7483 intel_syntax ? '[' : '(',
7485 i386_regtab[j].reg_name,
7486 intel_syntax ? ']' : ')');
7490 else if (flag_code == CODE_64BIT)
7493 && ((i.prefix[ADDR_PREFIX] == 0
7494 && !i.base_reg->reg_type.bitfield.reg64)
7495 || (i.prefix[ADDR_PREFIX]
7496 && !i.base_reg->reg_type.bitfield.reg32))
7498 || i.base_reg->reg_num !=
7499 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7501 && !(i.index_reg->reg_type.bitfield.regxmm
7502 || i.index_reg->reg_type.bitfield.regymm)
7503 && (!i.index_reg->reg_type.bitfield.baseindex
7504 || (i.prefix[ADDR_PREFIX] == 0
7505 && i.index_reg->reg_num != RegRiz
7506 && !i.index_reg->reg_type.bitfield.reg64
7508 || (i.prefix[ADDR_PREFIX]
7509 && i.index_reg->reg_num != RegEiz
7510 && !i.index_reg->reg_type.bitfield.reg32))))
7515 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7519 && (!i.base_reg->reg_type.bitfield.reg16
7520 || !i.base_reg->reg_type.bitfield.baseindex))
7522 && (!i.index_reg->reg_type.bitfield.reg16
7523 || !i.index_reg->reg_type.bitfield.baseindex
7525 && i.base_reg->reg_num < 6
7526 && i.index_reg->reg_num >= 6
7527 && i.log2_scale_factor == 0))))
7534 && !i.base_reg->reg_type.bitfield.reg32)
7536 && !i.index_reg->reg_type.bitfield.regxmm
7537 && !i.index_reg->reg_type.bitfield.regymm
7538 && ((!i.index_reg->reg_type.bitfield.reg32
7539 && i.index_reg->reg_num != RegEiz)
7540 || !i.index_reg->reg_type.bitfield.baseindex)))
7546 #if INFER_ADDR_PREFIX
7547 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7549 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7551 /* Change the size of any displacement too. At most one of
7552 Disp16 or Disp32 is set.
7553 FIXME. There doesn't seem to be any real need for separate
7554 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7555 Removing them would probably clean up the code quite a lot. */
7556 if (flag_code != CODE_64BIT
7557 && (i.types[this_operand].bitfield.disp16
7558 || i.types[this_operand].bitfield.disp32))
7559 i.types[this_operand]
7560 = operand_type_xor (i.types[this_operand], disp16_32);
7565 as_bad (_("`%s' is not a valid %s expression"),
7570 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7572 flag_code_names[i.prefix[ADDR_PREFIX]
7573 ? flag_code == CODE_32BIT
7582 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7586 i386_att_operand (char *operand_string)
7590 char *op_string = operand_string;
7592 if (is_space_char (*op_string))
7595 /* We check for an absolute prefix (differentiating,
7596 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'. */
7597 if (*op_string == ABSOLUTE_PREFIX)
7600 if (is_space_char (*op_string))
7602 i.types[this_operand].bitfield.jumpabsolute = 1;
7605 /* Check if operand is a register. */
7606 if ((r = parse_register (op_string, &end_op)) != NULL)
7608 i386_operand_type temp;
7610 /* Check for a segment override by searching for ':' after a
7611 segment register. */
7613 if (is_space_char (*op_string))
7615 if (*op_string == ':'
7616 && (r->reg_type.bitfield.sreg2
7617 || r->reg_type.bitfield.sreg3))
7622 i.seg[i.mem_operands] = &es;
7625 i.seg[i.mem_operands] = &cs;
7628 i.seg[i.mem_operands] = &ss;
7631 i.seg[i.mem_operands] = &ds;
7634 i.seg[i.mem_operands] = &fs;
7637 i.seg[i.mem_operands] = &gs;
7641 /* Skip the ':' and whitespace. */
7643 if (is_space_char (*op_string))
7646 if (!is_digit_char (*op_string)
7647 && !is_identifier_char (*op_string)
7648 && *op_string != '('
7649 && *op_string != ABSOLUTE_PREFIX)
7651 as_bad (_("bad memory operand `%s'"), op_string);
7654 /* Handle case of %es:*foo. */
7655 if (*op_string == ABSOLUTE_PREFIX)
7658 if (is_space_char (*op_string))
7660 i.types[this_operand].bitfield.jumpabsolute = 1;
7662 goto do_memory_reference;
7666 as_bad (_("junk `%s' after register"), op_string);
7670 temp.bitfield.baseindex = 0;
7671 i.types[this_operand] = operand_type_or (i.types[this_operand],
7673 i.types[this_operand].bitfield.unspecified = 0;
7674 i.op[this_operand].regs = r;
7677 else if (*op_string == REGISTER_PREFIX)
7679 as_bad (_("bad register name `%s'"), op_string);
7682 else if (*op_string == IMMEDIATE_PREFIX)
7685 if (i.types[this_operand].bitfield.jumpabsolute)
7687 as_bad (_("immediate operand illegal with absolute jump"));
7690 if (!i386_immediate (op_string))
7693 else if (is_digit_char (*op_string)
7694 || is_identifier_char (*op_string)
7695 || *op_string == '(')
7697 /* This is a memory reference of some sort. */
7700 /* Start and end of displacement string expression (if found). */
7701 char *displacement_string_start;
7702 char *displacement_string_end;
7704 do_memory_reference:
7705 if ((i.mem_operands == 1
7706 && !current_templates->start->opcode_modifier.isstring)
7707 || i.mem_operands == 2)
7709 as_bad (_("too many memory references for `%s'"),
7710 current_templates->start->name);
7714 /* Check for base index form. We detect the base index form by
7715 looking for an ')' at the end of the operand, searching
7716 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7718 base_string = op_string + strlen (op_string);
7721 if (is_space_char (*base_string))
7724 /* If we only have a displacement, set-up for it to be parsed later. */
7725 displacement_string_start = op_string;
7726 displacement_string_end = base_string + 1;
7728 if (*base_string == ')')
7731 unsigned int parens_balanced = 1;
7732 /* We've already checked that the number of left & right ()'s are
7733 equal, so this loop will not be infinite. */
7737 if (*base_string == ')')
7739 if (*base_string == '(')
7742 while (parens_balanced);
7744 temp_string = base_string;
7746 /* Skip past '(' and whitespace. */
7748 if (is_space_char (*base_string))
7751 if (*base_string == ','
7752 || ((i.base_reg = parse_register (base_string, &end_op))
7755 displacement_string_end = temp_string;
7757 i.types[this_operand].bitfield.baseindex = 1;
7761 base_string = end_op;
7762 if (is_space_char (*base_string))
7766 /* There may be an index reg or scale factor here. */
7767 if (*base_string == ',')
7770 if (is_space_char (*base_string))
7773 if ((i.index_reg = parse_register (base_string, &end_op))
7776 base_string = end_op;
7777 if (is_space_char (*base_string))
7779 if (*base_string == ',')
7782 if (is_space_char (*base_string))
7785 else if (*base_string != ')')
7787 as_bad (_("expecting `,' or `)' "
7788 "after index register in `%s'"),
7793 else if (*base_string == REGISTER_PREFIX)
7795 end_op = strchr (base_string, ',');
7798 as_bad (_("bad register name `%s'"), base_string);
7802 /* Check for scale factor. */
7803 if (*base_string != ')')
7805 char *end_scale = i386_scale (base_string);
7810 base_string = end_scale;
7811 if (is_space_char (*base_string))
7813 if (*base_string != ')')
7815 as_bad (_("expecting `)' "
7816 "after scale factor in `%s'"),
7821 else if (!i.index_reg)
7823 as_bad (_("expecting index register or scale factor "
7824 "after `,'; got '%c'"),
7829 else if (*base_string != ')')
7831 as_bad (_("expecting `,' or `)' "
7832 "after base register in `%s'"),
7837 else if (*base_string == REGISTER_PREFIX)
7839 end_op = strchr (base_string, ',');
7842 as_bad (_("bad register name `%s'"), base_string);
7847 /* If there's an expression beginning the operand, parse it,
7848 assuming displacement_string_start and
7849 displacement_string_end are meaningful. */
7850 if (displacement_string_start != displacement_string_end)
7852 if (!i386_displacement (displacement_string_start,
7853 displacement_string_end))
7857 /* Special case for (%dx) while doing input/output op. */
7859 && operand_type_equal (&i.base_reg->reg_type,
7860 ®16_inoutportreg)
7862 && i.log2_scale_factor == 0
7863 && i.seg[i.mem_operands] == 0
7864 && !operand_type_check (i.types[this_operand], disp))
7866 i.types[this_operand] = inoutportreg;
7870 if (i386_index_check (operand_string) == 0)
7872 i.types[this_operand].bitfield.mem = 1;
7877 /* It's not a memory operand; argh! */
7878 as_bad (_("invalid char %s beginning operand %d `%s'"),
7879 output_invalid (*op_string),
7884 return 1; /* Normal return. */
7887 /* Calculate the maximum variable size (i.e., excluding fr_fix)
7888 that an rs_machine_dependent frag may reach. */
7891 i386_frag_max_var (fragS *frag)
7893 /* The only relaxable frags are for jumps.
7894 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
7895 gas_assert (frag->fr_type == rs_machine_dependent);
7896 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
7899 /* md_estimate_size_before_relax()
7901 Called just before relax() for rs_machine_dependent frags. The x86
7902 assembler uses these frags to handle variable size jump
7905 Any symbol that is now undefined will not become defined.
7906 Return the correct fr_subtype in the frag.
7907 Return the initial "guess for variable size of frag" to caller.
7908 The guess is actually the growth beyond the fixed part. Whatever
7909 we do to grow the fixed or variable part contributes to our
7913 md_estimate_size_before_relax (fragS *fragP, segT segment)
7915 /* We've already got fragP->fr_subtype right; all we have to do is
7916 check for un-relaxable symbols. On an ELF system, we can't relax
7917 an externally visible symbol, because it may be overridden by a
7919 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7920 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7922 && (S_IS_EXTERNAL (fragP->fr_symbol)
7923 || S_IS_WEAK (fragP->fr_symbol)
7924 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7925 & BSF_GNU_INDIRECT_FUNCTION))))
7927 #if defined (OBJ_COFF) && defined (TE_PE)
7928 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7929 && S_IS_WEAK (fragP->fr_symbol))
7933 /* Symbol is undefined in this segment, or we need to keep a
7934 reloc so that weak symbols can be overridden. */
7935 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7936 enum bfd_reloc_code_real reloc_type;
7937 unsigned char *opcode;
7940 if (fragP->fr_var != NO_RELOC)
7941 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7943 reloc_type = BFD_RELOC_16_PCREL;
7945 reloc_type = BFD_RELOC_32_PCREL;
7947 old_fr_fix = fragP->fr_fix;
7948 opcode = (unsigned char *) fragP->fr_opcode;
7950 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7953 /* Make jmp (0xeb) a (d)word displacement jump. */
7955 fragP->fr_fix += size;
7956 fix_new (fragP, old_fr_fix, size,
7958 fragP->fr_offset, 1,
7964 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7966 /* Negate the condition, and branch past an
7967 unconditional jump. */
7970 /* Insert an unconditional jump. */
7972 /* We added two extra opcode bytes, and have a two byte
7974 fragP->fr_fix += 2 + 2;
7975 fix_new (fragP, old_fr_fix + 2, 2,
7977 fragP->fr_offset, 1,
7984 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7989 fixP = fix_new (fragP, old_fr_fix, 1,
7991 fragP->fr_offset, 1,
7993 fixP->fx_signed = 1;
7997 /* This changes the byte-displacement jump 0x7N
7998 to the (d)word-displacement jump 0x0f,0x8N. */
7999 opcode[1] = opcode[0] + 0x10;
8000 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8001 /* We've added an opcode byte. */
8002 fragP->fr_fix += 1 + size;
8003 fix_new (fragP, old_fr_fix + 1, size,
8005 fragP->fr_offset, 1,
8010 BAD_CASE (fragP->fr_subtype);
8014 return fragP->fr_fix - old_fr_fix;
8017 /* Guess size depending on current relax state. Initially the relax
8018 state will correspond to a short jump and we return 1, because
8019 the variable part of the frag (the branch offset) is one byte
8020 long. However, we can relax a section more than once and in that
8021 case we must either set fr_subtype back to the unrelaxed state,
8022 or return the value for the appropriate branch. */
8023 return md_relax_table[fragP->fr_subtype].rlx_length;
8026 /* Called after relax() is finished.
8028 In: Address of frag.
8029 fr_type == rs_machine_dependent.
8030 fr_subtype is what the address relaxed to.
8032 Out: Any fixSs and constants are set up.
8033 Caller will turn frag into a ".space 0". */
8036 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
8039 unsigned char *opcode;
8040 unsigned char *where_to_put_displacement = NULL;
8041 offsetT target_address;
8042 offsetT opcode_address;
8043 unsigned int extension = 0;
8044 offsetT displacement_from_opcode_start;
8046 opcode = (unsigned char *) fragP->fr_opcode;
8048 /* Address we want to reach in file space. */
8049 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
8051 /* Address opcode resides at in file space. */
8052 opcode_address = fragP->fr_address + fragP->fr_fix;
8054 /* Displacement from opcode start to fill into instruction. */
8055 displacement_from_opcode_start = target_address - opcode_address;
8057 if ((fragP->fr_subtype & BIG) == 0)
8059 /* Don't have to change opcode. */
8060 extension = 1; /* 1 opcode + 1 displacement */
8061 where_to_put_displacement = &opcode[1];
8065 if (no_cond_jump_promotion
8066 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
8067 as_warn_where (fragP->fr_file, fragP->fr_line,
8068 _("long jump required"));
8070 switch (fragP->fr_subtype)
8072 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
8073 extension = 4; /* 1 opcode + 4 displacement */
8075 where_to_put_displacement = &opcode[1];
8078 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
8079 extension = 2; /* 1 opcode + 2 displacement */
8081 where_to_put_displacement = &opcode[1];
8084 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
8085 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
8086 extension = 5; /* 2 opcode + 4 displacement */
8087 opcode[1] = opcode[0] + 0x10;
8088 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8089 where_to_put_displacement = &opcode[2];
8092 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
8093 extension = 3; /* 2 opcode + 2 displacement */
8094 opcode[1] = opcode[0] + 0x10;
8095 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8096 where_to_put_displacement = &opcode[2];
8099 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
8104 where_to_put_displacement = &opcode[3];
8108 BAD_CASE (fragP->fr_subtype);
8113 /* If size if less then four we are sure that the operand fits,
8114 but if it's 4, then it could be that the displacement is larger
8116 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
8118 && ((addressT) (displacement_from_opcode_start - extension
8119 + ((addressT) 1 << 31))
8120 > (((addressT) 2 << 31) - 1)))
8122 as_bad_where (fragP->fr_file, fragP->fr_line,
8123 _("jump target out of range"));
8124 /* Make us emit 0. */
8125 displacement_from_opcode_start = extension;
8127 /* Now put displacement after opcode. */
8128 md_number_to_chars ((char *) where_to_put_displacement,
8129 (valueT) (displacement_from_opcode_start - extension),
8130 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
8131 fragP->fr_fix += extension;
8134 /* Apply a fixup (fixP) to segment data, once it has been determined
8135 by our caller that we have all the info we need to fix it up.
8137 Parameter valP is the pointer to the value of the bits.
8139 On the 386, immediates, displacements, and data pointers are all in
8140 the same (little-endian) format, so we don't need to care about which
8144 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
8146 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
8147 valueT value = *valP;
8149 #if !defined (TE_Mach)
8152 switch (fixP->fx_r_type)
8158 fixP->fx_r_type = BFD_RELOC_64_PCREL;
8161 case BFD_RELOC_X86_64_32S:
8162 fixP->fx_r_type = BFD_RELOC_32_PCREL;
8165 fixP->fx_r_type = BFD_RELOC_16_PCREL;
8168 fixP->fx_r_type = BFD_RELOC_8_PCREL;
8173 if (fixP->fx_addsy != NULL
8174 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
8175 || fixP->fx_r_type == BFD_RELOC_64_PCREL
8176 || fixP->fx_r_type == BFD_RELOC_16_PCREL
8177 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
8178 && !use_rela_relocations)
8180 /* This is a hack. There should be a better way to handle this.
8181 This covers for the fact that bfd_install_relocation will
8182 subtract the current location (for partial_inplace, PC relative
8183 relocations); see more below. */
8187 || OUTPUT_FLAVOR == bfd_target_coff_flavour
8190 value += fixP->fx_where + fixP->fx_frag->fr_address;
8192 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8195 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
8198 || (symbol_section_p (fixP->fx_addsy)
8199 && sym_seg != absolute_section))
8200 && !generic_force_reloc (fixP))
8202 /* Yes, we add the values in twice. This is because
8203 bfd_install_relocation subtracts them out again. I think
8204 bfd_install_relocation is broken, but I don't dare change
8206 value += fixP->fx_where + fixP->fx_frag->fr_address;
8210 #if defined (OBJ_COFF) && defined (TE_PE)
8211 /* For some reason, the PE format does not store a
8212 section address offset for a PC relative symbol. */
8213 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
8214 || S_IS_WEAK (fixP->fx_addsy))
8215 value += md_pcrel_from (fixP);
8218 #if defined (OBJ_COFF) && defined (TE_PE)
8219 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8221 value -= S_GET_VALUE (fixP->fx_addsy);
8225 /* Fix a few things - the dynamic linker expects certain values here,
8226 and we must not disappoint it. */
8227 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8228 if (IS_ELF && fixP->fx_addsy)
8229 switch (fixP->fx_r_type)
8231 case BFD_RELOC_386_PLT32:
8232 case BFD_RELOC_X86_64_PLT32:
8233 /* Make the jump instruction point to the address of the operand. At
8234 runtime we merely add the offset to the actual PLT entry. */
8238 case BFD_RELOC_386_TLS_GD:
8239 case BFD_RELOC_386_TLS_LDM:
8240 case BFD_RELOC_386_TLS_IE_32:
8241 case BFD_RELOC_386_TLS_IE:
8242 case BFD_RELOC_386_TLS_GOTIE:
8243 case BFD_RELOC_386_TLS_GOTDESC:
8244 case BFD_RELOC_X86_64_TLSGD:
8245 case BFD_RELOC_X86_64_TLSLD:
8246 case BFD_RELOC_X86_64_GOTTPOFF:
8247 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8248 value = 0; /* Fully resolved at runtime. No addend. */
8250 case BFD_RELOC_386_TLS_LE:
8251 case BFD_RELOC_386_TLS_LDO_32:
8252 case BFD_RELOC_386_TLS_LE_32:
8253 case BFD_RELOC_X86_64_DTPOFF32:
8254 case BFD_RELOC_X86_64_DTPOFF64:
8255 case BFD_RELOC_X86_64_TPOFF32:
8256 case BFD_RELOC_X86_64_TPOFF64:
8257 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8260 case BFD_RELOC_386_TLS_DESC_CALL:
8261 case BFD_RELOC_X86_64_TLSDESC_CALL:
8262 value = 0; /* Fully resolved at runtime. No addend. */
8263 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8267 case BFD_RELOC_386_GOT32:
8268 case BFD_RELOC_X86_64_GOT32:
8269 value = 0; /* Fully resolved at runtime. No addend. */
8272 case BFD_RELOC_VTABLE_INHERIT:
8273 case BFD_RELOC_VTABLE_ENTRY:
8280 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8282 #endif /* !defined (TE_Mach) */
8284 /* Are we finished with this relocation now? */
8285 if (fixP->fx_addsy == NULL)
8287 #if defined (OBJ_COFF) && defined (TE_PE)
8288 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8291 /* Remember value for tc_gen_reloc. */
8292 fixP->fx_addnumber = value;
8293 /* Clear out the frag for now. */
8297 else if (use_rela_relocations)
8299 fixP->fx_no_overflow = 1;
8300 /* Remember value for tc_gen_reloc. */
8301 fixP->fx_addnumber = value;
8305 md_number_to_chars (p, value, fixP->fx_size);
8309 md_atof (int type, char *litP, int *sizeP)
8311 /* This outputs the LITTLENUMs in REVERSE order;
8312 in accord with the bigendian 386. */
8313 return ieee_md_atof (type, litP, sizeP, FALSE);
8316 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8319 output_invalid (int c)
8322 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8325 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8326 "(0x%x)", (unsigned char) c);
8327 return output_invalid_buf;
8330 /* REG_STRING starts *before* REGISTER_PREFIX. */
8332 static const reg_entry *
8333 parse_real_register (char *reg_string, char **end_op)
8335 char *s = reg_string;
8337 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8340 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8341 if (*s == REGISTER_PREFIX)
8344 if (is_space_char (*s))
8348 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8350 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8351 return (const reg_entry *) NULL;
8355 /* For naked regs, make sure that we are not dealing with an identifier.
8356 This prevents confusing an identifier like `eax_var' with register
8358 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8359 return (const reg_entry *) NULL;
8363 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8365 /* Handle floating point regs, allowing spaces in the (i) part. */
8366 if (r == i386_regtab /* %st is first entry of table */)
8368 if (is_space_char (*s))
8373 if (is_space_char (*s))
8375 if (*s >= '0' && *s <= '7')
8379 if (is_space_char (*s))
8384 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8389 /* We have "%st(" then garbage. */
8390 return (const reg_entry *) NULL;
8394 if (r == NULL || allow_pseudo_reg)
8397 if (operand_type_all_zero (&r->reg_type))
8398 return (const reg_entry *) NULL;
8400 if ((r->reg_type.bitfield.reg32
8401 || r->reg_type.bitfield.sreg3
8402 || r->reg_type.bitfield.control
8403 || r->reg_type.bitfield.debug
8404 || r->reg_type.bitfield.test)
8405 && !cpu_arch_flags.bitfield.cpui386)
8406 return (const reg_entry *) NULL;
8408 if (r->reg_type.bitfield.floatreg
8409 && !cpu_arch_flags.bitfield.cpu8087
8410 && !cpu_arch_flags.bitfield.cpu287
8411 && !cpu_arch_flags.bitfield.cpu387)
8412 return (const reg_entry *) NULL;
8414 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8415 return (const reg_entry *) NULL;
8417 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8418 return (const reg_entry *) NULL;
8420 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8421 return (const reg_entry *) NULL;
8423 /* Don't allow fake index register unless allow_index_reg isn't 0. */
8424 if (!allow_index_reg
8425 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8426 return (const reg_entry *) NULL;
8428 if (((r->reg_flags & (RegRex64 | RegRex))
8429 || r->reg_type.bitfield.reg64)
8430 && (!cpu_arch_flags.bitfield.cpulm
8431 || !operand_type_equal (&r->reg_type, &control))
8432 && flag_code != CODE_64BIT)
8433 return (const reg_entry *) NULL;
8435 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8436 return (const reg_entry *) NULL;
8441 /* REG_STRING starts *before* REGISTER_PREFIX. */
8443 static const reg_entry *
8444 parse_register (char *reg_string, char **end_op)
8448 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8449 r = parse_real_register (reg_string, end_op);
8454 char *save = input_line_pointer;
8458 input_line_pointer = reg_string;
8459 c = get_symbol_end ();
8460 symbolP = symbol_find (reg_string);
8461 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8463 const expressionS *e = symbol_get_value_expression (symbolP);
8465 know (e->X_op == O_register);
8466 know (e->X_add_number >= 0
8467 && (valueT) e->X_add_number < i386_regtab_size);
8468 r = i386_regtab + e->X_add_number;
8469 *end_op = input_line_pointer;
8471 *input_line_pointer = c;
8472 input_line_pointer = save;
8478 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8481 char *end = input_line_pointer;
8484 r = parse_register (name, &input_line_pointer);
8485 if (r && end <= input_line_pointer)
8487 *nextcharP = *input_line_pointer;
8488 *input_line_pointer = 0;
8489 e->X_op = O_register;
8490 e->X_add_number = r - i386_regtab;
8493 input_line_pointer = end;
8495 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8499 md_operand (expressionS *e)
8504 switch (*input_line_pointer)
8506 case REGISTER_PREFIX:
8507 r = parse_real_register (input_line_pointer, &end);
8510 e->X_op = O_register;
8511 e->X_add_number = r - i386_regtab;
8512 input_line_pointer = end;
8517 gas_assert (intel_syntax);
8518 end = input_line_pointer++;
8520 if (*input_line_pointer == ']')
8522 ++input_line_pointer;
8523 e->X_op_symbol = make_expr_symbol (e);
8524 e->X_add_symbol = NULL;
8525 e->X_add_number = 0;
8531 input_line_pointer = end;
8538 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8539 const char *md_shortopts = "kVQ:sqn";
8541 const char *md_shortopts = "qn";
8544 #define OPTION_32 (OPTION_MD_BASE + 0)
8545 #define OPTION_64 (OPTION_MD_BASE + 1)
8546 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8547 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8548 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8549 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8550 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8551 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8552 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8553 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8554 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8555 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8556 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
8557 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
8558 #define OPTION_X32 (OPTION_MD_BASE + 14)
8560 struct option md_longopts[] =
8562 {"32", no_argument, NULL, OPTION_32},
8563 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8564 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8565 {"64", no_argument, NULL, OPTION_64},
8567 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8568 {"x32", no_argument, NULL, OPTION_X32},
8570 {"divide", no_argument, NULL, OPTION_DIVIDE},
8571 {"march", required_argument, NULL, OPTION_MARCH},
8572 {"mtune", required_argument, NULL, OPTION_MTUNE},
8573 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8574 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8575 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8576 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8577 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8578 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8579 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8580 {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
8581 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8582 {NULL, no_argument, NULL, 0}
8584 size_t md_longopts_size = sizeof (md_longopts);
8587 md_parse_option (int c, char *arg)
8595 optimize_align_code = 0;
8602 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8603 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8604 should be emitted or not. FIXME: Not implemented. */
8608 /* -V: SVR4 argument to print version ID. */
8610 print_version_id ();
8613 /* -k: Ignore for FreeBSD compatibility. */
8618 /* -s: On i386 Solaris, this tells the native assembler to use
8619 .stab instead of .stab.excl. We always use .stab anyhow. */
8622 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8623 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8626 const char **list, **l;
8628 list = bfd_target_list ();
8629 for (l = list; *l != NULL; l++)
8630 if (CONST_STRNEQ (*l, "elf64-x86-64")
8631 || strcmp (*l, "coff-x86-64") == 0
8632 || strcmp (*l, "pe-x86-64") == 0
8633 || strcmp (*l, "pei-x86-64") == 0
8634 || strcmp (*l, "mach-o-x86-64") == 0)
8636 default_arch = "x86_64";
8640 as_fatal (_("no compiled in support for x86_64"));
8646 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8650 const char **list, **l;
8652 list = bfd_target_list ();
8653 for (l = list; *l != NULL; l++)
8654 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8656 default_arch = "x86_64:32";
8660 as_fatal (_("no compiled in support for 32bit x86_64"));
8664 as_fatal (_("32bit x86_64 is only supported for ELF"));
8669 default_arch = "i386";
8673 #ifdef SVR4_COMMENT_CHARS
8678 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8680 for (s = i386_comment_chars; *s != '\0'; s++)
8684 i386_comment_chars = n;
8690 arch = xstrdup (arg);
8694 as_fatal (_("invalid -march= option: `%s'"), arg);
8695 next = strchr (arch, '+');
8698 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8700 if (strcmp (arch, cpu_arch [j].name) == 0)
8703 if (! cpu_arch[j].flags.bitfield.cpui386)
8706 cpu_arch_name = cpu_arch[j].name;
8707 cpu_sub_arch_name = NULL;
8708 cpu_arch_flags = cpu_arch[j].flags;
8709 cpu_arch_isa = cpu_arch[j].type;
8710 cpu_arch_isa_flags = cpu_arch[j].flags;
8711 if (!cpu_arch_tune_set)
8713 cpu_arch_tune = cpu_arch_isa;
8714 cpu_arch_tune_flags = cpu_arch_isa_flags;
8718 else if (*cpu_arch [j].name == '.'
8719 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8721 /* ISA entension. */
8722 i386_cpu_flags flags;
8724 if (!cpu_arch[j].negated)
8725 flags = cpu_flags_or (cpu_arch_flags,
8728 flags = cpu_flags_and_not (cpu_arch_flags,
8730 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8732 if (cpu_sub_arch_name)
8734 char *name = cpu_sub_arch_name;
8735 cpu_sub_arch_name = concat (name,
8737 (const char *) NULL);
8741 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8742 cpu_arch_flags = flags;
8743 cpu_arch_isa_flags = flags;
8749 if (j >= ARRAY_SIZE (cpu_arch))
8750 as_fatal (_("invalid -march= option: `%s'"), arg);
8754 while (next != NULL );
8759 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8760 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8762 if (strcmp (arg, cpu_arch [j].name) == 0)
8764 cpu_arch_tune_set = 1;
8765 cpu_arch_tune = cpu_arch [j].type;
8766 cpu_arch_tune_flags = cpu_arch[j].flags;
8770 if (j >= ARRAY_SIZE (cpu_arch))
8771 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8774 case OPTION_MMNEMONIC:
8775 if (strcasecmp (arg, "att") == 0)
8777 else if (strcasecmp (arg, "intel") == 0)
8780 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8783 case OPTION_MSYNTAX:
8784 if (strcasecmp (arg, "att") == 0)
8786 else if (strcasecmp (arg, "intel") == 0)
8789 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8792 case OPTION_MINDEX_REG:
8793 allow_index_reg = 1;
8796 case OPTION_MNAKED_REG:
8797 allow_naked_reg = 1;
8800 case OPTION_MOLD_GCC:
8804 case OPTION_MSSE2AVX:
8808 case OPTION_MSSE_CHECK:
8809 if (strcasecmp (arg, "error") == 0)
8810 sse_check = check_error;
8811 else if (strcasecmp (arg, "warning") == 0)
8812 sse_check = check_warning;
8813 else if (strcasecmp (arg, "none") == 0)
8814 sse_check = check_none;
8816 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8819 case OPTION_MOPERAND_CHECK:
8820 if (strcasecmp (arg, "error") == 0)
8821 operand_check = check_error;
8822 else if (strcasecmp (arg, "warning") == 0)
8823 operand_check = check_warning;
8824 else if (strcasecmp (arg, "none") == 0)
8825 operand_check = check_none;
8827 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
8830 case OPTION_MAVXSCALAR:
8831 if (strcasecmp (arg, "128") == 0)
8833 else if (strcasecmp (arg, "256") == 0)
8836 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8845 #define MESSAGE_TEMPLATE \
8849 show_arch (FILE *stream, int ext, int check)
8851 static char message[] = MESSAGE_TEMPLATE;
8852 char *start = message + 27;
8854 int size = sizeof (MESSAGE_TEMPLATE);
8861 left = size - (start - message);
8862 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8864 /* Should it be skipped? */
8865 if (cpu_arch [j].skip)
8868 name = cpu_arch [j].name;
8869 len = cpu_arch [j].len;
8872 /* It is an extension. Skip if we aren't asked to show it. */
8883 /* It is an processor. Skip if we show only extension. */
8886 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8888 /* It is an impossible processor - skip. */
8892 /* Reserve 2 spaces for ", " or ",\0" */
8895 /* Check if there is any room. */
8903 p = mempcpy (p, name, len);
8907 /* Output the current message now and start a new one. */
8910 fprintf (stream, "%s\n", message);
8912 left = size - (start - message) - len - 2;
8914 gas_assert (left >= 0);
8916 p = mempcpy (p, name, len);
8921 fprintf (stream, "%s\n", message);
8925 md_show_usage (FILE *stream)
8927 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8928 fprintf (stream, _("\
8930 -V print assembler version number\n\
8933 fprintf (stream, _("\
8934 -n Do not optimize code alignment\n\
8935 -q quieten some warnings\n"));
8936 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8937 fprintf (stream, _("\
8940 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8941 || defined (TE_PE) || defined (TE_PEP))
8942 fprintf (stream, _("\
8943 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8945 #ifdef SVR4_COMMENT_CHARS
8946 fprintf (stream, _("\
8947 --divide do not treat `/' as a comment character\n"));
8949 fprintf (stream, _("\
8950 --divide ignored\n"));
8952 fprintf (stream, _("\
8953 -march=CPU[,+EXTENSION...]\n\
8954 generate code for CPU and EXTENSION, CPU is one of:\n"));
8955 show_arch (stream, 0, 1);
8956 fprintf (stream, _("\
8957 EXTENSION is combination of:\n"));
8958 show_arch (stream, 1, 0);
8959 fprintf (stream, _("\
8960 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8961 show_arch (stream, 0, 0);
8962 fprintf (stream, _("\
8963 -msse2avx encode SSE instructions with VEX prefix\n"));
8964 fprintf (stream, _("\
8965 -msse-check=[none|error|warning]\n\
8966 check SSE instructions\n"));
8967 fprintf (stream, _("\
8968 -moperand-check=[none|error|warning]\n\
8969 check operand combinations for validity\n"));
8970 fprintf (stream, _("\
8971 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8973 fprintf (stream, _("\
8974 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8975 fprintf (stream, _("\
8976 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8977 fprintf (stream, _("\
8978 -mindex-reg support pseudo index registers\n"));
8979 fprintf (stream, _("\
8980 -mnaked-reg don't require `%%' prefix for registers\n"));
8981 fprintf (stream, _("\
8982 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8985 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8986 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8987 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8989 /* Pick the target format to use. */
8992 i386_target_format (void)
8994 if (!strncmp (default_arch, "x86_64", 6))
8996 update_code_flag (CODE_64BIT, 1);
8997 if (default_arch[6] == '\0')
8998 x86_elf_abi = X86_64_ABI;
9000 x86_elf_abi = X86_64_X32_ABI;
9002 else if (!strcmp (default_arch, "i386"))
9003 update_code_flag (CODE_32BIT, 1);
9005 as_fatal (_("unknown architecture"));
9007 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
9008 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9009 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
9010 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9012 switch (OUTPUT_FLAVOR)
9014 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
9015 case bfd_target_aout_flavour:
9016 return AOUT_TARGET_FORMAT;
9018 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
9019 # if defined (TE_PE) || defined (TE_PEP)
9020 case bfd_target_coff_flavour:
9021 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
9022 # elif defined (TE_GO32)
9023 case bfd_target_coff_flavour:
9026 case bfd_target_coff_flavour:
9030 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9031 case bfd_target_elf_flavour:
9035 switch (x86_elf_abi)
9038 format = ELF_TARGET_FORMAT;
9041 use_rela_relocations = 1;
9043 format = ELF_TARGET_FORMAT64;
9045 case X86_64_X32_ABI:
9046 use_rela_relocations = 1;
9048 disallow_64bit_reloc = 1;
9049 format = ELF_TARGET_FORMAT32;
9052 if (cpu_arch_isa == PROCESSOR_L1OM)
9054 if (x86_elf_abi != X86_64_ABI)
9055 as_fatal (_("Intel L1OM is 64bit only"));
9056 return ELF_TARGET_L1OM_FORMAT;
9058 if (cpu_arch_isa == PROCESSOR_K1OM)
9060 if (x86_elf_abi != X86_64_ABI)
9061 as_fatal (_("Intel K1OM is 64bit only"));
9062 return ELF_TARGET_K1OM_FORMAT;
9068 #if defined (OBJ_MACH_O)
9069 case bfd_target_mach_o_flavour:
9070 if (flag_code == CODE_64BIT)
9072 use_rela_relocations = 1;
9074 return "mach-o-x86-64";
9077 return "mach-o-i386";
9085 #endif /* OBJ_MAYBE_ more than one */
9087 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
9089 i386_elf_emit_arch_note (void)
9091 if (IS_ELF && cpu_arch_name != NULL)
9094 asection *seg = now_seg;
9095 subsegT subseg = now_subseg;
9096 Elf_Internal_Note i_note;
9097 Elf_External_Note e_note;
9098 asection *note_secp;
9101 /* Create the .note section. */
9102 note_secp = subseg_new (".note", 0);
9103 bfd_set_section_flags (stdoutput,
9105 SEC_HAS_CONTENTS | SEC_READONLY);
9107 /* Process the arch string. */
9108 len = strlen (cpu_arch_name);
9110 i_note.namesz = len + 1;
9112 i_note.type = NT_ARCH;
9113 p = frag_more (sizeof (e_note.namesz));
9114 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
9115 p = frag_more (sizeof (e_note.descsz));
9116 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
9117 p = frag_more (sizeof (e_note.type));
9118 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
9119 p = frag_more (len + 1);
9120 strcpy (p, cpu_arch_name);
9122 frag_align (2, 0, 0);
9124 subseg_set (seg, subseg);
9130 md_undefined_symbol (char *name)
9132 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
9133 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
9134 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
9135 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
9139 if (symbol_find (name))
9140 as_bad (_("GOT already in symbol table"));
9141 GOT_symbol = symbol_new (name, undefined_section,
9142 (valueT) 0, &zero_address_frag);
9149 /* Round up a section size to the appropriate boundary. */
9152 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
9154 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9155 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
9157 /* For a.out, force the section size to be aligned. If we don't do
9158 this, BFD will align it for us, but it will not write out the
9159 final bytes of the section. This may be a bug in BFD, but it is
9160 easier to fix it here since that is how the other a.out targets
9164 align = bfd_get_section_alignment (stdoutput, segment);
9165 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
9172 /* On the i386, PC-relative offsets are relative to the start of the
9173 next instruction. That is, the address of the offset, plus its
9174 size, since the offset is always the last part of the insn. */
9177 md_pcrel_from (fixS *fixP)
9179 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
9185 s_bss (int ignore ATTRIBUTE_UNUSED)
9189 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9191 obj_elf_section_change_hook ();
9193 temp = get_absolute_expression ();
9194 subseg_set (bss_section, (subsegT) temp);
9195 demand_empty_rest_of_line ();
9201 i386_validate_fix (fixS *fixp)
9203 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
9205 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
9209 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
9214 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
9216 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
9223 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
9226 bfd_reloc_code_real_type code;
9228 switch (fixp->fx_r_type)
9230 case BFD_RELOC_X86_64_PLT32:
9231 case BFD_RELOC_X86_64_GOT32:
9232 case BFD_RELOC_X86_64_GOTPCREL:
9233 case BFD_RELOC_386_PLT32:
9234 case BFD_RELOC_386_GOT32:
9235 case BFD_RELOC_386_GOTOFF:
9236 case BFD_RELOC_386_GOTPC:
9237 case BFD_RELOC_386_TLS_GD:
9238 case BFD_RELOC_386_TLS_LDM:
9239 case BFD_RELOC_386_TLS_LDO_32:
9240 case BFD_RELOC_386_TLS_IE_32:
9241 case BFD_RELOC_386_TLS_IE:
9242 case BFD_RELOC_386_TLS_GOTIE:
9243 case BFD_RELOC_386_TLS_LE_32:
9244 case BFD_RELOC_386_TLS_LE:
9245 case BFD_RELOC_386_TLS_GOTDESC:
9246 case BFD_RELOC_386_TLS_DESC_CALL:
9247 case BFD_RELOC_X86_64_TLSGD:
9248 case BFD_RELOC_X86_64_TLSLD:
9249 case BFD_RELOC_X86_64_DTPOFF32:
9250 case BFD_RELOC_X86_64_DTPOFF64:
9251 case BFD_RELOC_X86_64_GOTTPOFF:
9252 case BFD_RELOC_X86_64_TPOFF32:
9253 case BFD_RELOC_X86_64_TPOFF64:
9254 case BFD_RELOC_X86_64_GOTOFF64:
9255 case BFD_RELOC_X86_64_GOTPC32:
9256 case BFD_RELOC_X86_64_GOT64:
9257 case BFD_RELOC_X86_64_GOTPCREL64:
9258 case BFD_RELOC_X86_64_GOTPC64:
9259 case BFD_RELOC_X86_64_GOTPLT64:
9260 case BFD_RELOC_X86_64_PLTOFF64:
9261 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9262 case BFD_RELOC_X86_64_TLSDESC_CALL:
9264 case BFD_RELOC_VTABLE_ENTRY:
9265 case BFD_RELOC_VTABLE_INHERIT:
9267 case BFD_RELOC_32_SECREL:
9269 code = fixp->fx_r_type;
9271 case BFD_RELOC_X86_64_32S:
9272 if (!fixp->fx_pcrel)
9274 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
9275 code = fixp->fx_r_type;
9281 switch (fixp->fx_size)
9284 as_bad_where (fixp->fx_file, fixp->fx_line,
9285 _("can not do %d byte pc-relative relocation"),
9287 code = BFD_RELOC_32_PCREL;
9289 case 1: code = BFD_RELOC_8_PCREL; break;
9290 case 2: code = BFD_RELOC_16_PCREL; break;
9291 case 4: code = BFD_RELOC_32_PCREL; break;
9293 case 8: code = BFD_RELOC_64_PCREL; break;
9299 switch (fixp->fx_size)
9302 as_bad_where (fixp->fx_file, fixp->fx_line,
9303 _("can not do %d byte relocation"),
9305 code = BFD_RELOC_32;
9307 case 1: code = BFD_RELOC_8; break;
9308 case 2: code = BFD_RELOC_16; break;
9309 case 4: code = BFD_RELOC_32; break;
9311 case 8: code = BFD_RELOC_64; break;
9318 if ((code == BFD_RELOC_32
9319 || code == BFD_RELOC_32_PCREL
9320 || code == BFD_RELOC_X86_64_32S)
9322 && fixp->fx_addsy == GOT_symbol)
9325 code = BFD_RELOC_386_GOTPC;
9327 code = BFD_RELOC_X86_64_GOTPC32;
9329 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9331 && fixp->fx_addsy == GOT_symbol)
9333 code = BFD_RELOC_X86_64_GOTPC64;
9336 rel = (arelent *) xmalloc (sizeof (arelent));
9337 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9338 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9340 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9342 if (!use_rela_relocations)
9344 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9345 vtable entry to be used in the relocation's section offset. */
9346 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9347 rel->address = fixp->fx_offset;
9348 #if defined (OBJ_COFF) && defined (TE_PE)
9349 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9350 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9355 /* Use the rela in 64bit mode. */
9358 if (disallow_64bit_reloc)
9361 case BFD_RELOC_X86_64_DTPOFF64:
9362 case BFD_RELOC_X86_64_TPOFF64:
9363 case BFD_RELOC_64_PCREL:
9364 case BFD_RELOC_X86_64_GOTOFF64:
9365 case BFD_RELOC_X86_64_GOT64:
9366 case BFD_RELOC_X86_64_GOTPCREL64:
9367 case BFD_RELOC_X86_64_GOTPC64:
9368 case BFD_RELOC_X86_64_GOTPLT64:
9369 case BFD_RELOC_X86_64_PLTOFF64:
9370 as_bad_where (fixp->fx_file, fixp->fx_line,
9371 _("cannot represent relocation type %s in x32 mode"),
9372 bfd_get_reloc_code_name (code));
9378 if (!fixp->fx_pcrel)
9379 rel->addend = fixp->fx_offset;
9383 case BFD_RELOC_X86_64_PLT32:
9384 case BFD_RELOC_X86_64_GOT32:
9385 case BFD_RELOC_X86_64_GOTPCREL:
9386 case BFD_RELOC_X86_64_TLSGD:
9387 case BFD_RELOC_X86_64_TLSLD:
9388 case BFD_RELOC_X86_64_GOTTPOFF:
9389 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9390 case BFD_RELOC_X86_64_TLSDESC_CALL:
9391 rel->addend = fixp->fx_offset - fixp->fx_size;
9394 rel->addend = (section->vma
9396 + fixp->fx_addnumber
9397 + md_pcrel_from (fixp));
9402 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9403 if (rel->howto == NULL)
9405 as_bad_where (fixp->fx_file, fixp->fx_line,
9406 _("cannot represent relocation type %s"),
9407 bfd_get_reloc_code_name (code));
9408 /* Set howto to a garbage value so that we can keep going. */
9409 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9410 gas_assert (rel->howto != NULL);
9416 #include "tc-i386-intel.c"
9419 tc_x86_parse_to_dw2regnum (expressionS *exp)
9421 int saved_naked_reg;
9422 char saved_register_dot;
9424 saved_naked_reg = allow_naked_reg;
9425 allow_naked_reg = 1;
9426 saved_register_dot = register_chars['.'];
9427 register_chars['.'] = '.';
9428 allow_pseudo_reg = 1;
9429 expression_and_evaluate (exp);
9430 allow_pseudo_reg = 0;
9431 register_chars['.'] = saved_register_dot;
9432 allow_naked_reg = saved_naked_reg;
9434 if (exp->X_op == O_register && exp->X_add_number >= 0)
9436 if ((addressT) exp->X_add_number < i386_regtab_size)
9438 exp->X_op = O_constant;
9439 exp->X_add_number = i386_regtab[exp->X_add_number]
9440 .dw2_regnum[flag_code >> 1];
9443 exp->X_op = O_illegal;
9448 tc_x86_frame_initial_instructions (void)
9450 static unsigned int sp_regno[2];
9452 if (!sp_regno[flag_code >> 1])
9454 char *saved_input = input_line_pointer;
9455 char sp[][4] = {"esp", "rsp"};
9458 input_line_pointer = sp[flag_code >> 1];
9459 tc_x86_parse_to_dw2regnum (&exp);
9460 gas_assert (exp.X_op == O_constant);
9461 sp_regno[flag_code >> 1] = exp.X_add_number;
9462 input_line_pointer = saved_input;
9465 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9466 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9470 x86_dwarf2_addr_size (void)
9472 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9473 if (x86_elf_abi == X86_64_X32_ABI)
9476 return bfd_arch_bits_per_address (stdoutput) / 8;
9480 i386_elf_section_type (const char *str, size_t len)
9482 if (flag_code == CODE_64BIT
9483 && len == sizeof ("unwind") - 1
9484 && strncmp (str, "unwind", 6) == 0)
9485 return SHT_X86_64_UNWIND;
9492 i386_solaris_fix_up_eh_frame (segT sec)
9494 if (flag_code == CODE_64BIT)
9495 elf_section_type (sec) = SHT_X86_64_UNWIND;
9501 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9505 exp.X_op = O_secrel;
9506 exp.X_add_symbol = symbol;
9507 exp.X_add_number = 0;
9508 emit_expr (&exp, size);
9512 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9513 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9516 x86_64_section_letter (int letter, char **ptr_msg)
9518 if (flag_code == CODE_64BIT)
9521 return SHF_X86_64_LARGE;
9523 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9526 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9531 x86_64_section_word (char *str, size_t len)
9533 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9534 return SHF_X86_64_LARGE;
9540 handle_large_common (int small ATTRIBUTE_UNUSED)
9542 if (flag_code != CODE_64BIT)
9544 s_comm_internal (0, elf_common_parse);
9545 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9549 static segT lbss_section;
9550 asection *saved_com_section_ptr = elf_com_section_ptr;
9551 asection *saved_bss_section = bss_section;
9553 if (lbss_section == NULL)
9555 flagword applicable;
9557 subsegT subseg = now_subseg;
9559 /* The .lbss section is for local .largecomm symbols. */
9560 lbss_section = subseg_new (".lbss", 0);
9561 applicable = bfd_applicable_section_flags (stdoutput);
9562 bfd_set_section_flags (stdoutput, lbss_section,
9563 applicable & SEC_ALLOC);
9564 seg_info (lbss_section)->bss = 1;
9566 subseg_set (seg, subseg);
9569 elf_com_section_ptr = &_bfd_elf_large_com_section;
9570 bss_section = lbss_section;
9572 s_comm_internal (0, elf_common_parse);
9574 elf_com_section_ptr = saved_com_section_ptr;
9575 bss_section = saved_bss_section;
9578 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */