1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
5 Free Software Foundation, Inc.
7 This file is part of GAS, the GNU Assembler.
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
32 #include "safe-ctype.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
48 #define DEFAULT_ARCH "i386"
53 #define INLINE __inline__
59 /* Prefixes will be emitted in the order defined below.
60 WAIT_PREFIX must be the first prefix since FWAIT really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
69 #define HLE_PREFIX REP_PREFIX
71 #define REX_PREFIX 6 /* must come last. */
72 #define MAX_PREFIXES 7 /* max prefixes per opcode */
74 /* we define the syntax here (modulo base,index,scale syntax) */
75 #define REGISTER_PREFIX '%'
76 #define IMMEDIATE_PREFIX '$'
77 #define ABSOLUTE_PREFIX '*'
79 /* these are the instruction mnemonic suffixes in AT&T syntax or
80 memory operand size in Intel syntax. */
81 #define WORD_MNEM_SUFFIX 'w'
82 #define BYTE_MNEM_SUFFIX 'b'
83 #define SHORT_MNEM_SUFFIX 's'
84 #define LONG_MNEM_SUFFIX 'l'
85 #define QWORD_MNEM_SUFFIX 'q'
86 #define XMMWORD_MNEM_SUFFIX 'x'
87 #define YMMWORD_MNEM_SUFFIX 'y'
88 /* Intel Syntax. Use a non-ascii letter since it never appears in instructions. */
90 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
92 #define END_OF_INSN '\0'
95 'templates' is for grouping together 'template' structures for opcodes
96 of the same name. This is only used for storing the insns in the grand
97 ole hash table of insns.
98 The templates themselves start at START and range up to (but not including) END. */
103 const insn_template *start;
104 const insn_template *end;
108 /* 386 operand encoding bytes: see 386 book for details of this. */
111 unsigned int regmem; /* codes register or memory operand */
112 unsigned int reg; /* codes register operand (or extended opcode) */
113 unsigned int mode; /* how to interpret regmem & reg */
117 /* x86-64 extension prefix. */
118 typedef int rex_byte;
120 /* 386 opcode byte to code indirect addressing. */
129 /* x86 arch names, types and features */
132 const char *name; /* arch name */
133 unsigned int len; /* arch string length */
134 enum processor_type type; /* arch type */
135 i386_cpu_flags flags; /* cpu feature flags */
136 unsigned int skip; /* show_arch should skip this. */
137 unsigned int negated; /* turn off indicated flags. */
141 static void update_code_flag (int, int);
142 static void set_code_flag (int);
143 static void set_16bit_gcc_code_flag (int);
144 static void set_intel_syntax (int);
145 static void set_intel_mnemonic (int);
146 static void set_allow_index_reg (int);
147 static void set_check (int);
148 static void set_cpu_arch (int);
150 static void pe_directive_secrel (int);
152 static void signed_cons (int);
153 static char *output_invalid (int c);
154 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
156 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
158 static int i386_att_operand (char *);
159 static int i386_intel_operand (char *, int);
160 static int i386_intel_simplify (expressionS *);
161 static int i386_intel_parse_name (const char *, expressionS *);
162 static const reg_entry *parse_register (char *, char **);
163 static char *parse_insn (char *, char *);
164 static char *parse_operands (char *, const char *);
165 static void swap_operands (void);
166 static void swap_2_operands (int, int);
167 static void optimize_imm (void);
168 static void optimize_disp (void);
169 static const insn_template *match_template (void);
170 static int check_string (void);
171 static int process_suffix (void);
172 static int check_byte_reg (void);
173 static int check_long_reg (void);
174 static int check_qword_reg (void);
175 static int check_word_reg (void);
176 static int finalize_imm (void);
177 static int process_operands (void);
178 static const seg_entry *build_modrm_byte (void);
179 static void output_insn (void);
180 static void output_imm (fragS *, offsetT);
181 static void output_disp (fragS *, offsetT);
183 static void s_bss (int);
185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186 static void handle_large_common (int small ATTRIBUTE_UNUSED);
189 static const char *default_arch = DEFAULT_ARCH;
194 /* VEX prefix is either 2 or 3 bytes. */
195 unsigned char bytes[3];
197 /* Destination or source register specifier. */
198 const reg_entry *register_specifier;
201 /* 'md_assemble ()' gathers together information and puts it into an i386_insn. */
208 const reg_entry *regs;
213 operand_size_mismatch,
214 operand_type_mismatch,
215 register_type_mismatch,
216 number_of_operands_mismatch,
217 invalid_instruction_suffix,
220 unsupported_with_intel_mnemonic,
223 invalid_vsib_address,
224 invalid_vector_register_set,
225 unsupported_vector_index_register
230 /* TM holds the template for the insn we're currently assembling. */
233 /* SUFFIX holds the instruction size suffix for byte, word, dword
234 or qword, if given. */
237 /* OPERANDS gives the number of given operands. */
238 unsigned int operands;
240 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
241 of given register, displacement, memory and immediate operands. */
243 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
245 /* TYPES [i] is the type (see above #defines) which tells us how to
246 use OP[i] for the corresponding operand. */
247 i386_operand_type types[MAX_OPERANDS];
249 /* Displacement expression, immediate expression, or register for each operand. */
251 union i386_op op[MAX_OPERANDS];
253 /* Flags for operands. */
254 unsigned int flags[MAX_OPERANDS];
255 #define Operand_PCrel 1
257 /* Relocation type for operand */
258 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
260 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
261 the base index byte below. */
262 const reg_entry *base_reg;
263 const reg_entry *index_reg;
264 unsigned int log2_scale_factor;
266 /* SEG gives the seg_entries of this insn. They are zero unless
267 explicit segment overrides are given. */
268 const seg_entry *seg[2];
270 /* PREFIX holds all the given prefix opcodes (usually null).
271 PREFIXES is the number of prefix opcodes. */
272 unsigned int prefixes;
273 unsigned char prefix[MAX_PREFIXES];
275 /* RM and SIB are the modrm byte and the sib byte where the
276 addressing modes of this insn are encoded. */
282 /* Swap operand in encoding. */
283 unsigned int swap_operand;
285 /* Prefer 8bit or 32bit displacement in encoding. */
288 disp_encoding_default = 0,
293 /* Have HLE prefix. */
294 unsigned int have_hle;
297 enum i386_error error;
300 typedef struct _i386_insn i386_insn;
302 /* List of chars besides those in app.c:symbol_chars that can start an
303 operand. Used to prevent the scrubber eating vital white-space. */
304 const char extra_symbol_chars[] = "*%-(["
313 #if (defined (TE_I386AIX) \
314 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
315 && !defined (TE_GNU) \
316 && !defined (TE_LINUX) \
317 && !defined (TE_NACL) \
318 && !defined (TE_NETWARE) \
319 && !defined (TE_FreeBSD) \
320 && !defined (TE_DragonFly) \
321 && !defined (TE_NetBSD)))
322 /* This array holds the chars that always start a comment. If the
323 pre-processor is disabled, these aren't very useful. The option
324 --divide will remove '/' from this list. */
325 const char *i386_comment_chars = "#/";
326 #define SVR4_COMMENT_CHARS 1
327 #define PREFIX_SEPARATOR '\\'
330 const char *i386_comment_chars = "#";
331 #define PREFIX_SEPARATOR '/'
334 /* This array holds the chars that only start a comment at the beginning of
335 a line. If the line seems to have the form '# 123 filename'
336 .line and .file directives will appear in the pre-processed output.
337 Note that input_file.c hand checks for '#' at the beginning of the
338 first line of the input file. This is because the compiler outputs
339 #NO_APP at the beginning of its output.
340 Also note that comments started like this one will always work if
341 '/' isn't otherwise defined. */
342 const char line_comment_chars[] = "#/";
344 const char line_separator_chars[] = ";";
346 /* Chars that can be used to separate mant from exp in floating point nums. */
348 const char EXP_CHARS[] = "eE";
350 /* Chars that mean this number is a floating point constant, as in 0f12.456 or 0d1.2345e12. */
353 const char FLT_CHARS[] = "fFdDxX";
355 /* Tables for lexical analysis. */
356 static char mnemonic_chars[256];
357 static char register_chars[256];
358 static char operand_chars[256];
359 static char identifier_chars[256];
360 static char digit_chars[256];
362 /* Lexical macros. */
363 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
364 #define is_operand_char(x) (operand_chars[(unsigned char) x])
365 #define is_register_char(x) (register_chars[(unsigned char) x])
366 #define is_space_char(x) ((x) == ' ')
367 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
368 #define is_digit_char(x) (digit_chars[(unsigned char) x])
370 /* All non-digit non-letter characters that may occur in an operand. */
371 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
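/* Illustrative note (not in the original source): md_begin () below fills
   these tables so that upper case letters map to lower case in
   mnemonic_chars and register_chars.  A hedged usage sketch:

     is_register_char ('E')   -- yields 'e', letting "%EAX" match "%eax"
     is_space_char ('\t')     -- yields 0; only a literal blank counts

   Only letters, digits and the characters in operand_special_chars are
   accepted by is_operand_char.  */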
373 /* md_assemble() always leaves the strings it's passed unaltered. To
374 effect this we maintain a stack of saved characters that we've smashed
375 with '\0's (indicating end of strings for various sub-fields of the
376 assembler instruction). */
377 static char save_stack[32];
378 static char *save_stack_p;
379 #define END_STRING_AND_SAVE(s) \
380 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
381 #define RESTORE_END_STRING(s) \
382 do { *(s) = *--save_stack_p; } while (0)
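/* Illustrative sketch (not part of the original code): a parser that has
   found the end of a sub-field can temporarily terminate it in place:

     END_STRING_AND_SAVE (l);     -- save *l on save_stack, write '\0'
     ...look up the now NUL-terminated token...
     RESTORE_END_STRING (l);      -- pop the saved character back

   Pushes and pops must pair up exactly, so the caller's string really is
   left unaltered, as promised above.  */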
384 /* The instruction we're assembling. */
387 /* Possible templates for current insn. */
388 static const templates *current_templates;
390 /* Per instruction expressionS buffers: max displacements & immediates. */
391 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
392 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
394 /* Current operand we are working on. */
395 static int this_operand = -1;
397 /* We support four different modes. FLAG_CODE variable is used to distinguish these modes. */
405 static enum flag_code flag_code;
406 static unsigned int object_64bit;
407 static unsigned int disallow_64bit_reloc;
408 static int use_rela_relocations = 0;
410 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
411 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
412 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
414 /* The ELF ABI to use. */
422 static enum x86_elf_abi x86_elf_abi = I386_ABI;
425 /* The names used to print error messages. */
426 static const char *flag_code_names[] =
433 /* 1 for intel syntax, 0 if att syntax. */
435 static int intel_syntax = 0;
437 /* 1 for intel mnemonic,
438 0 if att mnemonic. */
439 static int intel_mnemonic = !SYSV386_COMPAT;
441 /* 1 if support old (<= 2.8.1) versions of gcc. */
442 static int old_gcc = OLDGCC_COMPAT;
444 /* 1 if pseudo registers are permitted. */
445 static int allow_pseudo_reg = 0;
447 /* 1 if register prefix % not required. */
448 static int allow_naked_reg = 0;
450 /* 1 if pseudo index register, eiz/riz, is allowed. */
451 static int allow_index_reg = 0;
453 static enum check_kind
459 sse_check, operand_check = check_warning;
461 /* Register prefix used for error message. */
462 static const char *register_prefix = "%";
464 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
465 leave, push, and pop instructions so that gcc has the same stack
466 frame as in 32 bit mode. */
467 static char stackop_size = '\0';
469 /* Non-zero to optimize code alignment. */
470 int optimize_align_code = 1;
472 /* Non-zero to quieten some warnings. */
473 static int quiet_warnings = 0;
476 static const char *cpu_arch_name = NULL;
477 static char *cpu_sub_arch_name = NULL;
479 /* CPU feature flags. */
480 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
482 /* If we have selected a cpu we are generating instructions for. */
483 static int cpu_arch_tune_set = 0;
485 /* Cpu we are generating instructions for. */
486 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
488 /* CPU feature flags of cpu we are generating instructions for. */
489 static i386_cpu_flags cpu_arch_tune_flags;
491 /* CPU instruction set architecture used. */
492 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
494 /* CPU feature flags of instruction set architecture used. */
495 i386_cpu_flags cpu_arch_isa_flags;
497 /* If set, conditional jumps are not automatically promoted to handle
498 larger than a byte offset. */
499 static unsigned int no_cond_jump_promotion = 0;
501 /* Encode SSE instructions with VEX prefix. */
502 static unsigned int sse2avx;
504 /* Encode scalar AVX instructions with specific vector length. */
511 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
512 static symbolS *GOT_symbol;
514 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
515 unsigned int x86_dwarf2_return_column;
517 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
518 int x86_cie_data_alignment;
520 /* Interface to relax_segment.
521 There are 3 major relax states for 386 jump insns because the
522 different types of jumps add different sizes to frags when we're
523 figuring out what sort of jump to choose to reach a given label. */
526 #define UNCOND_JUMP 0
528 #define COND_JUMP86 2
533 #define SMALL16 (SMALL | CODE16)
535 #define BIG16 (BIG | CODE16)
539 #define INLINE __inline__
545 #define ENCODE_RELAX_STATE(type, size) \
546 ((relax_substateT) (((type) << 2) | (size)))
547 #define TYPE_FROM_RELAX_STATE(s) \
549 #define DISP_SIZE_FROM_RELAX_STATE(s) \
550 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
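/* Worked example (added for illustration, not in the original source):
   ENCODE_RELAX_STATE (UNCOND_JUMP, BIG) puts the jump type in the bits
   above the low two and the size code in the low two bits;
   DISP_SIZE_FROM_RELAX_STATE then recovers the displacement width from
   those low bits: 4 bytes for BIG, 2 for BIG16, otherwise 1.  */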
552 /* This table is used by relax_frag to promote short jumps to long
553 ones where necessary. SMALL (short) jumps may be promoted to BIG
554 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
555 don't allow a short jump in a 32 bit code segment to be promoted to
556 a 16 bit offset jump because it's slower (requires data size
557 prefix), and doesn't work, unless the destination is in the bottom
558 64k of the code segment (The top 16 bits of eip are zeroed). */
560 const relax_typeS md_relax_table[] =
563 1) most positive reach of this state,
564 2) most negative reach of this state,
565 3) how many bytes this mode will have in the variable part of the frag
566 4) which index into the table to try if we can't fit into this one. */
568 /* UNCOND_JUMP states. */
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
570 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
571 /* dword jmp adds 4 bytes to frag:
572 0 extra opcode bytes, 4 displacement bytes. */
574 /* word jmp adds 2 bytes to frag:
575 0 extra opcode bytes, 2 displacement bytes. */
578 /* COND_JUMP states. */
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
580 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
581 /* dword conditionals add 5 bytes to frag:
582 1 extra opcode byte, 4 displacement bytes. */
584 /* word conditionals add 3 bytes to frag:
585 1 extra opcode byte, 2 displacement bytes. */
588 /* COND_JUMP86 states. */
589 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
590 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
591 /* dword conditionals add 5 bytes to frag:
592 1 extra opcode byte, 4 displacement bytes. */
594 /* word conditionals add 4 bytes to frag:
595 1 displacement byte and a 3 byte long branch insn. */
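/* Worked example (illustrative, not in the original source): a conditional
   jump starts in ENCODE_RELAX_STATE (COND_JUMP, SMALL).  If its target ends
   up, say, 200 bytes away, that exceeds the 127 + 1 forward reach of the
   byte-displacement entries above, so relax_frag follows the fourth field
   to ENCODE_RELAX_STATE (COND_JUMP, BIG) and the frag grows by 5 bytes:
   1 extra opcode byte plus 4 displacement bytes.  */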
599 static const arch_entry cpu_arch[] =
601 /* Do not replace the first two entries - i386_target_format()
602 relies on them being there in this order. */
603 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
604 CPU_GENERIC32_FLAGS, 0, 0 },
605 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
606 CPU_GENERIC64_FLAGS, 0, 0 },
607 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
608 CPU_NONE_FLAGS, 0, 0 },
609 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
610 CPU_I186_FLAGS, 0, 0 },
611 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
612 CPU_I286_FLAGS, 0, 0 },
613 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
614 CPU_I386_FLAGS, 0, 0 },
615 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
616 CPU_I486_FLAGS, 0, 0 },
617 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
618 CPU_I586_FLAGS, 0, 0 },
619 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
620 CPU_I686_FLAGS, 0, 0 },
621 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
622 CPU_I586_FLAGS, 0, 0 },
623 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
624 CPU_PENTIUMPRO_FLAGS, 0, 0 },
625 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
626 CPU_P2_FLAGS, 0, 0 },
627 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
628 CPU_P3_FLAGS, 0, 0 },
629 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
630 CPU_P4_FLAGS, 0, 0 },
631 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
632 CPU_CORE_FLAGS, 0, 0 },
633 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
634 CPU_NOCONA_FLAGS, 0, 0 },
635 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
636 CPU_CORE_FLAGS, 1, 0 },
637 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
638 CPU_CORE_FLAGS, 0, 0 },
639 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
640 CPU_CORE2_FLAGS, 1, 0 },
641 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
642 CPU_CORE2_FLAGS, 0, 0 },
643 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
644 CPU_COREI7_FLAGS, 0, 0 },
645 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
646 CPU_L1OM_FLAGS, 0, 0 },
647 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
648 CPU_K1OM_FLAGS, 0, 0 },
649 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
650 CPU_K6_FLAGS, 0, 0 },
651 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
652 CPU_K6_2_FLAGS, 0, 0 },
653 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
654 CPU_ATHLON_FLAGS, 0, 0 },
655 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
656 CPU_K8_FLAGS, 1, 0 },
657 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
658 CPU_K8_FLAGS, 0, 0 },
659 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
660 CPU_K8_FLAGS, 0, 0 },
661 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
662 CPU_AMDFAM10_FLAGS, 0, 0 },
663 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
664 CPU_BDVER1_FLAGS, 0, 0 },
665 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
666 CPU_BDVER2_FLAGS, 0, 0 },
667 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
668 CPU_8087_FLAGS, 0, 0 },
669 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
670 CPU_287_FLAGS, 0, 0 },
671 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
672 CPU_387_FLAGS, 0, 0 },
673 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
674 CPU_ANY87_FLAGS, 0, 1 },
675 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
676 CPU_MMX_FLAGS, 0, 0 },
677 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
678 CPU_3DNOWA_FLAGS, 0, 1 },
679 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
680 CPU_SSE_FLAGS, 0, 0 },
681 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
682 CPU_SSE2_FLAGS, 0, 0 },
683 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
684 CPU_SSE3_FLAGS, 0, 0 },
685 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
686 CPU_SSSE3_FLAGS, 0, 0 },
687 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
688 CPU_SSE4_1_FLAGS, 0, 0 },
689 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
690 CPU_SSE4_2_FLAGS, 0, 0 },
691 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
692 CPU_SSE4_2_FLAGS, 0, 0 },
693 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
694 CPU_ANY_SSE_FLAGS, 0, 1 },
695 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
696 CPU_AVX_FLAGS, 0, 0 },
697 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
698 CPU_AVX2_FLAGS, 0, 0 },
699 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
700 CPU_ANY_AVX_FLAGS, 0, 1 },
701 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
702 CPU_VMX_FLAGS, 0, 0 },
703 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
704 CPU_VMFUNC_FLAGS, 0, 0 },
705 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
706 CPU_SMX_FLAGS, 0, 0 },
707 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
708 CPU_XSAVE_FLAGS, 0, 0 },
709 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
710 CPU_XSAVEOPT_FLAGS, 0, 0 },
711 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
712 CPU_AES_FLAGS, 0, 0 },
713 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
714 CPU_PCLMUL_FLAGS, 0, 0 },
715 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
716 CPU_PCLMUL_FLAGS, 1, 0 },
717 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
718 CPU_FSGSBASE_FLAGS, 0, 0 },
719 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
720 CPU_RDRND_FLAGS, 0, 0 },
721 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
722 CPU_F16C_FLAGS, 0, 0 },
723 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
724 CPU_BMI2_FLAGS, 0, 0 },
725 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
726 CPU_FMA_FLAGS, 0, 0 },
727 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
728 CPU_FMA4_FLAGS, 0, 0 },
729 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
730 CPU_XOP_FLAGS, 0, 0 },
731 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
732 CPU_LWP_FLAGS, 0, 0 },
733 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
734 CPU_MOVBE_FLAGS, 0, 0 },
735 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
736 CPU_EPT_FLAGS, 0, 0 },
737 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
738 CPU_LZCNT_FLAGS, 0, 0 },
739 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
740 CPU_HLE_FLAGS, 0, 0 },
741 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
742 CPU_RTM_FLAGS, 0, 0 },
743 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
744 CPU_INVPCID_FLAGS, 0, 0 },
745 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
746 CPU_CLFLUSH_FLAGS, 0, 0 },
747 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
748 CPU_NOP_FLAGS, 0, 0 },
749 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
750 CPU_SYSCALL_FLAGS, 0, 0 },
751 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
752 CPU_RDTSCP_FLAGS, 0, 0 },
753 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
754 CPU_3DNOW_FLAGS, 0, 0 },
755 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
756 CPU_3DNOWA_FLAGS, 0, 0 },
757 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
758 CPU_PADLOCK_FLAGS, 0, 0 },
759 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
760 CPU_SVME_FLAGS, 1, 0 },
761 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
762 CPU_SVME_FLAGS, 0, 0 },
763 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
764 CPU_SSE4A_FLAGS, 0, 0 },
765 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
766 CPU_ABM_FLAGS, 0, 0 },
767 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
768 CPU_BMI_FLAGS, 0, 0 },
769 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
770 CPU_TBM_FLAGS, 0, 0 },
771 { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
772 CPU_ADX_FLAGS, 0, 0 },
773 { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
774 CPU_RDSEED_FLAGS, 0, 0 },
775 { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
776 CPU_PRFCHW_FLAGS, 0, 0 },
780 /* Like s_lcomm_internal in gas/read.c but the alignment string
781 is allowed to be optional. */
784 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
791 && *input_line_pointer == ',')
793 align = parse_align (needs_align - 1);
795 if (align == (addressT) -1)
810 bss_alloc (symbolP, size, align);
815 pe_lcomm (int needs_align)
817 s_comm_internal (needs_align * 2, pe_lcomm_internal);
821 const pseudo_typeS md_pseudo_table[] =
823 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
824 {"align", s_align_bytes, 0},
826 {"align", s_align_ptwo, 0},
828 {"arch", set_cpu_arch, 0},
832 {"lcomm", pe_lcomm, 1},
834 {"ffloat", float_cons, 'f'},
835 {"dfloat", float_cons, 'd'},
836 {"tfloat", float_cons, 'x'},
838 {"slong", signed_cons, 4},
839 {"noopt", s_ignore, 0},
840 {"optim", s_ignore, 0},
841 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
842 {"code16", set_code_flag, CODE_16BIT},
843 {"code32", set_code_flag, CODE_32BIT},
844 {"code64", set_code_flag, CODE_64BIT},
845 {"intel_syntax", set_intel_syntax, 1},
846 {"att_syntax", set_intel_syntax, 0},
847 {"intel_mnemonic", set_intel_mnemonic, 1},
848 {"att_mnemonic", set_intel_mnemonic, 0},
849 {"allow_index_reg", set_allow_index_reg, 1},
850 {"disallow_index_reg", set_allow_index_reg, 0},
851 {"sse_check", set_check, 0},
852 {"operand_check", set_check, 1},
853 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
854 {"largecomm", handle_large_common, 0},
856 {"file", (void (*) (int)) dwarf2_directive_file, 0},
857 {"loc", dwarf2_directive_loc, 0},
858 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
861 {"secrel32", pe_directive_secrel, 0},
866 /* For interface with expression (). */
867 extern char *input_line_pointer;
869 /* Hash table for instruction mnemonic lookup. */
870 static struct hash_control *op_hash;
872 /* Hash table for register lookup. */
873 static struct hash_control *reg_hash;
876 i386_align_code (fragS *fragP, int count)
878 /* Various efficient no-op patterns for aligning code labels.
879 Note: Don't try to assemble the instructions in the comments.
880 0L and 0w are not legal. */
881 static const char f32_1[] =
883 static const char f32_2[] =
884 {0x66,0x90}; /* xchg %ax,%ax */
885 static const char f32_3[] =
886 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
887 static const char f32_4[] =
888 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
889 static const char f32_5[] =
891 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
892 static const char f32_6[] =
893 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
894 static const char f32_7[] =
895 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
896 static const char f32_8[] =
898 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
899 static const char f32_9[] =
900 {0x89,0xf6, /* movl %esi,%esi */
901 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
902 static const char f32_10[] =
903 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
904 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
905 static const char f32_11[] =
906 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
907 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
908 static const char f32_12[] =
909 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
910 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
911 static const char f32_13[] =
912 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
913 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
914 static const char f32_14[] =
915 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
916 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
917 static const char f16_3[] =
918 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
919 static const char f16_4[] =
920 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
921 static const char f16_5[] =
923 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
924 static const char f16_6[] =
925 {0x89,0xf6, /* mov %si,%si */
926 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
927 static const char f16_7[] =
928 {0x8d,0x74,0x00, /* lea 0(%si),%si */
929 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
930 static const char f16_8[] =
931 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
932 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
933 static const char jump_31[] =
934 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
935 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
936 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
937 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
938 static const char *const f32_patt[] = {
939 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
940 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
942 static const char *const f16_patt[] = {
943 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
946 static const char alt_3[] =
948 /* nopl 0(%[re]ax) */
949 static const char alt_4[] =
950 {0x0f,0x1f,0x40,0x00};
951 /* nopl 0(%[re]ax,%[re]ax,1) */
952 static const char alt_5[] =
953 {0x0f,0x1f,0x44,0x00,0x00};
954 /* nopw 0(%[re]ax,%[re]ax,1) */
955 static const char alt_6[] =
956 {0x66,0x0f,0x1f,0x44,0x00,0x00};
957 /* nopl 0L(%[re]ax) */
958 static const char alt_7[] =
959 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
960 /* nopl 0L(%[re]ax,%[re]ax,1) */
961 static const char alt_8[] =
962 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
963 /* nopw 0L(%[re]ax,%[re]ax,1) */
964 static const char alt_9[] =
965 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
966 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
967 static const char alt_10[] =
968 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
970 nopw %cs:0L(%[re]ax,%[re]ax,1) */
971 static const char alt_long_11[] =
973 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
976 nopw %cs:0L(%[re]ax,%[re]ax,1) */
977 static const char alt_long_12[] =
980 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
984 nopw %cs:0L(%[re]ax,%[re]ax,1) */
985 static const char alt_long_13[] =
989 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
994 nopw %cs:0L(%[re]ax,%[re]ax,1) */
995 static const char alt_long_14[] =
1000 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1006 nopw %cs:0L(%[re]ax,%[re]ax,1) */
1007 static const char alt_long_15[] =
1013 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1014 /* nopl 0(%[re]ax,%[re]ax,1)
1015 nopw 0(%[re]ax,%[re]ax,1) */
1016 static const char alt_short_11[] =
1017 {0x0f,0x1f,0x44,0x00,0x00,
1018 0x66,0x0f,0x1f,0x44,0x00,0x00};
1019 /* nopw 0(%[re]ax,%[re]ax,1)
1020 nopw 0(%[re]ax,%[re]ax,1) */
1021 static const char alt_short_12[] =
1022 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1023 0x66,0x0f,0x1f,0x44,0x00,0x00};
1024 /* nopw 0(%[re]ax,%[re]ax,1)
1026 static const char alt_short_13[] =
1027 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1028 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1031 static const char alt_short_14[] =
1032 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1033 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1035 nopl 0L(%[re]ax,%[re]ax,1) */
1036 static const char alt_short_15[] =
1037 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1038 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1039 static const char *const alt_short_patt[] = {
1040 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1041 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1042 alt_short_14, alt_short_15
1044 static const char *const alt_long_patt[] = {
1045 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1046 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1047 alt_long_14, alt_long_15
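/* Usage sketch (added for illustration, not in the original source): the
   pattern arrays are indexed by the number of filler bytes minus one, so a
   request for 3 bytes of padding picks f32_patt[2] (f32_3,
   "leal 0(%esi),%esi") in 32-bit code, while the alt_*_patt tables
   substitute the multi-byte NOPs (0f 1f /0) on processors that have them.  */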
1050 /* Only align for at least a positive non-zero boundary. */
1051 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1054 /* We need to decide which NOP sequence to use for 32bit and
1055 64bit. When -mtune= is used:
1057 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1058 PROCESSOR_GENERIC32, f32_patt will be used.
1059 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1060 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1061 PROCESSOR_GENERIC64, alt_long_patt will be used.
1062 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8,
1063 PROCESSOR_AMDFAM10, and PROCESSOR_BD, alt_short_patt will be used.
1066 When -mtune= isn't used, alt_long_patt will be used if
1067 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will be used.
1070 When -march= or .arch is used, we can't use anything beyond
1071 cpu_arch_isa_flags. */
1073 if (flag_code == CODE_16BIT)
1077 memcpy (fragP->fr_literal + fragP->fr_fix,
1079 /* Adjust jump offset. */
1080 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1083 memcpy (fragP->fr_literal + fragP->fr_fix,
1084 f16_patt[count - 1], count);
1088 const char *const *patt = NULL;
1090 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1092 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1093 switch (cpu_arch_tune)
1095 case PROCESSOR_UNKNOWN:
1096 /* We use cpu_arch_isa_flags to check if we SHOULD
1097 optimize with nops. */
1098 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1099 patt = alt_long_patt;
1103 case PROCESSOR_PENTIUM4:
1104 case PROCESSOR_NOCONA:
1105 case PROCESSOR_CORE:
1106 case PROCESSOR_CORE2:
1107 case PROCESSOR_COREI7:
1108 case PROCESSOR_L1OM:
1109 case PROCESSOR_K1OM:
1110 case PROCESSOR_GENERIC64:
1111 patt = alt_long_patt;
1114 case PROCESSOR_ATHLON:
1116 case PROCESSOR_AMDFAM10:
1118 patt = alt_short_patt;
1120 case PROCESSOR_I386:
1121 case PROCESSOR_I486:
1122 case PROCESSOR_PENTIUM:
1123 case PROCESSOR_PENTIUMPRO:
1124 case PROCESSOR_GENERIC32:
1131 switch (fragP->tc_frag_data.tune)
1133 case PROCESSOR_UNKNOWN:
1134 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1135 PROCESSOR_UNKNOWN. */
1139 case PROCESSOR_I386:
1140 case PROCESSOR_I486:
1141 case PROCESSOR_PENTIUM:
1143 case PROCESSOR_ATHLON:
1145 case PROCESSOR_AMDFAM10:
1147 case PROCESSOR_GENERIC32:
1148 /* We use cpu_arch_isa_flags to check if we CAN optimize
1150 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1151 patt = alt_short_patt;
1155 case PROCESSOR_PENTIUMPRO:
1156 case PROCESSOR_PENTIUM4:
1157 case PROCESSOR_NOCONA:
1158 case PROCESSOR_CORE:
1159 case PROCESSOR_CORE2:
1160 case PROCESSOR_COREI7:
1161 case PROCESSOR_L1OM:
1162 case PROCESSOR_K1OM:
1163 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1164 patt = alt_long_patt;
1168 case PROCESSOR_GENERIC64:
1169 patt = alt_long_patt;
1174 if (patt == f32_patt)
1176 /* If the padding is less than 15 bytes, we use the normal
1177 ones. Otherwise, we use a jump instruction and adjust its offset. */
1181 /* For 64bit, the limit is 3 bytes. */
1182 if (flag_code == CODE_64BIT
1183 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1188 memcpy (fragP->fr_literal + fragP->fr_fix,
1189 patt[count - 1], count);
1192 memcpy (fragP->fr_literal + fragP->fr_fix,
1194 /* Adjust jump offset. */
1195 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1200 /* Maximum length of an instruction is 15 bytes. If the
1201 padding is greater than 15 bytes and we don't use a jump,
1202 we have to break it into smaller pieces. */
1203 int padding = count;
1204 while (padding > 15)
1207 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1212 memcpy (fragP->fr_literal + fragP->fr_fix,
1213 patt [padding - 1], padding);
1216 fragP->fr_var = count;
1220 operand_type_all_zero (const union i386_operand_type *x)
1222 switch (ARRAY_SIZE(x->array))
1231 return !x->array[0];
1238 operand_type_set (union i386_operand_type *x, unsigned int v)
1240 switch (ARRAY_SIZE(x->array))
1255 operand_type_equal (const union i386_operand_type *x,
1256 const union i386_operand_type *y)
1258 switch (ARRAY_SIZE(x->array))
1261 if (x->array[2] != y->array[2])
1264 if (x->array[1] != y->array[1])
1267 return x->array[0] == y->array[0];
1275 cpu_flags_all_zero (const union i386_cpu_flags *x)
1277 switch (ARRAY_SIZE(x->array))
1286 return !x->array[0];
1293 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1295 switch (ARRAY_SIZE(x->array))
1310 cpu_flags_equal (const union i386_cpu_flags *x,
1311 const union i386_cpu_flags *y)
1313 switch (ARRAY_SIZE(x->array))
1316 if (x->array[2] != y->array[2])
1319 if (x->array[1] != y->array[1])
1322 return x->array[0] == y->array[0];
1330 cpu_flags_check_cpu64 (i386_cpu_flags f)
1332 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1333 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1336 static INLINE i386_cpu_flags
1337 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1339 switch (ARRAY_SIZE (x.array))
1342 x.array [2] &= y.array [2];
1344 x.array [1] &= y.array [1];
1346 x.array [0] &= y.array [0];
1354 static INLINE i386_cpu_flags
1355 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1357 switch (ARRAY_SIZE (x.array))
1360 x.array [2] |= y.array [2];
1362 x.array [1] |= y.array [1];
1364 x.array [0] |= y.array [0];
1372 static INLINE i386_cpu_flags
1373 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1375 switch (ARRAY_SIZE (x.array))
1378 x.array [2] &= ~y.array [2];
1380 x.array [1] &= ~y.array [1];
1382 x.array [0] &= ~y.array [0];
1390 #define CPU_FLAGS_ARCH_MATCH 0x1
1391 #define CPU_FLAGS_64BIT_MATCH 0x2
1392 #define CPU_FLAGS_AES_MATCH 0x4
1393 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1394 #define CPU_FLAGS_AVX_MATCH 0x10
1396 #define CPU_FLAGS_32BIT_MATCH \
1397 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1398 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1399 #define CPU_FLAGS_PERFECT_MATCH \
1400 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
1402 /* Return CPU flags match bits. */
1405 cpu_flags_match (const insn_template *t)
1407 i386_cpu_flags x = t->cpu_flags;
1408 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1410 x.bitfield.cpu64 = 0;
1411 x.bitfield.cpuno64 = 0;
1413 if (cpu_flags_all_zero (&x))
1415 /* This instruction is available on all archs. */
1416 match |= CPU_FLAGS_32BIT_MATCH;
1420 /* This instruction is available only on some archs. */
1421 i386_cpu_flags cpu = cpu_arch_flags;
1423 cpu.bitfield.cpu64 = 0;
1424 cpu.bitfield.cpuno64 = 0;
1425 cpu = cpu_flags_and (x, cpu);
1426 if (!cpu_flags_all_zero (&cpu))
1428 if (x.bitfield.cpuavx)
1430 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1431 if (cpu.bitfield.cpuavx)
1433 /* Check SSE2AVX. */
1434 if (!t->opcode_modifier.sse2avx || sse2avx)
1436 match |= (CPU_FLAGS_ARCH_MATCH
1437 | CPU_FLAGS_AVX_MATCH);
1439 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1440 match |= CPU_FLAGS_AES_MATCH;
1442 if (!x.bitfield.cpupclmul
1443 || cpu.bitfield.cpupclmul)
1444 match |= CPU_FLAGS_PCLMUL_MATCH;
1448 match |= CPU_FLAGS_ARCH_MATCH;
1451 match |= CPU_FLAGS_32BIT_MATCH;
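/* Illustrative example (not in the original source): for a template whose
   cpu_flags are all zero the feature checks above are skipped and the
   result is CPU_FLAGS_32BIT_MATCH, plus CPU_FLAGS_64BIT_MATCH when the
   64-bit check passes, i.e. CPU_FLAGS_PERFECT_MATCH.  An AVX template
   assembled while only SSE* is enabled keeps at most the 64-bit bit, so
   template matching can reject it later.  */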
1457 static INLINE i386_operand_type
1458 operand_type_and (i386_operand_type x, i386_operand_type y)
1460 switch (ARRAY_SIZE (x.array))
1463 x.array [2] &= y.array [2];
1465 x.array [1] &= y.array [1];
1467 x.array [0] &= y.array [0];
1475 static INLINE i386_operand_type
1476 operand_type_or (i386_operand_type x, i386_operand_type y)
1478 switch (ARRAY_SIZE (x.array))
1481 x.array [2] |= y.array [2];
1483 x.array [1] |= y.array [1];
1485 x.array [0] |= y.array [0];
1493 static INLINE i386_operand_type
1494 operand_type_xor (i386_operand_type x, i386_operand_type y)
1496 switch (ARRAY_SIZE (x.array))
1499 x.array [2] ^= y.array [2];
1501 x.array [1] ^= y.array [1];
1503 x.array [0] ^= y.array [0];
1511 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1512 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1513 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1514 static const i386_operand_type inoutportreg
1515 = OPERAND_TYPE_INOUTPORTREG;
1516 static const i386_operand_type reg16_inoutportreg
1517 = OPERAND_TYPE_REG16_INOUTPORTREG;
1518 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1519 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1520 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1521 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1522 static const i386_operand_type anydisp
1523 = OPERAND_TYPE_ANYDISP;
1524 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1525 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1526 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1527 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1528 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1529 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1530 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1531 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1532 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1533 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1534 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1535 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1546 operand_type_check (i386_operand_type t, enum operand_type c)
1551 return (t.bitfield.reg8
1554 || t.bitfield.reg64);
1557 return (t.bitfield.imm8
1561 || t.bitfield.imm32s
1562 || t.bitfield.imm64);
1565 return (t.bitfield.disp8
1566 || t.bitfield.disp16
1567 || t.bitfield.disp32
1568 || t.bitfield.disp32s
1569 || t.bitfield.disp64);
1572 return (t.bitfield.disp8
1573 || t.bitfield.disp16
1574 || t.bitfield.disp32
1575 || t.bitfield.disp32s
1576 || t.bitfield.disp64
1577 || t.bitfield.baseindex);
1586 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1587 operand J for instruction template T. */
1590 match_reg_size (const insn_template *t, unsigned int j)
1592 return !((i.types[j].bitfield.byte
1593 && !t->operand_types[j].bitfield.byte)
1594 || (i.types[j].bitfield.word
1595 && !t->operand_types[j].bitfield.word)
1596 || (i.types[j].bitfield.dword
1597 && !t->operand_types[j].bitfield.dword)
1598 || (i.types[j].bitfield.qword
1599 && !t->operand_types[j].bitfield.qword));
1602 /* Return 1 if there is no conflict in any size on operand J for
1603 instruction template T. */
1606 match_mem_size (const insn_template *t, unsigned int j)
1608 return (match_reg_size (t, j)
1609 && !((i.types[j].bitfield.unspecified
1610 && !t->operand_types[j].bitfield.unspecified)
1611 || (i.types[j].bitfield.fword
1612 && !t->operand_types[j].bitfield.fword)
1613 || (i.types[j].bitfield.tbyte
1614 && !t->operand_types[j].bitfield.tbyte)
1615 || (i.types[j].bitfield.xmmword
1616 && !t->operand_types[j].bitfield.xmmword)
1617 || (i.types[j].bitfield.ymmword
1618 && !t->operand_types[j].bitfield.ymmword)));
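/* Illustrative example (not in the original source): an operand whose
   parsed type has the dword bit set fails match_reg_size against a
   template slot that only allows byte, word or qword; match_mem_size
   additionally requires that an explicitly sized memory operand
   (fword, tbyte, xmmword, ymmword) is permitted by the template.  */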
1621 /* Return 1 if there is no size conflict on any operands for
1622 instruction template T. */
1625 operand_size_match (const insn_template *t)
1630 /* Don't check jump instructions. */
1631 if (t->opcode_modifier.jump
1632 || t->opcode_modifier.jumpbyte
1633 || t->opcode_modifier.jumpdword
1634 || t->opcode_modifier.jumpintersegment)
1637 /* Check memory and accumulator operand size. */
1638 for (j = 0; j < i.operands; j++)
1640 if (t->operand_types[j].bitfield.anysize)
1643 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1649 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1658 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1661 i.error = operand_size_mismatch;
1665 /* Check reverse. */
1666 gas_assert (i.operands == 2);
1669 for (j = 0; j < 2; j++)
1671 if (t->operand_types[j].bitfield.acc
1672 && !match_reg_size (t, j ? 0 : 1))
1675 if (i.types[j].bitfield.mem
1676 && !match_mem_size (t, j ? 0 : 1))
1684 operand_type_match (i386_operand_type overlap,
1685 i386_operand_type given)
1687 i386_operand_type temp = overlap;
1689 temp.bitfield.jumpabsolute = 0;
1690 temp.bitfield.unspecified = 0;
1691 temp.bitfield.byte = 0;
1692 temp.bitfield.word = 0;
1693 temp.bitfield.dword = 0;
1694 temp.bitfield.fword = 0;
1695 temp.bitfield.qword = 0;
1696 temp.bitfield.tbyte = 0;
1697 temp.bitfield.xmmword = 0;
1698 temp.bitfield.ymmword = 0;
1699 if (operand_type_all_zero (&temp))
1702 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1703 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1707 i.error = operand_type_mismatch;
1711 /* If given types g0 and g1 are registers they must be of the same type
1712 unless the expected operand type register overlap is null.
1713 Note that Acc in a template matches every size of reg. */
1716 operand_type_register_match (i386_operand_type m0,
1717 i386_operand_type g0,
1718 i386_operand_type t0,
1719 i386_operand_type m1,
1720 i386_operand_type g1,
1721 i386_operand_type t1)
1723 if (!operand_type_check (g0, reg))
1726 if (!operand_type_check (g1, reg))
1729 if (g0.bitfield.reg8 == g1.bitfield.reg8
1730 && g0.bitfield.reg16 == g1.bitfield.reg16
1731 && g0.bitfield.reg32 == g1.bitfield.reg32
1732 && g0.bitfield.reg64 == g1.bitfield.reg64)
1735 if (m0.bitfield.acc)
1737 t0.bitfield.reg8 = 1;
1738 t0.bitfield.reg16 = 1;
1739 t0.bitfield.reg32 = 1;
1740 t0.bitfield.reg64 = 1;
1743 if (m1.bitfield.acc)
1745 t1.bitfield.reg8 = 1;
1746 t1.bitfield.reg16 = 1;
1747 t1.bitfield.reg32 = 1;
1748 t1.bitfield.reg64 = 1;
1751 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1752 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1753 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1754 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1757 i.error = register_type_mismatch;
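/* Illustrative example (not in the original source): pairing an 8-bit
   register with a 32-bit register fails here with register_type_mismatch
   unless the two template operand types still share a register size after
   the Acc widening above; an Acc slot is treated as accepting reg8, reg16,
   reg32 and reg64, so it matches a register of any size.  */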
1762 static INLINE unsigned int
1763 register_number (const reg_entry *r)
1765 unsigned int nr = r->reg_num;
1767 if (r->reg_flags & RegRex)
1773 static INLINE unsigned int
1774 mode_from_disp_size (i386_operand_type t)
1776 if (t.bitfield.disp8)
1778 else if (t.bitfield.disp16
1779 || t.bitfield.disp32
1780 || t.bitfield.disp32s)
1787 fits_in_signed_byte (offsetT num)
1789 return (num >= -128) && (num <= 127);
1793 fits_in_unsigned_byte (offsetT num)
1795 return (num & 0xff) == num;
1799 fits_in_unsigned_word (offsetT num)
1801 return (num & 0xffff) == num;
1805 fits_in_signed_word (offsetT num)
1807 return (-32768 <= num) && (num <= 32767);
1811 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1816 return (!(((offsetT) -1 << 31) & num)
1817 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1819 } /* fits_in_signed_long() */
1822 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1827 return (num & (((offsetT) 2 << 31) - 1)) == num;
1829 } /* fits_in_unsigned_long() */
1832 fits_in_imm4 (offsetT num)
1834 return (num & 0xf) == num;
1837 static i386_operand_type
1838 smallest_imm_type (offsetT num)
1840 i386_operand_type t;
1842 operand_type_set (&t, 0);
1843 t.bitfield.imm64 = 1;
1845 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1847 /* This code is disabled on the 486 because all the Imm1 forms
1848 in the opcode table are slower on the i486. They're the
1849 versions with the implicitly specified single-position
1850 displacement, which has another syntax if you really want to use it. */
1852 t.bitfield.imm1 = 1;
1853 t.bitfield.imm8 = 1;
1854 t.bitfield.imm8s = 1;
1855 t.bitfield.imm16 = 1;
1856 t.bitfield.imm32 = 1;
1857 t.bitfield.imm32s = 1;
1859 else if (fits_in_signed_byte (num))
1861 t.bitfield.imm8 = 1;
1862 t.bitfield.imm8s = 1;
1863 t.bitfield.imm16 = 1;
1864 t.bitfield.imm32 = 1;
1865 t.bitfield.imm32s = 1;
1867 else if (fits_in_unsigned_byte (num))
1869 t.bitfield.imm8 = 1;
1870 t.bitfield.imm16 = 1;
1871 t.bitfield.imm32 = 1;
1872 t.bitfield.imm32s = 1;
1874 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1876 t.bitfield.imm16 = 1;
1877 t.bitfield.imm32 = 1;
1878 t.bitfield.imm32s = 1;
1880 else if (fits_in_signed_long (num))
1882 t.bitfield.imm32 = 1;
1883 t.bitfield.imm32s = 1;
1885 else if (fits_in_unsigned_long (num))
1886 t.bitfield.imm32 = 1;
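/* Worked example (illustrative, not in the original source): for
   num == 128 fits_in_signed_byte fails but fits_in_unsigned_byte succeeds,
   so the returned type has imm8, imm16, imm32 and imm32s set but not
   imm8s; imm64 is always set on entry, so a 64-bit immediate remains
   representable.  */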
1892 offset_in_range (offsetT val, int size)
1898 case 1: mask = ((addressT) 1 << 8) - 1; break;
1899 case 2: mask = ((addressT) 1 << 16) - 1; break;
1900 case 4: mask = ((addressT) 2 << 31) - 1; break;
1902 case 8: mask = ((addressT) 2 << 63) - 1; break;
1908 /* If BFD64, sign extend val for 32bit address mode. */
1909 if (flag_code != CODE_64BIT
1910 || i.prefix[ADDR_PREFIX])
1911 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1912 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1915 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1917 char buf1[40], buf2[40];
1919 sprint_value (buf1, val);
1920 sprint_value (buf2, val & mask);
1921 as_warn (_("%s shortened to %s"), buf1, buf2);
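/* Illustrative example (not in the original source): with size == 1 the
   mask above is 0xff, so offset_in_range (0x1234, 1) keeps only 0x34 and
   emits the "shortened to" warning, while offset_in_range (-1, 1) produces
   no warning because the discarded high bits are all ones.  */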
1935 a. PREFIX_EXIST if attempting to add a prefix where one from the
1936 same class already exists.
1937 b. PREFIX_LOCK if lock prefix is added.
1938 c. PREFIX_REP if rep/repne prefix is added.
1939 d. PREFIX_OTHER if other prefix is added.
1942 static enum PREFIX_GROUP
1943 add_prefix (unsigned int prefix)
1945 enum PREFIX_GROUP ret = PREFIX_OTHER;
1948 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1949 && flag_code == CODE_64BIT)
1951 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1952 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1953 && (prefix & (REX_R | REX_X | REX_B))))
1964 case CS_PREFIX_OPCODE:
1965 case DS_PREFIX_OPCODE:
1966 case ES_PREFIX_OPCODE:
1967 case FS_PREFIX_OPCODE:
1968 case GS_PREFIX_OPCODE:
1969 case SS_PREFIX_OPCODE:
1973 case REPNE_PREFIX_OPCODE:
1974 case REPE_PREFIX_OPCODE:
1979 case LOCK_PREFIX_OPCODE:
1988 case ADDR_PREFIX_OPCODE:
1992 case DATA_PREFIX_OPCODE:
1996 if (i.prefix[q] != 0)
2004 i.prefix[q] |= prefix;
2007 as_bad (_("same type of prefix used twice"));
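/* Illustrative example (not in the original source): add_prefix
   (LOCK_PREFIX_OPCODE) normally returns PREFIX_LOCK, but issuing two
   prefixes from the same class in one instruction reaches the
   "same type of prefix used twice" diagnostic above and yields
   PREFIX_EXIST, as described in the comment before the function.  */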
2013 update_code_flag (int value, int check)
2015 PRINTF_LIKE ((*as_error));
2017 flag_code = (enum flag_code) value;
2018 if (flag_code == CODE_64BIT)
2020 cpu_arch_flags.bitfield.cpu64 = 1;
2021 cpu_arch_flags.bitfield.cpuno64 = 0;
2025 cpu_arch_flags.bitfield.cpu64 = 0;
2026 cpu_arch_flags.bitfield.cpuno64 = 1;
2028 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
2031 as_error = as_fatal;
2034 (*as_error) (_("64bit mode not supported on `%s'."),
2035 cpu_arch_name ? cpu_arch_name : default_arch);
2037 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2040 as_error = as_fatal;
2043 (*as_error) (_("32bit mode not supported on `%s'."),
2044 cpu_arch_name ? cpu_arch_name : default_arch);
2046 stackop_size = '\0';
2050 set_code_flag (int value)
2052 update_code_flag (value, 0);
2056 set_16bit_gcc_code_flag (int new_code_flag)
2058 flag_code = (enum flag_code) new_code_flag;
2059 if (flag_code != CODE_16BIT)
2061 cpu_arch_flags.bitfield.cpu64 = 0;
2062 cpu_arch_flags.bitfield.cpuno64 = 1;
2063 stackop_size = LONG_MNEM_SUFFIX;
2067 set_intel_syntax (int syntax_flag)
2069 /* Find out if register prefixing is specified. */
2070 int ask_naked_reg = 0;
2073 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2075 char *string = input_line_pointer;
2076 int e = get_symbol_end ();
2078 if (strcmp (string, "prefix") == 0)
2080 else if (strcmp (string, "noprefix") == 0)
2083 as_bad (_("bad argument to syntax directive."));
2084 *input_line_pointer = e;
2086 demand_empty_rest_of_line ();
2088 intel_syntax = syntax_flag;
2090 if (ask_naked_reg == 0)
2091 allow_naked_reg = (intel_syntax
2092 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2094 allow_naked_reg = (ask_naked_reg < 0);
2096 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2098 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2099 identifier_chars['$'] = intel_syntax ? '$' : 0;
2100 register_prefix = allow_naked_reg ? "" : "%";
2104 set_intel_mnemonic (int mnemonic_flag)
2106 intel_mnemonic = mnemonic_flag;
2110 set_allow_index_reg (int flag)
2112 allow_index_reg = flag;
2116 set_check (int what)
2118 enum check_kind *kind;
2123 kind = &operand_check;
2134 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2136 char *string = input_line_pointer;
2137 int e = get_symbol_end ();
2139 if (strcmp (string, "none") == 0)
2141 else if (strcmp (string, "warning") == 0)
2142 *kind = check_warning;
2143 else if (strcmp (string, "error") == 0)
2144 *kind = check_error;
2146 as_bad (_("bad argument to %s_check directive."), str);
2147 *input_line_pointer = e;
2150 as_bad (_("missing argument for %s_check directive"), str);
2152 demand_empty_rest_of_line ();
2156 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2157 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2159 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2160 static const char *arch;
2162 /* Intel L1OM is only supported on ELF. */
2168 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2169 use default_arch. */
2170 arch = cpu_arch_name;
2172 arch = default_arch;
2175 /* If we are targeting Intel L1OM, we must enable it. */
2176 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2177 || new_flag.bitfield.cpul1om)
2180 /* If we are targeting Intel K1OM, we must enable it. */
2181 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2182 || new_flag.bitfield.cpuk1om)
2185 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2190 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2194 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2196 char *string = input_line_pointer;
2197 int e = get_symbol_end ();
2199 i386_cpu_flags flags;
2201 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2203 if (strcmp (string, cpu_arch[j].name) == 0)
2205 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2209 cpu_arch_name = cpu_arch[j].name;
2210 cpu_sub_arch_name = NULL;
2211 cpu_arch_flags = cpu_arch[j].flags;
2212 if (flag_code == CODE_64BIT)
2214 cpu_arch_flags.bitfield.cpu64 = 1;
2215 cpu_arch_flags.bitfield.cpuno64 = 0;
2219 cpu_arch_flags.bitfield.cpu64 = 0;
2220 cpu_arch_flags.bitfield.cpuno64 = 1;
2222 cpu_arch_isa = cpu_arch[j].type;
2223 cpu_arch_isa_flags = cpu_arch[j].flags;
2224 if (!cpu_arch_tune_set)
2226 cpu_arch_tune = cpu_arch_isa;
2227 cpu_arch_tune_flags = cpu_arch_isa_flags;
2232 if (!cpu_arch[j].negated)
2233 flags = cpu_flags_or (cpu_arch_flags,
2236 flags = cpu_flags_and_not (cpu_arch_flags,
2238 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2240 if (cpu_sub_arch_name)
2242 char *name = cpu_sub_arch_name;
2243 cpu_sub_arch_name = concat (name,
2245 (const char *) NULL);
2249 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2250 cpu_arch_flags = flags;
2251 cpu_arch_isa_flags = flags;
2253 *input_line_pointer = e;
2254 demand_empty_rest_of_line ();
2258 if (j >= ARRAY_SIZE (cpu_arch))
2259 as_bad (_("no such architecture: `%s'"), string);
2261 *input_line_pointer = e;
2264 as_bad (_("missing cpu architecture"));
2266 no_cond_jump_promotion = 0;
2267 if (*input_line_pointer == ','
2268 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2270 char *string = ++input_line_pointer;
2271 int e = get_symbol_end ();
2273 if (strcmp (string, "nojumps") == 0)
2274 no_cond_jump_promotion = 1;
2275 else if (strcmp (string, "jumps") == 0)
2278 as_bad (_("no such architecture modifier: `%s'"), string);
2280 *input_line_pointer = e;
2283 demand_empty_rest_of_line ();
2286 enum bfd_architecture
2289 if (cpu_arch_isa == PROCESSOR_L1OM)
2291 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2292 || flag_code != CODE_64BIT)
2293 as_fatal (_("Intel L1OM is 64bit ELF only"));
2294 return bfd_arch_l1om;
2296 else if (cpu_arch_isa == PROCESSOR_K1OM)
2298 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2299 || flag_code != CODE_64BIT)
2300 as_fatal (_("Intel K1OM is 64bit ELF only"));
2301 return bfd_arch_k1om;
2304 return bfd_arch_i386;
2310 if (!strncmp (default_arch, "x86_64", 6))
2312 if (cpu_arch_isa == PROCESSOR_L1OM)
2314 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2315 || default_arch[6] != '\0')
2316 as_fatal (_("Intel L1OM is 64bit ELF only"));
2317 return bfd_mach_l1om;
2319 else if (cpu_arch_isa == PROCESSOR_K1OM)
2321 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2322 || default_arch[6] != '\0')
2323 as_fatal (_("Intel K1OM is 64bit ELF only"));
2324 return bfd_mach_k1om;
2326 else if (default_arch[6] == '\0')
2327 return bfd_mach_x86_64;
2329 return bfd_mach_x64_32;
2331 else if (!strcmp (default_arch, "i386"))
2332 return bfd_mach_i386_i386;
2334 as_fatal (_("unknown architecture"));
2340 const char *hash_err;
2342 /* Initialize op_hash hash table. */
2343 op_hash = hash_new ();
2346 const insn_template *optab;
2347 templates *core_optab;
2349 /* Setup for loop. */
2351 core_optab = (templates *) xmalloc (sizeof (templates));
2352 core_optab->start = optab;
2357 if (optab->name == NULL
2358 || strcmp (optab->name, (optab - 1)->name) != 0)
2360 /* different name --> ship out current template list;
2361 add to hash table; & begin anew. */
2362 core_optab->end = optab;
2363 hash_err = hash_insert (op_hash,
2365 (void *) core_optab);
2368 as_fatal (_("internal Error: Can't hash %s: %s"),
2372 if (optab->name == NULL)
2374 core_optab = (templates *) xmalloc (sizeof (templates));
2375 core_optab->start = optab;
2380 /* Initialize reg_hash hash table. */
2381 reg_hash = hash_new ();
2383 const reg_entry *regtab;
2384 unsigned int regtab_size = i386_regtab_size;
2386 for (regtab = i386_regtab; regtab_size--; regtab++)
2388 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2390 as_fatal (_("internal Error: Can't hash %s: %s"),
2396 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2401 for (c = 0; c < 256; c++)
2406 mnemonic_chars[c] = c;
2407 register_chars[c] = c;
2408 operand_chars[c] = c;
2410 else if (ISLOWER (c))
2412 mnemonic_chars[c] = c;
2413 register_chars[c] = c;
2414 operand_chars[c] = c;
2416 else if (ISUPPER (c))
2418 mnemonic_chars[c] = TOLOWER (c);
2419 register_chars[c] = mnemonic_chars[c];
2420 operand_chars[c] = c;
2423 if (ISALPHA (c) || ISDIGIT (c))
2424 identifier_chars[c] = c;
2427 identifier_chars[c] = c;
2428 operand_chars[c] = c;
2433 identifier_chars['@'] = '@';
2436 identifier_chars['?'] = '?';
2437 operand_chars['?'] = '?';
2439 digit_chars['-'] = '-';
2440 mnemonic_chars['_'] = '_';
2441 mnemonic_chars['-'] = '-';
2442 mnemonic_chars['.'] = '.';
2443 identifier_chars['_'] = '_';
2444 identifier_chars['.'] = '.';
2446 for (p = operand_special_chars; *p != '\0'; p++)
2447 operand_chars[(unsigned char) *p] = *p;
2450 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2453 record_alignment (text_section, 2);
2454 record_alignment (data_section, 2);
2455 record_alignment (bss_section, 2);
2459 if (flag_code == CODE_64BIT)
2461 #if defined (OBJ_COFF) && defined (TE_PE)
2462 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2465 x86_dwarf2_return_column = 16;
2467 x86_cie_data_alignment = -8;
2471 x86_dwarf2_return_column = 8;
2472 x86_cie_data_alignment = -4;
2477 i386_print_statistics (FILE *file)
2479 hash_print_statistics (file, "i386 opcode", op_hash);
2480 hash_print_statistics (file, "i386 register", reg_hash);
2485 /* Debugging routines for md_assemble. */
2486 static void pte (insn_template *);
2487 static void pt (i386_operand_type);
2488 static void pe (expressionS *);
2489 static void ps (symbolS *);
2492 pi (char *line, i386_insn *x)
2496 fprintf (stdout, "%s: template ", line);
2498 fprintf (stdout, " address: base %s index %s scale %x\n",
2499 x->base_reg ? x->base_reg->reg_name : "none",
2500 x->index_reg ? x->index_reg->reg_name : "none",
2501 x->log2_scale_factor);
2502 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2503 x->rm.mode, x->rm.reg, x->rm.regmem);
2504 fprintf (stdout, " sib: base %x index %x scale %x\n",
2505 x->sib.base, x->sib.index, x->sib.scale);
2506 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2507 (x->rex & REX_W) != 0,
2508 (x->rex & REX_R) != 0,
2509 (x->rex & REX_X) != 0,
2510 (x->rex & REX_B) != 0);
2511 for (j = 0; j < x->operands; j++)
2513 fprintf (stdout, " #%d: ", j + 1);
2515 fprintf (stdout, "\n");
2516 if (x->types[j].bitfield.reg8
2517 || x->types[j].bitfield.reg16
2518 || x->types[j].bitfield.reg32
2519 || x->types[j].bitfield.reg64
2520 || x->types[j].bitfield.regmmx
2521 || x->types[j].bitfield.regxmm
2522 || x->types[j].bitfield.regymm
2523 || x->types[j].bitfield.sreg2
2524 || x->types[j].bitfield.sreg3
2525 || x->types[j].bitfield.control
2526 || x->types[j].bitfield.debug
2527 || x->types[j].bitfield.test)
2528 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2529 if (operand_type_check (x->types[j], imm))
2531 if (operand_type_check (x->types[j], disp))
2532 pe (x->op[j].disps);
2537 pte (insn_template *t)
2540 fprintf (stdout, " %d operands ", t->operands);
2541 fprintf (stdout, "opcode %x ", t->base_opcode);
2542 if (t->extension_opcode != None)
2543 fprintf (stdout, "ext %x ", t->extension_opcode);
2544 if (t->opcode_modifier.d)
2545 fprintf (stdout, "D");
2546 if (t->opcode_modifier.w)
2547 fprintf (stdout, "W");
2548 fprintf (stdout, "\n");
2549 for (j = 0; j < t->operands; j++)
2551 fprintf (stdout, " #%d type ", j + 1);
2552 pt (t->operand_types[j]);
2553 fprintf (stdout, "\n");
2560 fprintf (stdout, " operation %d\n", e->X_op);
2561 fprintf (stdout, " add_number %ld (%lx)\n",
2562 (long) e->X_add_number, (long) e->X_add_number);
2563 if (e->X_add_symbol)
2565 fprintf (stdout, " add_symbol ");
2566 ps (e->X_add_symbol);
2567 fprintf (stdout, "\n");
2571 fprintf (stdout, " op_symbol ");
2572 ps (e->X_op_symbol);
2573 fprintf (stdout, "\n");
2580 fprintf (stdout, "%s type %s%s",
2582 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2583 segment_name (S_GET_SEGMENT (s)));
2586 static struct type_name
2588 i386_operand_type mask;
2591 const type_names[] =
2593 { OPERAND_TYPE_REG8, "r8" },
2594 { OPERAND_TYPE_REG16, "r16" },
2595 { OPERAND_TYPE_REG32, "r32" },
2596 { OPERAND_TYPE_REG64, "r64" },
2597 { OPERAND_TYPE_IMM8, "i8" },
2598 { OPERAND_TYPE_IMM8, "i8s" },
2599 { OPERAND_TYPE_IMM16, "i16" },
2600 { OPERAND_TYPE_IMM32, "i32" },
2601 { OPERAND_TYPE_IMM32S, "i32s" },
2602 { OPERAND_TYPE_IMM64, "i64" },
2603 { OPERAND_TYPE_IMM1, "i1" },
2604 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2605 { OPERAND_TYPE_DISP8, "d8" },
2606 { OPERAND_TYPE_DISP16, "d16" },
2607 { OPERAND_TYPE_DISP32, "d32" },
2608 { OPERAND_TYPE_DISP32S, "d32s" },
2609 { OPERAND_TYPE_DISP64, "d64" },
2610 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2611 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2612 { OPERAND_TYPE_CONTROL, "control reg" },
2613 { OPERAND_TYPE_TEST, "test reg" },
2614 { OPERAND_TYPE_DEBUG, "debug reg" },
2615 { OPERAND_TYPE_FLOATREG, "FReg" },
2616 { OPERAND_TYPE_FLOATACC, "FAcc" },
2617 { OPERAND_TYPE_SREG2, "SReg2" },
2618 { OPERAND_TYPE_SREG3, "SReg3" },
2619 { OPERAND_TYPE_ACC, "Acc" },
2620 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2621 { OPERAND_TYPE_REGMMX, "rMMX" },
2622 { OPERAND_TYPE_REGXMM, "rXMM" },
2623 { OPERAND_TYPE_REGYMM, "rYMM" },
2624 { OPERAND_TYPE_ESSEG, "es" },
2628 pt (i386_operand_type t)
2631 i386_operand_type a;
2633 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2635 a = operand_type_and (t, type_names[j].mask);
2636 if (!operand_type_all_zero (&a))
2637 fprintf (stdout, "%s, ", type_names[j].name);
2642 #endif /* DEBUG386 */
2644 static bfd_reloc_code_real_type
2645 reloc (unsigned int size,
2648 bfd_reloc_code_real_type other)
2650 if (other != NO_RELOC)
2652 reloc_howto_type *rel;
2657 case BFD_RELOC_X86_64_GOT32:
2658 return BFD_RELOC_X86_64_GOT64;
2660 case BFD_RELOC_X86_64_PLTOFF64:
2661 return BFD_RELOC_X86_64_PLTOFF64;
2663 case BFD_RELOC_X86_64_GOTPC32:
2664 other = BFD_RELOC_X86_64_GOTPC64;
2666 case BFD_RELOC_X86_64_GOTPCREL:
2667 other = BFD_RELOC_X86_64_GOTPCREL64;
2669 case BFD_RELOC_X86_64_TPOFF32:
2670 other = BFD_RELOC_X86_64_TPOFF64;
2672 case BFD_RELOC_X86_64_DTPOFF32:
2673 other = BFD_RELOC_X86_64_DTPOFF64;
2679 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2680 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2683 rel = bfd_reloc_type_lookup (stdoutput, other);
2685 as_bad (_("unknown relocation (%u)"), other);
2686 else if (size != bfd_get_reloc_size (rel))
2687 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2688 bfd_get_reloc_size (rel),
2690 else if (pcrel && !rel->pc_relative)
2691 as_bad (_("non-pc-relative relocation for pc-relative field"));
2692 else if ((rel->complain_on_overflow == complain_overflow_signed
2694 || (rel->complain_on_overflow == complain_overflow_unsigned
2696 as_bad (_("relocated field and relocation type differ in signedness"));
2705 as_bad (_("there are no unsigned pc-relative relocations"));
2708 case 1: return BFD_RELOC_8_PCREL;
2709 case 2: return BFD_RELOC_16_PCREL;
2710 case 4: return BFD_RELOC_32_PCREL;
2711 case 8: return BFD_RELOC_64_PCREL;
2713 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2720 case 4: return BFD_RELOC_X86_64_32S;
2725 case 1: return BFD_RELOC_8;
2726 case 2: return BFD_RELOC_16;
2727 case 4: return BFD_RELOC_32;
2728 case 8: return BFD_RELOC_64;
2730 as_bad (_("cannot do %s %u byte relocation"),
2731 sign > 0 ? "signed" : "unsigned", size);
2737 /* Here we decide which fixups can be adjusted to make them relative to
2738 the beginning of the section instead of the symbol. Basically we need
2739 to make sure that the dynamic relocations are done correctly, so in
2740 some cases we force the original symbol to be used. */
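/* A sketch of the convention, as inferred from the checks below (parts of
   which fall outside this excerpt): returning 0 forces the fix to stay
   against its original symbol, while a non-zero result lets the generic
   code reduce it to section+offset.  For example, a BFD_RELOC_386_PLT32
   fix has to keep its symbol so the linker can still build the PLT entry.  */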
2743 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2745 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2749 /* Don't adjust pc-relative references to merge sections in 64-bit
2751 if (use_rela_relocations
2752 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2756 /* x86_64 GOTPCREL relocations are represented as 32bit PCrel relocations
2757 and changed later by validate_fix. */
2758 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2759 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2762 /* adjust_reloc_syms doesn't know about the GOT. */
2763 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2764 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2765 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2766 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2767 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2768 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2769 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2770 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2771 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2772 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2773 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2774 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2775 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2776 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2777 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2778 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2779 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2780 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2781 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2782 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2783 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2784 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2785 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2786 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2787 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2788 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2789 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2790 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2797 intel_float_operand (const char *mnemonic)
2799 /* Note that the value returned is meaningful only for opcodes with (memory)
2800 operands, hence the code here is free to improperly handle opcodes that
2801 have no operands (for better performance and smaller code). */
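/* Rough return-value summary, as inferred from the cases below: 0 means
   not a floating point mnemonic (fxsave/fxrstor also fall here), 2 means
   an FP operation on an integer operand, 3 means an FP control/state
   operation (fldcw/fldenv, frstor, fsave, fst*w/fstenv, ...), and
   presumably 1 for every other FP operation.  */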
2803 if (mnemonic[0] != 'f')
2804 return 0; /* non-math */
2806 switch (mnemonic[1])
2808 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2809 the fs segment override prefix are not currently handled because no
2810 call path can make opcodes without operands get here */
2812 return 2 /* integer op */;
2814 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2815 return 3; /* fldcw/fldenv */
2818 if (mnemonic[2] != 'o' /* fnop */)
2819 return 3; /* non-waiting control op */
2822 if (mnemonic[2] == 's')
2823 return 3; /* frstor/frstpm */
2826 if (mnemonic[2] == 'a')
2827 return 3; /* fsave */
2828 if (mnemonic[2] == 't')
2830 switch (mnemonic[3])
2832 case 'c': /* fstcw */
2833 case 'd': /* fstdw */
2834 case 'e': /* fstenv */
2835 case 's': /* fsts[gw] */
2841 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2842 return 0; /* fxsave/fxrstor are not really math ops */
2849 /* Build the VEX prefix. */
2852 build_vex_prefix (const insn_template *t)
2854 unsigned int register_specifier;
2855 unsigned int implied_prefix;
2856 unsigned int vector_length;
2858 /* Check register specifier. */
2859 if (i.vex.register_specifier)
2860 register_specifier = ~register_number (i.vex.register_specifier) & 0xf;
2862 register_specifier = 0xf;
2864 /* Use 2-byte VEX prefix by swapping destination and source
2867 && i.operands == i.reg_operands
2868 && i.tm.opcode_modifier.vexopcode == VEX0F
2869 && i.tm.opcode_modifier.s
2872 unsigned int xchg = i.operands - 1;
2873 union i386_op temp_op;
2874 i386_operand_type temp_type;
2876 temp_type = i.types[xchg];
2877 i.types[xchg] = i.types[0];
2878 i.types[0] = temp_type;
2879 temp_op = i.op[xchg];
2880 i.op[xchg] = i.op[0];
2883 gas_assert (i.rm.mode == 3);
2887 i.rm.regmem = i.rm.reg;
2890 /* Use the next insn. */
2894 if (i.tm.opcode_modifier.vex == VEXScalar)
2895 vector_length = avxscalar;
2897 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2899 switch ((i.tm.base_opcode >> 8) & 0xff)
2904 case DATA_PREFIX_OPCODE:
2907 case REPE_PREFIX_OPCODE:
2910 case REPNE_PREFIX_OPCODE:
2917 /* Use 2-byte VEX prefix if possible. */
2918 if (i.tm.opcode_modifier.vexopcode == VEX0F
2919 && i.tm.opcode_modifier.vexw != VEXW1
2920 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2922 /* 2-byte VEX prefix. */
2926 i.vex.bytes[0] = 0xc5;
2928 /* Check the REX.R bit. */
2929 r = (i.rex & REX_R) ? 0 : 1;
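      /* For reference, a sketch of the standard 2-byte VEX layout (assumed,
         not derived from this file): after the 0xc5 byte come
           bit 7     = inverted REX.R (the `r' computed above),
           bits 6..3 = vvvv, the ones'-complemented register specifier,
           bit 2     = L, the vector length (1 = 256-bit),
           bits 1..0 = pp, the implied 66/F3/F2 prefix,
         which is what the expression below packs together.  */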
2930 i.vex.bytes[1] = (r << 7
2931 | register_specifier << 3
2932 | vector_length << 2
2937 /* 3-byte VEX prefix. */
2942 switch (i.tm.opcode_modifier.vexopcode)
2946 i.vex.bytes[0] = 0xc4;
2950 i.vex.bytes[0] = 0xc4;
2954 i.vex.bytes[0] = 0xc4;
2958 i.vex.bytes[0] = 0x8f;
2962 i.vex.bytes[0] = 0x8f;
2966 i.vex.bytes[0] = 0x8f;
2972 /* The high 3 bits of the second VEX byte are the one's complement
2973 of RXB bits from REX. */
2974 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
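      /* Worked example, assuming the usual REX bit values (REX_B = 1,
         REX_X = 2, REX_R = 4): with only REX.R set, ~i.rex & 0x7 is 0x3,
         so the byte becomes 0x60 | m, i.e. ~R = 0, ~X = 1, ~B = 1 in the
         top three bits followed by the 5-bit opcode-map field m.  */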
2976 /* Check the REX.W bit. */
2977 w = (i.rex & REX_W) ? 1 : 0;
2978 if (i.tm.opcode_modifier.vexw)
2983 if (i.tm.opcode_modifier.vexw == VEXW1)
2987 i.vex.bytes[2] = (w << 7
2988 | register_specifier << 3
2989 | vector_length << 2
2995 process_immext (void)
2999 if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
3002 /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3003 with an opcode suffix which is coded in the same place as an
3004 8-bit immediate field would be.
3005 Here we check those operands and remove them afterwards. */
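      /* For instance, "monitor %eax, %ecx, %edx" and "mwait %eax, %ecx"
         spell out their fixed registers; the loop below only verifies that
         operand x really is register number x and then drops the operands,
         since nothing about them needs to be encoded.  */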
3008 for (x = 0; x < i.operands; x++)
3009 if (register_number (i.op[x].regs) != x)
3010 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3011 register_prefix, i.op[x].regs->reg_name, x + 1,
3017 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3018 which is coded in the same place as an 8-bit immediate field
3019 would be. Here we fake an 8-bit immediate operand from the
3020 opcode suffix stored in tm.extension_opcode.
3022 AVX instructions also use this encoding, for some of
3023 3 argument instructions. */
3025 gas_assert (i.imm_operands == 0
3027 || (i.tm.opcode_modifier.vex
3028 && i.operands <= 4)));
3030 exp = &im_expressions[i.imm_operands++];
3031 i.op[i.operands].imms = exp;
3032 i.types[i.operands] = imm8;
3034 exp->X_op = O_constant;
3035 exp->X_add_number = i.tm.extension_opcode;
3036 i.tm.extension_opcode = None;
3043 switch (i.tm.opcode_modifier.hleprefixok)
3048 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3049 as_bad (_("invalid instruction `%s' after `xacquire'"),
3052 as_bad (_("invalid instruction `%s' after `xrelease'"),
3056 if (i.prefix[LOCK_PREFIX])
3058 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3059 as_bad (_("missing `lock' with `xacquire'"));
3061 as_bad (_("missing `lock' with `xrelease'"));
3065 case HLEPrefixRelease:
3066 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3068 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3072 if (i.mem_operands == 0
3073 || !operand_type_check (i.types[i.operands - 1], anymem))
3075 as_bad (_("memory destination needed for instruction `%s'"
3076 " after `xrelease'"), i.tm.name);
3083 /* This is the guts of the machine-dependent assembler. LINE points to a
3084 machine dependent instruction. This function is supposed to emit
3085 the frags/bytes it assembles to. */
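/* In outline, as suggested by the calls below (some of which lie outside
   this excerpt): parse_insn strips prefixes and finds the mnemonic,
   parse_operands fills in i.op[]/i.types[], match_template picks an
   insn_template, process_suffix and process_operands finalize operand
   sizes and the ModRM/SIB fields, and the encoded bytes are then emitted.  */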
3088 md_assemble (char *line)
3091 char mnemonic[MAX_MNEM_SIZE];
3092 const insn_template *t;
3094 /* Initialize globals. */
3095 memset (&i, '\0', sizeof (i));
3096 for (j = 0; j < MAX_OPERANDS; j++)
3097 i.reloc[j] = NO_RELOC;
3098 memset (disp_expressions, '\0', sizeof (disp_expressions));
3099 memset (im_expressions, '\0', sizeof (im_expressions));
3100 save_stack_p = save_stack;
3102 /* First parse an instruction mnemonic & call i386_operand for the operands.
3103 We assume that the scrubber has arranged it so that line[0] is the valid
3104 start of a (possibly prefixed) mnemonic. */
3106 line = parse_insn (line, mnemonic);
3110 line = parse_operands (line, mnemonic);
3115 /* Now we've parsed the mnemonic into a set of templates, and have the
3116 operands at hand. */
3118 /* All intel opcodes have reversed operands except for "bound" and
3119 "enter". We also don't reverse intersegment "jmp" and "call"
3120 instructions with 2 immediate operands so that the immediate segment
3121 precedes the offset, as it does when in AT&T mode. */
3124 && (strcmp (mnemonic, "bound") != 0)
3125 && (strcmp (mnemonic, "invlpga") != 0)
3126 && !(operand_type_check (i.types[0], imm)
3127 && operand_type_check (i.types[1], imm)))
3130 /* The order of the immediates should be reversed
3131 for 2 immediates extrq and insertq instructions */
3132 if (i.imm_operands == 2
3133 && (strcmp (mnemonic, "extrq") == 0
3134 || strcmp (mnemonic, "insertq") == 0))
3135 swap_2_operands (0, 1);
3140 /* Don't optimize displacement for movabs since it only takes 64bit
3143 && i.disp_encoding != disp_encoding_32bit
3144 && (flag_code != CODE_64BIT
3145 || strcmp (mnemonic, "movabs") != 0))
3148 /* Next, we find a template that matches the given insn,
3149 making sure the overlap of the given operands types is consistent
3150 with the template operand types. */
3152 if (!(t = match_template ()))
3155 if (sse_check != check_none
3156 && !i.tm.opcode_modifier.noavx
3157 && (i.tm.cpu_flags.bitfield.cpusse
3158 || i.tm.cpu_flags.bitfield.cpusse2
3159 || i.tm.cpu_flags.bitfield.cpusse3
3160 || i.tm.cpu_flags.bitfield.cpussse3
3161 || i.tm.cpu_flags.bitfield.cpusse4_1
3162 || i.tm.cpu_flags.bitfield.cpusse4_2))
3164 (sse_check == check_warning
3166 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3169 /* Zap movzx and movsx suffix. The suffix has been set from
3170 "word ptr" or "byte ptr" on the source operand in Intel syntax
3171 or extracted from mnemonic in AT&T syntax. But we'll use
3172 the destination register to choose the suffix for encoding. */
3173 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3175 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3176 there is no suffix, the default will be byte extension. */
3177 if (i.reg_operands != 2
3180 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3185 if (i.tm.opcode_modifier.fwait)
3186 if (!add_prefix (FWAIT_OPCODE))
3189 /* Check for lock without a lockable instruction. Destination operand
3190 must be memory unless it is xchg (0x86). */
3191 if (i.prefix[LOCK_PREFIX]
3192 && (!i.tm.opcode_modifier.islockable
3193 || i.mem_operands == 0
3194 || (i.tm.base_opcode != 0x86
3195 && !operand_type_check (i.types[i.operands - 1], anymem))))
3197 as_bad (_("expecting lockable instruction after `lock'"));
3201 /* Check if HLE prefix is OK. */
3202 if (i.have_hle && !check_hle ())
3205 /* Check string instruction segment overrides. */
3206 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3208 if (!check_string ())
3210 i.disp_operands = 0;
3213 if (!process_suffix ())
3216 /* Update operand types. */
3217 for (j = 0; j < i.operands; j++)
3218 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3220 /* Make still unresolved immediate matches conform to size of immediate
3221 given in i.suffix. */
3222 if (!finalize_imm ())
3225 if (i.types[0].bitfield.imm1)
3226 i.imm_operands = 0; /* kludge for shift insns. */
3228 /* We only need to check those implicit registers for instructions
3229 with 3 operands or less. */
3230 if (i.operands <= 3)
3231 for (j = 0; j < i.operands; j++)
3232 if (i.types[j].bitfield.inoutportreg
3233 || i.types[j].bitfield.shiftcount
3234 || i.types[j].bitfield.acc
3235 || i.types[j].bitfield.floatacc)
3238 /* ImmExt should be processed after SSE2AVX. */
3239 if (!i.tm.opcode_modifier.sse2avx
3240 && i.tm.opcode_modifier.immext)
3243 /* For insns with operands there are more diddles to do to the opcode. */
3246 if (!process_operands ())
3249 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3251 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3252 as_warn (_("translating to `%sp'"), i.tm.name);
3255 if (i.tm.opcode_modifier.vex)
3256 build_vex_prefix (t);
3258 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3259 instructions may define INT_OPCODE as well, so avoid this corner
3260 case for those instructions that use MODRM. */
3261 if (i.tm.base_opcode == INT_OPCODE
3262 && !i.tm.opcode_modifier.modrm
3263 && i.op[0].imms->X_add_number == 3)
3265 i.tm.base_opcode = INT3_OPCODE;
3269 if ((i.tm.opcode_modifier.jump
3270 || i.tm.opcode_modifier.jumpbyte
3271 || i.tm.opcode_modifier.jumpdword)
3272 && i.op[0].disps->X_op == O_constant)
3274 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3275 the absolute address given by the constant. Since ix86 jumps and
3276 calls are pc relative, we need to generate a reloc. */
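      /* e.g. "jmp 0x1234" cannot be encoded with a literal target; it is
         rewritten here as a jump to the absolute symbol plus 0x1234, and
         the pc-relative displacement is later fixed up through a reloc.  */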
3277 i.op[0].disps->X_add_symbol = &abs_symbol;
3278 i.op[0].disps->X_op = O_symbol;
3281 if (i.tm.opcode_modifier.rex64)
3284 /* For 8 bit registers we need an empty rex prefix. Also if the
3285 instruction already has a prefix, we need to convert old
3286 registers to new ones. */
3288 if ((i.types[0].bitfield.reg8
3289 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3290 || (i.types[1].bitfield.reg8
3291 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3292 || ((i.types[0].bitfield.reg8
3293 || i.types[1].bitfield.reg8)
3298 i.rex |= REX_OPCODE;
3299 for (x = 0; x < 2; x++)
3301 /* Look for 8 bit operand that uses old registers. */
3302 if (i.types[x].bitfield.reg8
3303 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3305 /* In case it is "hi" register, give up. */
3306 if (i.op[x].regs->reg_num > 3)
3307 as_bad (_("can't encode register '%s%s' in an "
3308 "instruction requiring REX prefix."),
3309 register_prefix, i.op[x].regs->reg_name);
3311 /* Otherwise it is equivalent to the extended register.
3312 Since the encoding doesn't change this is merely
3313 cosmetic cleanup for debug output. */
3315 i.op[x].regs = i.op[x].regs + 8;
3321 add_prefix (REX_OPCODE | i.rex);
3323 /* We are ready to output the insn. */
3328 parse_insn (char *line, char *mnemonic)
3331 char *token_start = l;
3334 const insn_template *t;
3337 /* Non-zero if we found a prefix only acceptable with string insns. */
3338 const char *expecting_string_instruction = NULL;
3343 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3348 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3350 as_bad (_("no such instruction: `%s'"), token_start);
3355 if (!is_space_char (*l)
3356 && *l != END_OF_INSN
3358 || (*l != PREFIX_SEPARATOR
3361 as_bad (_("invalid character %s in mnemonic"),
3362 output_invalid (*l));
3365 if (token_start == l)
3367 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3368 as_bad (_("expecting prefix; got nothing"));
3370 as_bad (_("expecting mnemonic; got nothing"));
3374 /* Look up instruction (or prefix) via hash table. */
3375 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3377 if (*l != END_OF_INSN
3378 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3379 && current_templates
3380 && current_templates->start->opcode_modifier.isprefix)
3382 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3384 as_bad ((flag_code != CODE_64BIT
3385 ? _("`%s' is only supported in 64-bit mode")
3386 : _("`%s' is not supported in 64-bit mode")),
3387 current_templates->start->name);
3390 /* If we are in 16-bit mode, do not allow addr16 or data16.
3391 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3392 if ((current_templates->start->opcode_modifier.size16
3393 || current_templates->start->opcode_modifier.size32)
3394 && flag_code != CODE_64BIT
3395 && (current_templates->start->opcode_modifier.size32
3396 ^ (flag_code == CODE_16BIT)))
3398 as_bad (_("redundant %s prefix"),
3399 current_templates->start->name);
3402 /* Add prefix, checking for repeated prefixes. */
3403 switch (add_prefix (current_templates->start->base_opcode))
3408 if (current_templates->start->cpu_flags.bitfield.cpuhle)
3411 expecting_string_instruction = current_templates->start->name;
3416 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3423 if (!current_templates)
3425 /* Check if we should swap operand or force 32bit displacement in
3427 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3429 else if (mnem_p - 3 == dot_p
3432 i.disp_encoding = disp_encoding_8bit;
3433 else if (mnem_p - 4 == dot_p
3437 i.disp_encoding = disp_encoding_32bit;
3442 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3445 if (!current_templates)
3448 /* See if we can get a match by trimming off a suffix. */
3451 case WORD_MNEM_SUFFIX:
3452 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3453 i.suffix = SHORT_MNEM_SUFFIX;
3455 case BYTE_MNEM_SUFFIX:
3456 case QWORD_MNEM_SUFFIX:
3457 i.suffix = mnem_p[-1];
3459 current_templates = (const templates *) hash_find (op_hash,
3462 case SHORT_MNEM_SUFFIX:
3463 case LONG_MNEM_SUFFIX:
3466 i.suffix = mnem_p[-1];
3468 current_templates = (const templates *) hash_find (op_hash,
3477 if (intel_float_operand (mnemonic) == 1)
3478 i.suffix = SHORT_MNEM_SUFFIX;
3480 i.suffix = LONG_MNEM_SUFFIX;
3482 current_templates = (const templates *) hash_find (op_hash,
3487 if (!current_templates)
3489 as_bad (_("no such instruction: `%s'"), token_start);
3494 if (current_templates->start->opcode_modifier.jump
3495 || current_templates->start->opcode_modifier.jumpbyte)
3497 /* Check for a branch hint. We allow ",pt" and ",pn" for
3498 predict taken and predict not taken respectively.
3499 I'm not sure that branch hints actually do anything on loop
3500 and jcxz insns (JumpByte) for current Pentium4 chips. They
3501 may work in the future and it doesn't hurt to accept them
3503 if (l[0] == ',' && l[1] == 'p')
3507 if (!add_prefix (DS_PREFIX_OPCODE))
3511 else if (l[2] == 'n')
3513 if (!add_prefix (CS_PREFIX_OPCODE))
3519 /* Any other comma loses. */
3522 as_bad (_("invalid character %s in mnemonic"),
3523 output_invalid (*l));
3527 /* Check if instruction is supported on specified architecture. */
3529 for (t = current_templates->start; t < current_templates->end; ++t)
3531 supported |= cpu_flags_match (t);
3532 if (supported == CPU_FLAGS_PERFECT_MATCH)
3536 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3538 as_bad (flag_code == CODE_64BIT
3539 ? _("`%s' is not supported in 64-bit mode")
3540 : _("`%s' is only supported in 64-bit mode"),
3541 current_templates->start->name);
3544 if (supported != CPU_FLAGS_PERFECT_MATCH)
3546 as_bad (_("`%s' is not supported on `%s%s'"),
3547 current_templates->start->name,
3548 cpu_arch_name ? cpu_arch_name : default_arch,
3549 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3554 if (!cpu_arch_flags.bitfield.cpui386
3555 && (flag_code != CODE_16BIT))
3557 as_warn (_("use .code16 to ensure correct addressing mode"));
3560 /* Check for rep/repne without a string (or other allowed) instruction. */
3561 if (expecting_string_instruction)
3563 static templates override;
3565 for (t = current_templates->start; t < current_templates->end; ++t)
3566 if (t->opcode_modifier.repprefixok)
3568 if (t >= current_templates->end)
3570 as_bad (_("expecting string instruction after `%s'"),
3571 expecting_string_instruction);
3574 for (override.start = t; t < current_templates->end; ++t)
3575 if (!t->opcode_modifier.repprefixok)
3578 current_templates = &override;
3585 parse_operands (char *l, const char *mnemonic)
3589 /* 1 if operand is pending after ','. */
3590 unsigned int expecting_operand = 0;
3592 /* Non-zero if operand parens not balanced. */
3593 unsigned int paren_not_balanced;
3595 while (*l != END_OF_INSN)
3597 /* Skip optional white space before operand. */
3598 if (is_space_char (*l))
3600 if (!is_operand_char (*l) && *l != END_OF_INSN)
3602 as_bad (_("invalid character %s before operand %d"),
3603 output_invalid (*l),
3607 token_start = l; /* after white space */
3608 paren_not_balanced = 0;
3609 while (paren_not_balanced || *l != ',')
3611 if (*l == END_OF_INSN)
3613 if (paren_not_balanced)
3616 as_bad (_("unbalanced parenthesis in operand %d."),
3619 as_bad (_("unbalanced brackets in operand %d."),
3624 break; /* we are done */
3626 else if (!is_operand_char (*l) && !is_space_char (*l))
3628 as_bad (_("invalid character %s in operand %d"),
3629 output_invalid (*l),
3636 ++paren_not_balanced;
3638 --paren_not_balanced;
3643 ++paren_not_balanced;
3645 --paren_not_balanced;
3649 if (l != token_start)
3650 { /* Yes, we've read in another operand. */
3651 unsigned int operand_ok;
3652 this_operand = i.operands++;
3653 i.types[this_operand].bitfield.unspecified = 1;
3654 if (i.operands > MAX_OPERANDS)
3656 as_bad (_("spurious operands; (%d operands/instruction max)"),
3660 /* Now parse operand adding info to 'i' as we go along. */
3661 END_STRING_AND_SAVE (l);
3665 i386_intel_operand (token_start,
3666 intel_float_operand (mnemonic));
3668 operand_ok = i386_att_operand (token_start);
3670 RESTORE_END_STRING (l);
3676 if (expecting_operand)
3678 expecting_operand_after_comma:
3679 as_bad (_("expecting operand after ','; got nothing"));
3684 as_bad (_("expecting operand before ','; got nothing"));
3689 /* Now *l must be either ',' or END_OF_INSN. */
3692 if (*++l == END_OF_INSN)
3694 /* Just skip it, if it's \n complain. */
3695 goto expecting_operand_after_comma;
3697 expecting_operand = 1;
3704 swap_2_operands (int xchg1, int xchg2)
3706 union i386_op temp_op;
3707 i386_operand_type temp_type;
3708 enum bfd_reloc_code_real temp_reloc;
3710 temp_type = i.types[xchg2];
3711 i.types[xchg2] = i.types[xchg1];
3712 i.types[xchg1] = temp_type;
3713 temp_op = i.op[xchg2];
3714 i.op[xchg2] = i.op[xchg1];
3715 i.op[xchg1] = temp_op;
3716 temp_reloc = i.reloc[xchg2];
3717 i.reloc[xchg2] = i.reloc[xchg1];
3718 i.reloc[xchg1] = temp_reloc;
3722 swap_operands (void)
3728 swap_2_operands (1, i.operands - 2);
3731 swap_2_operands (0, i.operands - 1);
3737 if (i.mem_operands == 2)
3739 const seg_entry *temp_seg;
3740 temp_seg = i.seg[0];
3741 i.seg[0] = i.seg[1];
3742 i.seg[1] = temp_seg;
3746 /* Try to ensure constant immediates are represented in the smallest
3751 char guess_suffix = 0;
3755 guess_suffix = i.suffix;
3756 else if (i.reg_operands)
3758 /* Figure out a suffix from the last register operand specified.
3759 We can't do this properly yet, i.e. excluding InOutPortReg,
3760 but the following works for instructions with immediates.
3761 In any case, we can't set i.suffix yet. */
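         /* e.g. for "add $0xffff, %bx" the scan below finds %bx and picks
            guess_suffix 'w', which then allows the constant to be treated
            as a 16-bit immediate.  */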
3762 for (op = i.operands; --op >= 0;)
3763 if (i.types[op].bitfield.reg8)
3765 guess_suffix = BYTE_MNEM_SUFFIX;
3768 else if (i.types[op].bitfield.reg16)
3770 guess_suffix = WORD_MNEM_SUFFIX;
3773 else if (i.types[op].bitfield.reg32)
3775 guess_suffix = LONG_MNEM_SUFFIX;
3778 else if (i.types[op].bitfield.reg64)
3780 guess_suffix = QWORD_MNEM_SUFFIX;
3784 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3785 guess_suffix = WORD_MNEM_SUFFIX;
3787 for (op = i.operands; --op >= 0;)
3788 if (operand_type_check (i.types[op], imm))
3790 switch (i.op[op].imms->X_op)
3793 /* If a suffix is given, this operand may be shortened. */
3794 switch (guess_suffix)
3796 case LONG_MNEM_SUFFIX:
3797 i.types[op].bitfield.imm32 = 1;
3798 i.types[op].bitfield.imm64 = 1;
3800 case WORD_MNEM_SUFFIX:
3801 i.types[op].bitfield.imm16 = 1;
3802 i.types[op].bitfield.imm32 = 1;
3803 i.types[op].bitfield.imm32s = 1;
3804 i.types[op].bitfield.imm64 = 1;
3806 case BYTE_MNEM_SUFFIX:
3807 i.types[op].bitfield.imm8 = 1;
3808 i.types[op].bitfield.imm8s = 1;
3809 i.types[op].bitfield.imm16 = 1;
3810 i.types[op].bitfield.imm32 = 1;
3811 i.types[op].bitfield.imm32s = 1;
3812 i.types[op].bitfield.imm64 = 1;
3816 /* If this operand is at most 16 bits, convert it
3817 to a signed 16 bit number before trying to see
3818 whether it will fit in an even smaller size.
3819 This allows a 16-bit operand such as $0xffe0 to
3820 be recognised as within Imm8S range. */
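              /* Concretely: 0xffe0 & 0xffff is 0xffe0, XOR 0x8000 gives
                 0x7fe0, and subtracting 0x8000 yields -0x20, which does
                 fit in Imm8S.  */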
3821 if ((i.types[op].bitfield.imm16)
3822 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3824 i.op[op].imms->X_add_number =
3825 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
3827 if ((i.types[op].bitfield.imm32)
3828 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3831 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3832 ^ ((offsetT) 1 << 31))
3833 - ((offsetT) 1 << 31));
3836 = operand_type_or (i.types[op],
3837 smallest_imm_type (i.op[op].imms->X_add_number));
3839 /* We must avoid matching Imm32 templates when only a 64bit
3840 immediate is available. */
3841 if (guess_suffix == QWORD_MNEM_SUFFIX)
3842 i.types[op].bitfield.imm32 = 0;
3849 /* Symbols and expressions. */
3851 /* Convert symbolic operand to proper sizes for matching, but don't
3852 prevent matching a set of insns that only supports sizes other
3853 than those matching the insn suffix. */
3855 i386_operand_type mask, allowed;
3856 const insn_template *t;
3858 operand_type_set (&mask, 0);
3859 operand_type_set (&allowed, 0);
3861 for (t = current_templates->start;
3862 t < current_templates->end;
3864 allowed = operand_type_or (allowed,
3865 t->operand_types[op]);
3866 switch (guess_suffix)
3868 case QWORD_MNEM_SUFFIX:
3869 mask.bitfield.imm64 = 1;
3870 mask.bitfield.imm32s = 1;
3872 case LONG_MNEM_SUFFIX:
3873 mask.bitfield.imm32 = 1;
3875 case WORD_MNEM_SUFFIX:
3876 mask.bitfield.imm16 = 1;
3878 case BYTE_MNEM_SUFFIX:
3879 mask.bitfield.imm8 = 1;
3884 allowed = operand_type_and (mask, allowed);
3885 if (!operand_type_all_zero (&allowed))
3886 i.types[op] = operand_type_and (i.types[op], mask);
3893 /* Try to use the smallest displacement type too. */
3895 optimize_disp (void)
3899 for (op = i.operands; --op >= 0;)
3900 if (operand_type_check (i.types[op], disp))
3902 if (i.op[op].disps->X_op == O_constant)
3904 offsetT op_disp = i.op[op].disps->X_add_number;
3906 if (i.types[op].bitfield.disp16
3907 && (op_disp & ~(offsetT) 0xffff) == 0)
3909 /* If this operand is at most 16 bits, convert
3910 to a signed 16 bit number and don't use 64bit
3912 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3913 i.types[op].bitfield.disp64 = 0;
3915 if (i.types[op].bitfield.disp32
3916 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3918 /* If this operand is at most 32 bits, convert
3919 to a signed 32 bit number and don't use 64bit
3921 op_disp &= (((offsetT) 2 << 31) - 1);
3922 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3923 i.types[op].bitfield.disp64 = 0;
3925 if (!op_disp && i.types[op].bitfield.baseindex)
3927 i.types[op].bitfield.disp8 = 0;
3928 i.types[op].bitfield.disp16 = 0;
3929 i.types[op].bitfield.disp32 = 0;
3930 i.types[op].bitfield.disp32s = 0;
3931 i.types[op].bitfield.disp64 = 0;
3935 else if (flag_code == CODE_64BIT)
3937 if (fits_in_signed_long (op_disp))
3939 i.types[op].bitfield.disp64 = 0;
3940 i.types[op].bitfield.disp32s = 1;
3942 if (i.prefix[ADDR_PREFIX]
3943 && fits_in_unsigned_long (op_disp))
3944 i.types[op].bitfield.disp32 = 1;
3946 if ((i.types[op].bitfield.disp32
3947 || i.types[op].bitfield.disp32s
3948 || i.types[op].bitfield.disp16)
3949 && fits_in_signed_byte (op_disp))
3950 i.types[op].bitfield.disp8 = 1;
3952 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3953 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3955 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3956 i.op[op].disps, 0, i.reloc[op]);
3957 i.types[op].bitfield.disp8 = 0;
3958 i.types[op].bitfield.disp16 = 0;
3959 i.types[op].bitfield.disp32 = 0;
3960 i.types[op].bitfield.disp32s = 0;
3961 i.types[op].bitfield.disp64 = 0;
3964 /* We only support 64bit displacement on constants. */
3965 i.types[op].bitfield.disp64 = 0;
3969 /* Check if operands are valid for the instruction. */
3972 check_VecOperands (const insn_template *t)
3974 /* Without VSIB byte, we can't have a vector register for index. */
3975 if (!t->opcode_modifier.vecsib
3977 && (i.index_reg->reg_type.bitfield.regxmm
3978 || i.index_reg->reg_type.bitfield.regymm))
3980 i.error = unsupported_vector_index_register;
3984 /* For VSIB byte, we need a vector register for index, and all vector
3985 registers must be distinct. */
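      /* e.g. an AVX2 gather such as "vgatherdps %ymm2, (%rax,%ymm4,4), %ymm1"
         is expected to use three distinct vector registers for mask, index
         and destination; the checks below enforce the index register class
         and, unless operand_check says otherwise, the distinctness.  */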
3986 if (t->opcode_modifier.vecsib)
3989 || !((t->opcode_modifier.vecsib == VecSIB128
3990 && i.index_reg->reg_type.bitfield.regxmm)
3991 || (t->opcode_modifier.vecsib == VecSIB256
3992 && i.index_reg->reg_type.bitfield.regymm)))
3994 i.error = invalid_vsib_address;
3998 gas_assert (i.reg_operands == 2);
3999 gas_assert (i.types[0].bitfield.regxmm
4000 || i.types[0].bitfield.regymm);
4001 gas_assert (i.types[2].bitfield.regxmm
4002 || i.types[2].bitfield.regymm);
4004 if (operand_check == check_none)
4006 if (register_number (i.op[0].regs) != register_number (i.index_reg)
4007 && register_number (i.op[2].regs) != register_number (i.index_reg)
4008 && register_number (i.op[0].regs) != register_number (i.op[2].regs))
4010 if (operand_check == check_error)
4012 i.error = invalid_vector_register_set;
4015 as_warn (_("mask, index, and destination registers should be distinct"));
4021 /* Check if operands are valid for the instruction. Update VEX
4025 VEX_check_operands (const insn_template *t)
4027 if (!t->opcode_modifier.vex)
4030 /* Only check VEX_Imm4, which must be the first operand. */
4031 if (t->operand_types[0].bitfield.vec_imm4)
4033 if (i.op[0].imms->X_op != O_constant
4034 || !fits_in_imm4 (i.op[0].imms->X_add_number))
4040 /* Turn off Imm8 so that update_imm won't complain. */
4041 i.types[0] = vec_imm4;
4047 static const insn_template *
4048 match_template (void)
4050 /* Points to template once we've found it. */
4051 const insn_template *t;
4052 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4053 i386_operand_type overlap4;
4054 unsigned int found_reverse_match;
4055 i386_opcode_modifier suffix_check;
4056 i386_operand_type operand_types [MAX_OPERANDS];
4057 int addr_prefix_disp;
4059 unsigned int found_cpu_match;
4060 unsigned int check_register;
4061 enum i386_error specific_error = 0;
4063 #if MAX_OPERANDS != 5
4064 # error "MAX_OPERANDS must be 5."
4067 found_reverse_match = 0;
4068 addr_prefix_disp = -1;
4070 memset (&suffix_check, 0, sizeof (suffix_check));
4071 if (i.suffix == BYTE_MNEM_SUFFIX)
4072 suffix_check.no_bsuf = 1;
4073 else if (i.suffix == WORD_MNEM_SUFFIX)
4074 suffix_check.no_wsuf = 1;
4075 else if (i.suffix == SHORT_MNEM_SUFFIX)
4076 suffix_check.no_ssuf = 1;
4077 else if (i.suffix == LONG_MNEM_SUFFIX)
4078 suffix_check.no_lsuf = 1;
4079 else if (i.suffix == QWORD_MNEM_SUFFIX)
4080 suffix_check.no_qsuf = 1;
4081 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4082 suffix_check.no_ldsuf = 1;
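  /* Note, as inferred from the use below: suffix_check records which
     no_Xsuf bit to test, so a template is rejected when it explicitly
     forbids the suffix the user wrote; e.g. an 'l' suffix sets no_lsuf
     here, and any template with No_lSuf set then fails the suffix check.  */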
4084 /* Must have right number of operands. */
4085 i.error = number_of_operands_mismatch;
4087 for (t = current_templates->start; t < current_templates->end; t++)
4089 addr_prefix_disp = -1;
4091 if (i.operands != t->operands)
4094 /* Check processor support. */
4095 i.error = unsupported;
4096 found_cpu_match = (cpu_flags_match (t)
4097 == CPU_FLAGS_PERFECT_MATCH);
4098 if (!found_cpu_match)
4101 /* Check old gcc support. */
4102 i.error = old_gcc_only;
4103 if (!old_gcc && t->opcode_modifier.oldgcc)
4106 /* Check AT&T mnemonic. */
4107 i.error = unsupported_with_intel_mnemonic;
4108 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4111 /* Check AT&T/Intel syntax. */
4112 i.error = unsupported_syntax;
4113 if ((intel_syntax && t->opcode_modifier.attsyntax)
4114 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4117 /* Check the suffix, except for some instructions in intel mode. */
4118 i.error = invalid_instruction_suffix;
4119 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4120 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4121 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4122 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4123 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4124 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4125 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4128 if (!operand_size_match (t))
4131 for (j = 0; j < MAX_OPERANDS; j++)
4132 operand_types[j] = t->operand_types[j];
4134 /* In general, don't allow 64-bit operands in 32-bit mode. */
4135 if (i.suffix == QWORD_MNEM_SUFFIX
4136 && flag_code != CODE_64BIT
4138 ? (!t->opcode_modifier.ignoresize
4139 && !intel_float_operand (t->name))
4140 : intel_float_operand (t->name) != 2)
4141 && ((!operand_types[0].bitfield.regmmx
4142 && !operand_types[0].bitfield.regxmm
4143 && !operand_types[0].bitfield.regymm)
4144 || (!operand_types[t->operands > 1].bitfield.regmmx
4145 && !!operand_types[t->operands > 1].bitfield.regxmm
4146 && !!operand_types[t->operands > 1].bitfield.regymm))
4147 && (t->base_opcode != 0x0fc7
4148 || t->extension_opcode != 1 /* cmpxchg8b */))
4151 /* In general, don't allow 32-bit operands on pre-386. */
4152 else if (i.suffix == LONG_MNEM_SUFFIX
4153 && !cpu_arch_flags.bitfield.cpui386
4155 ? (!t->opcode_modifier.ignoresize
4156 && !intel_float_operand (t->name))
4157 : intel_float_operand (t->name) != 2)
4158 && ((!operand_types[0].bitfield.regmmx
4159 && !operand_types[0].bitfield.regxmm)
4160 || (!operand_types[t->operands > 1].bitfield.regmmx
4161 && !!operand_types[t->operands > 1].bitfield.regxmm)))
4164 /* Do not verify operands when there are none. */
4168 /* We've found a match; break out of loop. */
4172 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4173 into Disp32/Disp16/Disp32 operand. */
4174 if (i.prefix[ADDR_PREFIX] != 0)
4176 /* There should be only one Disp operand. */
4180 for (j = 0; j < MAX_OPERANDS; j++)
4182 if (operand_types[j].bitfield.disp16)
4184 addr_prefix_disp = j;
4185 operand_types[j].bitfield.disp32 = 1;
4186 operand_types[j].bitfield.disp16 = 0;
4192 for (j = 0; j < MAX_OPERANDS; j++)
4194 if (operand_types[j].bitfield.disp32)
4196 addr_prefix_disp = j;
4197 operand_types[j].bitfield.disp32 = 0;
4198 operand_types[j].bitfield.disp16 = 1;
4204 for (j = 0; j < MAX_OPERANDS; j++)
4206 if (operand_types[j].bitfield.disp64)
4208 addr_prefix_disp = j;
4209 operand_types[j].bitfield.disp64 = 0;
4210 operand_types[j].bitfield.disp32 = 1;
4218 /* We check register size if needed. */
4219 check_register = t->opcode_modifier.checkregsize;
4220 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4221 switch (t->operands)
4224 if (!operand_type_match (overlap0, i.types[0]))
4228 /* xchg %eax, %eax is a special case. It is an alias for nop
4229 only in 32bit mode and we can use opcode 0x90. In 64bit
4230 mode, we can't use 0x90 for xchg %eax, %eax since it should
4231 zero-extend %eax to %rax. */
4232 if (flag_code == CODE_64BIT
4233 && t->base_opcode == 0x90
4234 && operand_type_equal (&i.types [0], &acc32)
4235 && operand_type_equal (&i.types [1], &acc32))
4239 /* If we swap operand in encoding, we either match
4240 the next one or reverse direction of operands. */
4241 if (t->opcode_modifier.s)
4243 else if (t->opcode_modifier.d)
4248 /* If we swap operand in encoding, we match the next one. */
4249 if (i.swap_operand && t->opcode_modifier.s)
4253 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4254 if (!operand_type_match (overlap0, i.types[0])
4255 || !operand_type_match (overlap1, i.types[1])
4257 && !operand_type_register_match (overlap0, i.types[0],
4259 overlap1, i.types[1],
4262 /* Check if other direction is valid ... */
4263 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4267 /* Try reversing direction of operands. */
4268 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4269 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4270 if (!operand_type_match (overlap0, i.types[0])
4271 || !operand_type_match (overlap1, i.types[1])
4273 && !operand_type_register_match (overlap0,
4280 /* Does not match either direction. */
4283 /* found_reverse_match holds which of D or FloatDR
4285 if (t->opcode_modifier.d)
4286 found_reverse_match = Opcode_D;
4287 else if (t->opcode_modifier.floatd)
4288 found_reverse_match = Opcode_FloatD;
4290 found_reverse_match = 0;
4291 if (t->opcode_modifier.floatr)
4292 found_reverse_match |= Opcode_FloatR;
4296 /* Found a forward 2 operand match here. */
4297 switch (t->operands)
4300 overlap4 = operand_type_and (i.types[4],
4303 overlap3 = operand_type_and (i.types[3],
4306 overlap2 = operand_type_and (i.types[2],
4311 switch (t->operands)
4314 if (!operand_type_match (overlap4, i.types[4])
4315 || !operand_type_register_match (overlap3,
4323 if (!operand_type_match (overlap3, i.types[3])
4325 && !operand_type_register_match (overlap2,
4333 /* Here we make use of the fact that there are no
4334 reverse match 3 operand instructions, and all 3
4335 operand instructions only need to be checked for
4336 register consistency between operands 2 and 3. */
4337 if (!operand_type_match (overlap2, i.types[2])
4339 && !operand_type_register_match (overlap1,
4349 /* Found either forward/reverse 2, 3 or 4 operand match here:
4350 slip through to break. */
4352 if (!found_cpu_match)
4354 found_reverse_match = 0;
4358 /* Check if vector and VEX operands are valid. */
4359 if (check_VecOperands (t) || VEX_check_operands (t))
4361 specific_error = i.error;
4365 /* We've found a match; break out of loop. */
4369 if (t == current_templates->end)
4371 /* We found no match. */
4372 const char *err_msg;
4373 switch (specific_error ? specific_error : i.error)
4377 case operand_size_mismatch:
4378 err_msg = _("operand size mismatch");
4380 case operand_type_mismatch:
4381 err_msg = _("operand type mismatch");
4383 case register_type_mismatch:
4384 err_msg = _("register type mismatch");
4386 case number_of_operands_mismatch:
4387 err_msg = _("number of operands mismatch");
4389 case invalid_instruction_suffix:
4390 err_msg = _("invalid instruction suffix");
4393 err_msg = _("constant doesn't fit in 4 bits");
4396 err_msg = _("only supported with old gcc");
4398 case unsupported_with_intel_mnemonic:
4399 err_msg = _("unsupported with Intel mnemonic");
4401 case unsupported_syntax:
4402 err_msg = _("unsupported syntax");
4405 as_bad (_("unsupported instruction `%s'"),
4406 current_templates->start->name);
4408 case invalid_vsib_address:
4409 err_msg = _("invalid VSIB address");
4411 case invalid_vector_register_set:
4412 err_msg = _("mask, index, and destination registers must be distinct");
4414 case unsupported_vector_index_register:
4415 err_msg = _("unsupported vector index register");
4418 as_bad (_("%s for `%s'"), err_msg,
4419 current_templates->start->name);
4423 if (!quiet_warnings)
4426 && (i.types[0].bitfield.jumpabsolute
4427 != operand_types[0].bitfield.jumpabsolute))
4429 as_warn (_("indirect %s without `*'"), t->name);
4432 if (t->opcode_modifier.isprefix
4433 && t->opcode_modifier.ignoresize)
4435 /* Warn them that a data or address size prefix doesn't
4436 affect assembly of the next line of code. */
4437 as_warn (_("stand-alone `%s' prefix"), t->name);
4441 /* Copy the template we found. */
4444 if (addr_prefix_disp != -1)
4445 i.tm.operand_types[addr_prefix_disp]
4446 = operand_types[addr_prefix_disp];
4448 if (found_reverse_match)
4450 /* If we found a reverse match we must alter the opcode
4451 direction bit. found_reverse_match holds bits to change
4452 (different for int & float insns). */
4454 i.tm.base_opcode ^= found_reverse_match;
4456 i.tm.operand_types[0] = operand_types[1];
4457 i.tm.operand_types[1] = operand_types[0];
4466 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4467 if (i.tm.operand_types[mem_op].bitfield.esseg)
4469 if (i.seg[0] != NULL && i.seg[0] != &es)
4471 as_bad (_("`%s' operand %d must use `%ses' segment"),
4477 /* There's only ever one segment override allowed per instruction.
4478 This instruction possibly has a legal segment override on the
4479 second operand, so copy the segment to where non-string
4480 instructions store it, allowing common code. */
4481 i.seg[0] = i.seg[1];
4483 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4485 if (i.seg[1] != NULL && i.seg[1] != &es)
4487 as_bad (_("`%s' operand %d must use `%ses' segment"),
4498 process_suffix (void)
4500 /* If matched instruction specifies an explicit instruction mnemonic
4502 if (i.tm.opcode_modifier.size16)
4503 i.suffix = WORD_MNEM_SUFFIX;
4504 else if (i.tm.opcode_modifier.size32)
4505 i.suffix = LONG_MNEM_SUFFIX;
4506 else if (i.tm.opcode_modifier.size64)
4507 i.suffix = QWORD_MNEM_SUFFIX;
4508 else if (i.reg_operands)
4510 /* If there's no instruction mnemonic suffix we try to invent one
4511 based on register operands. */
4514 /* We take i.suffix from the last register operand specified,
4515 Destination register type is more significant than source
4516 register type. crc32 in SSE4.2 prefers source register
4518 if (i.tm.base_opcode == 0xf20f38f1)
4520 if (i.types[0].bitfield.reg16)
4521 i.suffix = WORD_MNEM_SUFFIX;
4522 else if (i.types[0].bitfield.reg32)
4523 i.suffix = LONG_MNEM_SUFFIX;
4524 else if (i.types[0].bitfield.reg64)
4525 i.suffix = QWORD_MNEM_SUFFIX;
4527 else if (i.tm.base_opcode == 0xf20f38f0)
4529 if (i.types[0].bitfield.reg8)
4530 i.suffix = BYTE_MNEM_SUFFIX;
4537 if (i.tm.base_opcode == 0xf20f38f1
4538 || i.tm.base_opcode == 0xf20f38f0)
4540 /* We have to know the operand size for crc32. */
4541 as_bad (_("ambiguous memory operand size for `%s`"),
4546 for (op = i.operands; --op >= 0;)
4547 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4549 if (i.types[op].bitfield.reg8)
4551 i.suffix = BYTE_MNEM_SUFFIX;
4554 else if (i.types[op].bitfield.reg16)
4556 i.suffix = WORD_MNEM_SUFFIX;
4559 else if (i.types[op].bitfield.reg32)
4561 i.suffix = LONG_MNEM_SUFFIX;
4564 else if (i.types[op].bitfield.reg64)
4566 i.suffix = QWORD_MNEM_SUFFIX;
4572 else if (i.suffix == BYTE_MNEM_SUFFIX)
4575 && i.tm.opcode_modifier.ignoresize
4576 && i.tm.opcode_modifier.no_bsuf)
4578 else if (!check_byte_reg ())
4581 else if (i.suffix == LONG_MNEM_SUFFIX)
4584 && i.tm.opcode_modifier.ignoresize
4585 && i.tm.opcode_modifier.no_lsuf)
4587 else if (!check_long_reg ())
4590 else if (i.suffix == QWORD_MNEM_SUFFIX)
4593 && i.tm.opcode_modifier.ignoresize
4594 && i.tm.opcode_modifier.no_qsuf)
4596 else if (!check_qword_reg ())
4599 else if (i.suffix == WORD_MNEM_SUFFIX)
4602 && i.tm.opcode_modifier.ignoresize
4603 && i.tm.opcode_modifier.no_wsuf)
4605 else if (!check_word_reg ())
4608 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4609 || i.suffix == YMMWORD_MNEM_SUFFIX)
4611 /* Skip if the instruction has x/y suffix. match_template
4612 should check if it is a valid suffix. */
4614 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4615 /* Do nothing if the instruction is going to ignore the prefix. */
4620 else if (i.tm.opcode_modifier.defaultsize
4622 /* exclude fldenv/frstor/fsave/fstenv */
4623 && i.tm.opcode_modifier.no_ssuf)
4625 i.suffix = stackop_size;
4627 else if (intel_syntax
4629 && (i.tm.operand_types[0].bitfield.jumpabsolute
4630 || i.tm.opcode_modifier.jumpbyte
4631 || i.tm.opcode_modifier.jumpintersegment
4632 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4633 && i.tm.extension_opcode <= 3)))
4638 if (!i.tm.opcode_modifier.no_qsuf)
4640 i.suffix = QWORD_MNEM_SUFFIX;
4644 if (!i.tm.opcode_modifier.no_lsuf)
4645 i.suffix = LONG_MNEM_SUFFIX;
4648 if (!i.tm.opcode_modifier.no_wsuf)
4649 i.suffix = WORD_MNEM_SUFFIX;
4658 if (i.tm.opcode_modifier.w)
4660 as_bad (_("no instruction mnemonic suffix given and "
4661 "no register operands; can't size instruction"));
4667 unsigned int suffixes;
4669 suffixes = !i.tm.opcode_modifier.no_bsuf;
4670 if (!i.tm.opcode_modifier.no_wsuf)
4672 if (!i.tm.opcode_modifier.no_lsuf)
4674 if (!i.tm.opcode_modifier.no_ldsuf)
4676 if (!i.tm.opcode_modifier.no_ssuf)
4678 if (!i.tm.opcode_modifier.no_qsuf)
4681 /* There is more than one possible suffix match. */
4682 if (i.tm.opcode_modifier.w
4683 || ((suffixes & (suffixes - 1))
4684 && !i.tm.opcode_modifier.defaultsize
4685 && !i.tm.opcode_modifier.ignoresize))
4687 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4693 /* Change the opcode based on the operand size given by i.suffix.
4694 We don't need to change things for byte insns. */
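/* For example "addb" (opcode 0x00) becomes "addl"/"addw" (0x01) by setting
   the W bit below, while short-form register encodings such as
   mov-immediate flip bit 3 instead (0xb0 -> 0xb8); whether a 16- or
   32-bit operation results is then chosen by the data size prefix added
   further down.  */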
4697 && i.suffix != BYTE_MNEM_SUFFIX
4698 && i.suffix != XMMWORD_MNEM_SUFFIX
4699 && i.suffix != YMMWORD_MNEM_SUFFIX)
4701 /* It's not a byte, select word/dword operation. */
4702 if (i.tm.opcode_modifier.w)
4704 if (i.tm.opcode_modifier.shortform)
4705 i.tm.base_opcode |= 8;
4707 i.tm.base_opcode |= 1;
4710 /* Now select between word & dword operations via the operand
4711 size prefix, except for instructions that will ignore this
4713 if (i.tm.opcode_modifier.addrprefixop0)
4715 /* The address size override prefix changes the size of the
4717 if ((flag_code == CODE_32BIT
4718 && i.op->regs[0].reg_type.bitfield.reg16)
4719 || (flag_code != CODE_32BIT
4720 && i.op->regs[0].reg_type.bitfield.reg32))
4721 if (!add_prefix (ADDR_PREFIX_OPCODE))
4724 else if (i.suffix != QWORD_MNEM_SUFFIX
4725 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4726 && !i.tm.opcode_modifier.ignoresize
4727 && !i.tm.opcode_modifier.floatmf
4728 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4729 || (flag_code == CODE_64BIT
4730 && i.tm.opcode_modifier.jumpbyte)))
4732 unsigned int prefix = DATA_PREFIX_OPCODE;
4734 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4735 prefix = ADDR_PREFIX_OPCODE;
4737 if (!add_prefix (prefix))
4741 /* Set mode64 for an operand. */
4742 if (i.suffix == QWORD_MNEM_SUFFIX
4743 && flag_code == CODE_64BIT
4744 && !i.tm.opcode_modifier.norex64)
4746 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4747 need rex64. cmpxchg8b is also a special case. */
4748 if (! (i.operands == 2
4749 && i.tm.base_opcode == 0x90
4750 && i.tm.extension_opcode == None
4751 && operand_type_equal (&i.types [0], &acc64)
4752 && operand_type_equal (&i.types [1], &acc64))
4753 && ! (i.operands == 1
4754 && i.tm.base_opcode == 0xfc7
4755 && i.tm.extension_opcode == 1
4756 && !operand_type_check (i.types [0], reg)
4757 && operand_type_check (i.types [0], anymem)))
4761 /* Size floating point instruction. */
4762 if (i.suffix == LONG_MNEM_SUFFIX)
4763 if (i.tm.opcode_modifier.floatmf)
4764 i.tm.base_opcode ^= 4;
4771 check_byte_reg (void)
4775 for (op = i.operands; --op >= 0;)
4777 /* If this is an eight bit register, it's OK. If it's the 16 or
4778 32 bit version of an eight bit register, we will just use the
4779 low portion, and that's OK too. */
4780 if (i.types[op].bitfield.reg8)
4783 /* I/O port address operands are OK too. */
4784 if (i.tm.operand_types[op].bitfield.inoutportreg)
4787 /* crc32 doesn't generate this warning. */
4788 if (i.tm.base_opcode == 0xf20f38f0)
4791 if ((i.types[op].bitfield.reg16
4792 || i.types[op].bitfield.reg32
4793 || i.types[op].bitfield.reg64)
4794 && i.op[op].regs->reg_num < 4
4795 /* Prohibit these changes in 64bit mode, since the lowering
4796 would be more complicated. */
4797 && flag_code != CODE_64BIT)
4799 #if REGISTER_WARNINGS
4800 if (!quiet_warnings)
4801 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4803 (i.op[op].regs + (i.types[op].bitfield.reg16
4804 ? REGNAM_AL - REGNAM_AX
4805 : REGNAM_AL - REGNAM_EAX))->reg_name,
4807 i.op[op].regs->reg_name,
4812 /* Any other register is bad. */
4813 if (i.types[op].bitfield.reg16
4814 || i.types[op].bitfield.reg32
4815 || i.types[op].bitfield.reg64
4816 || i.types[op].bitfield.regmmx
4817 || i.types[op].bitfield.regxmm
4818 || i.types[op].bitfield.regymm
4819 || i.types[op].bitfield.sreg2
4820 || i.types[op].bitfield.sreg3
4821 || i.types[op].bitfield.control
4822 || i.types[op].bitfield.debug
4823 || i.types[op].bitfield.test
4824 || i.types[op].bitfield.floatreg
4825 || i.types[op].bitfield.floatacc)
4827 as_bad (_("`%s%s' not allowed with `%s%c'"),
4829 i.op[op].regs->reg_name,
4839 check_long_reg (void)
4843 for (op = i.operands; --op >= 0;)
4844 /* Reject eight bit registers, except where the template requires
4845 them. (e.g. movzb) */
4846 if (i.types[op].bitfield.reg8
4847 && (i.tm.operand_types[op].bitfield.reg16
4848 || i.tm.operand_types[op].bitfield.reg32
4849 || i.tm.operand_types[op].bitfield.acc))
4851 as_bad (_("`%s%s' not allowed with `%s%c'"),
4853 i.op[op].regs->reg_name,
4858 /* Warn if the e prefix on a general reg is missing. */
4859 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4860 && i.types[op].bitfield.reg16
4861 && (i.tm.operand_types[op].bitfield.reg32
4862 || i.tm.operand_types[op].bitfield.acc))
4864 /* Prohibit these changes in the 64bit mode, since the
4865 lowering is more complicated. */
4866 if (flag_code == CODE_64BIT)
4868 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4869 register_prefix, i.op[op].regs->reg_name,
4873 #if REGISTER_WARNINGS
4875 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4877 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4879 i.op[op].regs->reg_name,
4883 /* Warn if the r prefix on a general reg is missing. */
4884 else if (i.types[op].bitfield.reg64
4885 && (i.tm.operand_types[op].bitfield.reg32
4886 || i.tm.operand_types[op].bitfield.acc))
4889 && i.tm.opcode_modifier.toqword
4890 && !i.types[0].bitfield.regxmm)
4892 /* Convert to QWORD. We want REX byte. */
4893 i.suffix = QWORD_MNEM_SUFFIX;
4897 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4898 register_prefix, i.op[op].regs->reg_name,
4907 check_qword_reg (void)
4911 for (op = i.operands; --op >= 0; )
4912 /* Reject eight bit registers, except where the template requires
4913 them. (eg. movzb) */
4914 if (i.types[op].bitfield.reg8
4915 && (i.tm.operand_types[op].bitfield.reg16
4916 || i.tm.operand_types[op].bitfield.reg32
4917 || i.tm.operand_types[op].bitfield.acc))
4919 as_bad (_("`%s%s' not allowed with `%s%c'"),
4921 i.op[op].regs->reg_name,
4926 /* Warn if the r prefix on a general reg is missing. */
4927 else if ((i.types[op].bitfield.reg16
4928 || i.types[op].bitfield.reg32)
4929 && (i.tm.operand_types[op].bitfield.reg32
4930 || i.tm.operand_types[op].bitfield.acc))
4932 /* Prohibit these changes in the 64bit mode, since the
4933 lowering is more complicated. */
4935 && i.tm.opcode_modifier.todword
4936 && !i.types[0].bitfield.regxmm)
4938 /* Convert to DWORD. We don't want REX byte. */
4939 i.suffix = LONG_MNEM_SUFFIX;
4943 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4944 register_prefix, i.op[op].regs->reg_name,
4953 check_word_reg (void)
4956 for (op = i.operands; --op >= 0;)
4957 /* Reject eight bit registers, except where the template requires
4958 them. (eg. movzb) */
4959 if (i.types[op].bitfield.reg8
4960 && (i.tm.operand_types[op].bitfield.reg16
4961 || i.tm.operand_types[op].bitfield.reg32
4962 || i.tm.operand_types[op].bitfield.acc))
4964 as_bad (_("`%s%s' not allowed with `%s%c'"),
4966 i.op[op].regs->reg_name,
4971 /* Warn if the e prefix on a general reg is present. */
4972 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4973 && i.types[op].bitfield.reg32
4974 && (i.tm.operand_types[op].bitfield.reg16
4975 || i.tm.operand_types[op].bitfield.acc))
4977 /* Prohibit these changes in the 64bit mode, since the
4978 lowering is more complicated. */
4979 if (flag_code == CODE_64BIT)
4981 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4982 register_prefix, i.op[op].regs->reg_name,
4987 #if REGISTER_WARNINGS
4988 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4990 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4992 i.op[op].regs->reg_name,
5000 update_imm (unsigned int j)
5002 i386_operand_type overlap = i.types[j];
5003 if ((overlap.bitfield.imm8
5004 || overlap.bitfield.imm8s
5005 || overlap.bitfield.imm16
5006 || overlap.bitfield.imm32
5007 || overlap.bitfield.imm32s
5008 || overlap.bitfield.imm64)
5009 && !operand_type_equal (&overlap, &imm8)
5010 && !operand_type_equal (&overlap, &imm8s)
5011 && !operand_type_equal (&overlap, &imm16)
5012 && !operand_type_equal (&overlap, &imm32)
5013 && !operand_type_equal (&overlap, &imm32s)
5014 && !operand_type_equal (&overlap, &imm64))
5018 i386_operand_type temp;
5020 operand_type_set (&temp, 0);
5021 if (i.suffix == BYTE_MNEM_SUFFIX)
5023 temp.bitfield.imm8 = overlap.bitfield.imm8;
5024 temp.bitfield.imm8s = overlap.bitfield.imm8s;
5026 else if (i.suffix == WORD_MNEM_SUFFIX)
5027 temp.bitfield.imm16 = overlap.bitfield.imm16;
5028 else if (i.suffix == QWORD_MNEM_SUFFIX)
5030 temp.bitfield.imm64 = overlap.bitfield.imm64;
5031 temp.bitfield.imm32s = overlap.bitfield.imm32s;
5034 temp.bitfield.imm32 = overlap.bitfield.imm32;
5037 else if (operand_type_equal (&overlap, &imm16_32_32s)
5038 || operand_type_equal (&overlap, &imm16_32)
5039 || operand_type_equal (&overlap, &imm16_32s))
5041 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
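/* Added note: with no suffix to go by, the XOR above selects the 16-bit
   immediate when the effective operand size is 16 bits, so e.g. a plain
   "push $0x1234" takes an imm16 in .code16 but an imm32 in .code32.  */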
5046 if (!operand_type_equal (&overlap, &imm8)
5047 && !operand_type_equal (&overlap, &imm8s)
5048 && !operand_type_equal (&overlap, &imm16)
5049 && !operand_type_equal (&overlap, &imm32)
5050 && !operand_type_equal (&overlap, &imm32s)
5051 && !operand_type_equal (&overlap, &imm64))
5053 as_bad (_("no instruction mnemonic suffix given; "
5054 "can't determine immediate size"));
5058 i.types[j] = overlap;
5068 /* Update the first 2 immediate operands. */
5069 n = i.operands > 2 ? 2 : i.operands;
5072 for (j = 0; j < n; j++)
5073 if (update_imm (j) == 0)
5076 /* The 3rd operand can't be immediate operand. */
5077 gas_assert (operand_type_check (i.types[2], imm) == 0);
5084 bad_implicit_operand (int xmm)
5086 const char *ireg = xmm ? "xmm0" : "ymm0";
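/* Added note: this error fires when the implicit %xmm0/%ymm0 slot is
   given some other register, e.g. (illustrative) writing
   "blendvps %xmm2,%xmm3,%xmm1" where SSE4.1 blendvps wants %xmm0 as its
   first operand.  */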
5089 as_bad (_("the last operand of `%s' must be `%s%s'"),
5090 i.tm.name, register_prefix, ireg);
5092 as_bad (_("the first operand of `%s' must be `%s%s'"),
5093 i.tm.name, register_prefix, ireg);
5098 process_operands (void)
5100 /* Default segment register this instruction will use for memory
5101 accesses. 0 means unknown. This is only for optimizing out
5102 unnecessary segment overrides. */
5103 const seg_entry *default_seg = 0;
5105 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5107 unsigned int dupl = i.operands;
5108 unsigned int dest = dupl - 1;
5111 /* The destination must be an xmm register. */
5112 gas_assert (i.reg_operands
5113 && MAX_OPERANDS > dupl
5114 && operand_type_equal (&i.types[dest], &regxmm));
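/* Added sketch of this path (illustrative): under -msse2avx a
   destructive two-operand SSE insn such as "addps %xmm1,%xmm0" is
   re-encoded as the non-destructive VEX form "vaddps %xmm1,%xmm0,%xmm0",
   so the destination is duplicated into the extra source slot below.  */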
5116 if (i.tm.opcode_modifier.firstxmm0)
5118 /* The first operand is implicit and must be xmm0. */
5119 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5120 if (register_number (i.op[0].regs) != 0)
5121 return bad_implicit_operand (1);
5123 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5125 /* Keep xmm0 for instructions with VEX prefix and 3
5131 /* We remove the first xmm0 and keep the number of
5132 operands unchanged, which in fact duplicates the
5134 for (j = 1; j < i.operands; j++)
5136 i.op[j - 1] = i.op[j];
5137 i.types[j - 1] = i.types[j];
5138 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5142 else if (i.tm.opcode_modifier.implicit1stxmm0)
5144 gas_assert ((MAX_OPERANDS - 1) > dupl
5145 && (i.tm.opcode_modifier.vexsources
5148 /* Add the implicit xmm0 for instructions with VEX prefix
5150 for (j = i.operands; j > 0; j--)
5152 i.op[j] = i.op[j - 1];
5153 i.types[j] = i.types[j - 1];
5154 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5157 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5158 i.types[0] = regxmm;
5159 i.tm.operand_types[0] = regxmm;
5162 i.reg_operands += 2;
5167 i.op[dupl] = i.op[dest];
5168 i.types[dupl] = i.types[dest];
5169 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5178 i.op[dupl] = i.op[dest];
5179 i.types[dupl] = i.types[dest];
5180 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5183 if (i.tm.opcode_modifier.immext)
5186 else if (i.tm.opcode_modifier.firstxmm0)
5190 /* The first operand is implicit and must be xmm0/ymm0. */
5191 gas_assert (i.reg_operands
5192 && (operand_type_equal (&i.types[0], &regxmm)
5193 || operand_type_equal (&i.types[0], &regymm)));
5194 if (register_number (i.op[0].regs) != 0)
5195 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5197 for (j = 1; j < i.operands; j++)
5199 i.op[j - 1] = i.op[j];
5200 i.types[j - 1] = i.types[j];
5202 /* We need to adjust fields in i.tm since they are used by
5203 build_modrm_byte. */
5204 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5211 else if (i.tm.opcode_modifier.regkludge)
5213 /* The imul $imm, %reg instruction is converted into
5214 imul $imm, %reg, %reg, and the clr %reg instruction
5215 is converted into xor %reg, %reg. */
5217 unsigned int first_reg_op;
5219 if (operand_type_check (i.types[0], reg))
5223 /* Pretend we saw the extra register operand. */
5224 gas_assert (i.reg_operands == 1
5225 && i.op[first_reg_op + 1].regs == 0);
5226 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5227 i.types[first_reg_op + 1] = i.types[first_reg_op];
5232 if (i.tm.opcode_modifier.shortform)
5234 if (i.types[0].bitfield.sreg2
5235 || i.types[0].bitfield.sreg3)
5237 if (i.tm.base_opcode == POP_SEG_SHORT
5238 && i.op[0].regs->reg_num == 1)
5240 as_bad (_("you can't `pop %scs'"), register_prefix);
5243 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5244 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5249 /* The register or float register operand is in operand
5253 if (i.types[0].bitfield.floatreg
5254 || operand_type_check (i.types[0], reg))
5258 /* Register goes in low 3 bits of opcode. */
5259 i.tm.base_opcode |= i.op[op].regs->reg_num;
5260 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5262 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5264 /* Warn about some common errors, but press on regardless.
5265 The first case can be generated by gcc (<= 2.8.1). */
5266 if (i.operands == 2)
5268 /* Reversed arguments on faddp, fsubp, etc. */
5269 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5270 register_prefix, i.op[!intel_syntax].regs->reg_name,
5271 register_prefix, i.op[intel_syntax].regs->reg_name);
5275 /* Extraneous `l' suffix on fp insn. */
5276 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5277 register_prefix, i.op[0].regs->reg_name);
5282 else if (i.tm.opcode_modifier.modrm)
5284 /* The opcode is completed (modulo i.tm.extension_opcode which
5285 must be put into the modrm byte). Now, we make the modrm and
5286 index base bytes based on all the info we've collected. */
5288 default_seg = build_modrm_byte ();
5290 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5294 else if (i.tm.opcode_modifier.isstring)
5296 /* For the string instructions that allow a segment override
5297 on one of their operands, the default segment is ds. */
5301 if (i.tm.base_opcode == 0x8d /* lea */
5304 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5306 /* If a segment was explicitly specified, and the specified segment
5307 is not the default, use an opcode prefix to select it. If we
5308 never figured out what the default segment is, then default_seg
5309 will be zero at this point, and the specified segment prefix will
5311 if ((i.seg[0]) && (i.seg[0] != default_seg))
5313 if (!add_prefix (i.seg[0]->seg_prefix))
5319 static const seg_entry *
5320 build_modrm_byte (void)
5322 const seg_entry *default_seg = 0;
5323 unsigned int source, dest;
5326 /* The first operand of instructions with VEX prefix and 3 sources
5327 must be VEX_Imm4. */
5328 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5331 unsigned int nds, reg_slot;
5334 if (i.tm.opcode_modifier.veximmext
5335 && i.tm.opcode_modifier.immext)
5337 dest = i.operands - 2;
5338 gas_assert (dest == 3);
5341 dest = i.operands - 1;
5344 /* There are 2 kinds of instructions:
5345 1. 5 operands: 4 register operands or 3 register operands
5346 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5347 VexW0 or VexW1. The destination must be either XMM or YMM
5349 2. 4 operands: 4 register operands or 3 register operands
5350 plus 1 memory operand, VexXDS, and VexImmExt */
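/* Added note (illustrative, an assumption rather than part of the
   original comment): kind 1 corresponds to the XOP "vpermil2ps" style
   with its explicit 4-bit immediate, kind 2 to the FMA4 style
   ("vfmaddps" etc.) where the code below materialises the imm8 whose
   high four bits carry the extra register.  */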
5351 gas_assert ((i.reg_operands == 4
5352 || (i.reg_operands == 3 && i.mem_operands == 1))
5353 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5354 && (i.tm.opcode_modifier.veximmext
5355 || (i.imm_operands == 1
5356 && i.types[0].bitfield.vec_imm4
5357 && (i.tm.opcode_modifier.vexw == VEXW0
5358 || i.tm.opcode_modifier.vexw == VEXW1)
5359 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5360 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5362 if (i.imm_operands == 0)
5364 /* When there is no immediate operand, generate an 8bit
5365 immediate operand to encode the first operand. */
5366 exp = &im_expressions[i.imm_operands++];
5367 i.op[i.operands].imms = exp;
5368 i.types[i.operands] = imm8;
5370 /* If VexW1 is set, the first operand is the source and
5371 the second operand is encoded in the immediate operand. */
5372 if (i.tm.opcode_modifier.vexw == VEXW1)
5383 /* FMA swaps REG and NDS. */
5384 if (i.tm.cpu_flags.bitfield.cpufma)
5392 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5394 || operand_type_equal (&i.tm.operand_types[reg_slot],
5396 exp->X_op = O_constant;
5397 exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
5401 unsigned int imm_slot;
5403 if (i.tm.opcode_modifier.vexw == VEXW0)
5405 /* If VexW0 is set, the third operand is the source and
5406 the second operand is encoded in the immediate
5413 /* VexW1 is set, the second operand is the source and
5414 the third operand is encoded in the immediate
5420 if (i.tm.opcode_modifier.immext)
5422 /* When ImmExt is set, the immediate byte is the last
5424 imm_slot = i.operands - 1;
5432 /* Turn on Imm8 so that output_imm will generate it. */
5433 i.types[imm_slot].bitfield.imm8 = 1;
5436 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5438 || operand_type_equal (&i.tm.operand_types[reg_slot],
5440 i.op[imm_slot].imms->X_add_number
5441 |= register_number (i.op[reg_slot].regs) << 4;
5444 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5445 || operand_type_equal (&i.tm.operand_types[nds],
5447 i.vex.register_specifier = i.op[nds].regs;
5452 /* i.reg_operands MUST be the number of real register operands;
5453 implicit registers do not count. If there are 3 register
5454 operands, it must be an instruction with VexNDS. For an
5455 instruction with VexNDD, the destination register is encoded
5456 in the VEX prefix. If there are 4 register operands, it must be
5457 an instruction with a VEX prefix and 3 sources. */
5458 if (i.mem_operands == 0
5459 && ((i.reg_operands == 2
5460 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5461 || (i.reg_operands == 3
5462 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5463 || (i.reg_operands == 4 && vex_3_sources)))
5471 /* When there are 3 operands, one of them may be immediate,
5472 which may be the first or the last operand. Otherwise,
5473 the first operand must be shift count register (cl) or it
5474 is an instruction with VexNDS. */
5475 gas_assert (i.imm_operands == 1
5476 || (i.imm_operands == 0
5477 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5478 || i.types[0].bitfield.shiftcount)));
5479 if (operand_type_check (i.types[0], imm)
5480 || i.types[0].bitfield.shiftcount)
5486 /* When there are 4 operands, the first two must be 8bit
5487 immediate operands. The source operand will be the 3rd
5490 For instructions with VexNDS, if the first operand
5491 is an imm8, the source operand is the 2nd one. If the last
5492 operand is imm8, the source operand is the first one. */
5493 gas_assert ((i.imm_operands == 2
5494 && i.types[0].bitfield.imm8
5495 && i.types[1].bitfield.imm8)
5496 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5497 && i.imm_operands == 1
5498 && (i.types[0].bitfield.imm8
5499 || i.types[i.operands - 1].bitfield.imm8)));
5500 if (i.imm_operands == 2)
5504 if (i.types[0].bitfield.imm8)
5520 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5522 /* For instructions with VexNDS, the register-only
5523 source operand must be 32/64bit integer, XMM or
5524 YMM register. It is encoded in VEX prefix. We
5525 need to clear RegMem bit before calling
5526 operand_type_equal. */
5528 i386_operand_type op;
5531 /* Check register-only source operand when two source
5532 operands are swapped. */
5533 if (!i.tm.operand_types[source].bitfield.baseindex
5534 && i.tm.operand_types[dest].bitfield.baseindex)
5542 op = i.tm.operand_types[vvvv];
5543 op.bitfield.regmem = 0;
5544 if ((dest + 1) >= i.operands
5545 || (op.bitfield.reg32 != 1
5546 && op.bitfield.reg64 != 1
5547 && !operand_type_equal (&op, &regxmm)
5548 && !operand_type_equal (&op, &regymm)))
5550 i.vex.register_specifier = i.op[vvvv].regs;
5556 /* One of the register operands will be encoded in the i.tm.reg
5557 field, the other in the combined i.tm.mode and i.tm.regmem
5558 fields. If no form of this instruction supports a memory
5559 destination operand, then we assume the source operand may
5560 sometimes be a memory operand and so we need to store the
5561 destination in the i.rm.reg field. */
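/* Worked example (added, illustrative): "imull %ebx,%eax" (0x0faf /r)
   has a register-only destination, so %eax goes in ModRM.reg and %ebx
   in ModRM.rm; "movl %eax,%ebx" (0x89 /r) allows a memory destination,
   so the else branch below puts %ebx in ModRM.rm and %eax in
   ModRM.reg.  */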
5562 if (!i.tm.operand_types[dest].bitfield.regmem
5563 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5565 i.rm.reg = i.op[dest].regs->reg_num;
5566 i.rm.regmem = i.op[source].regs->reg_num;
5567 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5569 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5574 i.rm.reg = i.op[source].regs->reg_num;
5575 i.rm.regmem = i.op[dest].regs->reg_num;
5576 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5578 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5581 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5583 if (!i.types[0].bitfield.control
5584 && !i.types[1].bitfield.control)
5586 i.rex &= ~(REX_R | REX_B);
5587 add_prefix (LOCK_PREFIX_OPCODE);
5591 { /* If it's not 2 reg operands... */
5596 unsigned int fake_zero_displacement = 0;
5599 for (op = 0; op < i.operands; op++)
5600 if (operand_type_check (i.types[op], anymem))
5602 gas_assert (op < i.operands);
5604 if (i.tm.opcode_modifier.vecsib)
5606 if (i.index_reg->reg_num == RegEiz
5607 || i.index_reg->reg_num == RegRiz)
5610 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5613 i.sib.base = NO_BASE_REGISTER;
5614 i.sib.scale = i.log2_scale_factor;
5615 i.types[op].bitfield.disp8 = 0;
5616 i.types[op].bitfield.disp16 = 0;
5617 i.types[op].bitfield.disp64 = 0;
5618 if (flag_code != CODE_64BIT)
5620 /* Must be 32 bit */
5621 i.types[op].bitfield.disp32 = 1;
5622 i.types[op].bitfield.disp32s = 0;
5626 i.types[op].bitfield.disp32 = 0;
5627 i.types[op].bitfield.disp32s = 1;
5630 i.sib.index = i.index_reg->reg_num;
5631 if ((i.index_reg->reg_flags & RegRex) != 0)
5637 if (i.base_reg == 0)
5640 if (!i.disp_operands)
5642 fake_zero_displacement = 1;
5643 /* Instructions with VSIB byte need 32bit displacement
5644 if there is no base register. */
5645 if (i.tm.opcode_modifier.vecsib)
5646 i.types[op].bitfield.disp32 = 1;
5648 if (i.index_reg == 0)
5650 gas_assert (!i.tm.opcode_modifier.vecsib);
5651 /* Operand is just <disp> */
5652 if (flag_code == CODE_64BIT)
5654 /* 64bit mode overwrites the 32bit absolute
5655 addressing by RIP relative addressing and
5656 absolute addressing is encoded by one of the
5657 redundant SIB forms. */
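/* Added encoding example: "movl 0x1234,%ebx" in 64-bit code emits
   ModRM rm=100 plus the SIB byte 0x25 (no base, no index) and a 32-bit
   displacement, because the plain disp32 form (mod=00, rm=101) means
   RIP-relative there.  */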
5658 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5659 i.sib.base = NO_BASE_REGISTER;
5660 i.sib.index = NO_INDEX_REGISTER;
5661 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5662 ? disp32s : disp32);
5664 else if ((flag_code == CODE_16BIT)
5665 ^ (i.prefix[ADDR_PREFIX] != 0))
5667 i.rm.regmem = NO_BASE_REGISTER_16;
5668 i.types[op] = disp16;
5672 i.rm.regmem = NO_BASE_REGISTER;
5673 i.types[op] = disp32;
5676 else if (!i.tm.opcode_modifier.vecsib)
5678 /* !i.base_reg && i.index_reg */
5679 if (i.index_reg->reg_num == RegEiz
5680 || i.index_reg->reg_num == RegRiz)
5681 i.sib.index = NO_INDEX_REGISTER;
5683 i.sib.index = i.index_reg->reg_num;
5684 i.sib.base = NO_BASE_REGISTER;
5685 i.sib.scale = i.log2_scale_factor;
5686 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5687 i.types[op].bitfield.disp8 = 0;
5688 i.types[op].bitfield.disp16 = 0;
5689 i.types[op].bitfield.disp64 = 0;
5690 if (flag_code != CODE_64BIT)
5692 /* Must be 32 bit */
5693 i.types[op].bitfield.disp32 = 1;
5694 i.types[op].bitfield.disp32s = 0;
5698 i.types[op].bitfield.disp32 = 0;
5699 i.types[op].bitfield.disp32s = 1;
5701 if ((i.index_reg->reg_flags & RegRex) != 0)
5705 /* RIP addressing for 64bit mode. */
5706 else if (i.base_reg->reg_num == RegRip ||
5707 i.base_reg->reg_num == RegEip)
5709 gas_assert (!i.tm.opcode_modifier.vecsib);
5710 i.rm.regmem = NO_BASE_REGISTER;
5711 i.types[op].bitfield.disp8 = 0;
5712 i.types[op].bitfield.disp16 = 0;
5713 i.types[op].bitfield.disp32 = 0;
5714 i.types[op].bitfield.disp32s = 1;
5715 i.types[op].bitfield.disp64 = 0;
5716 i.flags[op] |= Operand_PCrel;
5717 if (! i.disp_operands)
5718 fake_zero_displacement = 1;
5720 else if (i.base_reg->reg_type.bitfield.reg16)
5722 gas_assert (!i.tm.opcode_modifier.vecsib);
5723 switch (i.base_reg->reg_num)
5726 if (i.index_reg == 0)
5728 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5729 i.rm.regmem = i.index_reg->reg_num - 6;
5733 if (i.index_reg == 0)
5736 if (operand_type_check (i.types[op], disp) == 0)
5738 /* fake (%bp) into 0(%bp) */
5739 i.types[op].bitfield.disp8 = 1;
5740 fake_zero_displacement = 1;
5743 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5744 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5746 default: /* (%si) -> 4 or (%di) -> 5 */
5747 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5749 i.rm.mode = mode_from_disp_size (i.types[op]);
5751 else /* i.base_reg and 32/64 bit mode */
5753 if (flag_code == CODE_64BIT
5754 && operand_type_check (i.types[op], disp))
5756 i386_operand_type temp;
5757 operand_type_set (&temp, 0);
5758 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5760 if (i.prefix[ADDR_PREFIX] == 0)
5761 i.types[op].bitfield.disp32s = 1;
5763 i.types[op].bitfield.disp32 = 1;
5766 if (!i.tm.opcode_modifier.vecsib)
5767 i.rm.regmem = i.base_reg->reg_num;
5768 if ((i.base_reg->reg_flags & RegRex) != 0)
5770 i.sib.base = i.base_reg->reg_num;
5771 /* x86-64 ignores REX prefix bit here to avoid decoder
5773 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5776 if (i.disp_operands == 0)
5778 fake_zero_displacement = 1;
5779 i.types[op].bitfield.disp8 = 1;
5782 else if (i.base_reg->reg_num == ESP_REG_NUM)
5786 i.sib.scale = i.log2_scale_factor;
5787 if (i.index_reg == 0)
5789 gas_assert (!i.tm.opcode_modifier.vecsib);
5790 /* <disp>(%esp) becomes two byte modrm with no index
5791 register. We've already stored the code for esp
5792 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5793 Any base register besides %esp will not use the
5794 extra modrm byte. */
5795 i.sib.index = NO_INDEX_REGISTER;
5797 else if (!i.tm.opcode_modifier.vecsib)
5799 if (i.index_reg->reg_num == RegEiz
5800 || i.index_reg->reg_num == RegRiz)
5801 i.sib.index = NO_INDEX_REGISTER;
5803 i.sib.index = i.index_reg->reg_num;
5804 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5805 if ((i.index_reg->reg_flags & RegRex) != 0)
5810 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5811 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5815 if (!fake_zero_displacement
5819 fake_zero_displacement = 1;
5820 if (i.disp_encoding == disp_encoding_8bit)
5821 i.types[op].bitfield.disp8 = 1;
5823 i.types[op].bitfield.disp32 = 1;
5825 i.rm.mode = mode_from_disp_size (i.types[op]);
5829 if (fake_zero_displacement)
5831 /* Fakes a zero displacement assuming that i.types[op]
5832 holds the correct displacement size. */
5835 gas_assert (i.op[op].disps == 0);
5836 exp = &disp_expressions[i.disp_operands++];
5837 i.op[op].disps = exp;
5838 exp->X_op = O_constant;
5839 exp->X_add_number = 0;
5840 exp->X_add_symbol = (symbolS *) 0;
5841 exp->X_op_symbol = (symbolS *) 0;
5849 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5851 if (operand_type_check (i.types[0], imm))
5852 i.vex.register_specifier = NULL;
5855 /* VEX.vvvv encodes one of the sources when the first
5856 operand is not an immediate. */
5857 if (i.tm.opcode_modifier.vexw == VEXW0)
5858 i.vex.register_specifier = i.op[0].regs;
5860 i.vex.register_specifier = i.op[1].regs;
5863 /* Destination is a XMM register encoded in the ModRM.reg
5865 i.rm.reg = i.op[2].regs->reg_num;
5866 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5869 /* ModRM.rm and VEX.B encodes the other source. */
5870 if (!i.mem_operands)
5874 if (i.tm.opcode_modifier.vexw == VEXW0)
5875 i.rm.regmem = i.op[1].regs->reg_num;
5877 i.rm.regmem = i.op[0].regs->reg_num;
5879 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5883 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5885 i.vex.register_specifier = i.op[2].regs;
5886 if (!i.mem_operands)
5889 i.rm.regmem = i.op[1].regs->reg_num;
5890 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5894 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5895 (if any) based on i.tm.extension_opcode. Again, we must be
5896 careful to make sure that segment/control/debug/test/MMX
5897 registers are coded into the i.rm.reg field. */
5898 else if (i.reg_operands)
5901 unsigned int vex_reg = ~0;
5903 for (op = 0; op < i.operands; op++)
5904 if (i.types[op].bitfield.reg8
5905 || i.types[op].bitfield.reg16
5906 || i.types[op].bitfield.reg32
5907 || i.types[op].bitfield.reg64
5908 || i.types[op].bitfield.regmmx
5909 || i.types[op].bitfield.regxmm
5910 || i.types[op].bitfield.regymm
5911 || i.types[op].bitfield.sreg2
5912 || i.types[op].bitfield.sreg3
5913 || i.types[op].bitfield.control
5914 || i.types[op].bitfield.debug
5915 || i.types[op].bitfield.test)
5920 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5922 /* For instructions with VexNDS, the register-only
5923 source operand is encoded in VEX prefix. */
5924 gas_assert (mem != (unsigned int) ~0);
5929 gas_assert (op < i.operands);
5933 /* Check register-only source operand when two source
5934 operands are swapped. */
5935 if (!i.tm.operand_types[op].bitfield.baseindex
5936 && i.tm.operand_types[op + 1].bitfield.baseindex)
5940 gas_assert (mem == (vex_reg + 1)
5941 && op < i.operands);
5946 gas_assert (vex_reg < i.operands);
5950 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5952 /* For instructions with VexNDD, the register destination
5953 is encoded in VEX prefix. */
5954 if (i.mem_operands == 0)
5956 /* There is no memory operand. */
5957 gas_assert ((op + 2) == i.operands);
5962 /* There are only 2 operands. */
5963 gas_assert (op < 2 && i.operands == 2);
5968 gas_assert (op < i.operands);
5970 if (vex_reg != (unsigned int) ~0)
5972 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5974 if (type->bitfield.reg32 != 1
5975 && type->bitfield.reg64 != 1
5976 && !operand_type_equal (type, &regxmm)
5977 && !operand_type_equal (type, &regymm))
5980 i.vex.register_specifier = i.op[vex_reg].regs;
5983 /* Don't set OP operand twice. */
5986 /* If there is an extension opcode to put here, the
5987 register number must be put into the regmem field. */
5988 if (i.tm.extension_opcode != None)
5990 i.rm.regmem = i.op[op].regs->reg_num;
5991 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5996 i.rm.reg = i.op[op].regs->reg_num;
5997 if ((i.op[op].regs->reg_flags & RegRex) != 0)
6002 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6003 must set it to 3 to indicate this is a register operand
6004 in the regmem field. */
6005 if (!i.mem_operands)
6009 /* Fill in i.rm.reg field with extension opcode (if any). */
6010 if (i.tm.extension_opcode != None)
6011 i.rm.reg = i.tm.extension_opcode;
6017 output_branch (void)
6023 relax_substateT subtype;
6027 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
6028 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
6031 if (i.prefix[DATA_PREFIX] != 0)
6037 /* Pentium4 branch hints. */
6038 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6039 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6044 if (i.prefix[REX_PREFIX] != 0)
6050 if (i.prefixes != 0 && !intel_syntax)
6051 as_warn (_("skipping prefixes on this instruction"));
6053 /* It's always a symbol; End frag & setup for relax.
6054 Make sure there is enough room in this frag for the largest
6055 instruction we may generate in md_convert_frag. This is 2
6056 bytes for the opcode and room for the prefix and largest
6058 frag_grow (prefix + 2 + 4);
6059 /* Prefix and 1 opcode byte go in fr_fix. */
6060 p = frag_more (prefix + 1);
6061 if (i.prefix[DATA_PREFIX] != 0)
6062 *p++ = DATA_PREFIX_OPCODE;
6063 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6064 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6065 *p++ = i.prefix[SEG_PREFIX];
6066 if (i.prefix[REX_PREFIX] != 0)
6067 *p++ = i.prefix[REX_PREFIX];
6068 *p = i.tm.base_opcode;
6070 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6071 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6072 else if (cpu_arch_flags.bitfield.cpui386)
6073 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6075 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6078 sym = i.op[0].disps->X_add_symbol;
6079 off = i.op[0].disps->X_add_number;
6081 if (i.op[0].disps->X_op != O_constant
6082 && i.op[0].disps->X_op != O_symbol)
6084 /* Handle complex expressions. */
6085 sym = make_expr_symbol (i.op[0].disps);
6089 /* 1 possible extra opcode + 4 byte displacement go in var part.
6090 Pass reloc in fr_var. */
6091 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
6101 if (i.tm.opcode_modifier.jumpbyte)
6103 /* This is a loop or jecxz type instruction. */
6105 if (i.prefix[ADDR_PREFIX] != 0)
6107 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6110 /* Pentium4 branch hints. */
6111 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6112 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6114 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6123 if (flag_code == CODE_16BIT)
6126 if (i.prefix[DATA_PREFIX] != 0)
6128 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6138 if (i.prefix[REX_PREFIX] != 0)
6140 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6144 if (i.prefixes != 0 && !intel_syntax)
6145 as_warn (_("skipping prefixes on this instruction"));
6147 p = frag_more (i.tm.opcode_length + size);
6148 switch (i.tm.opcode_length)
6151 *p++ = i.tm.base_opcode >> 8;
6153 *p++ = i.tm.base_opcode;
6159 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6160 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6162 /* All jumps handled here are signed, but don't use a signed limit
6163 check for 32 and 16 bit jumps as we want to allow wrap around at
6164 4G and 64k respectively. */
6166 fixP->fx_signed = 1;
6170 output_interseg_jump (void)
6178 if (flag_code == CODE_16BIT)
6182 if (i.prefix[DATA_PREFIX] != 0)
6188 if (i.prefix[REX_PREFIX] != 0)
6198 if (i.prefixes != 0 && !intel_syntax)
6199 as_warn (_("skipping prefixes on this instruction"));
6201 /* 1 opcode; 2 segment; offset */
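/* Added layout sketch (illustrative): "ljmp $0x10,$target" in 32-bit
   code emits 0xea, then the 4-byte offset of `target', then the 2-byte
   selector 0x0010, matching the opcode/offset/segment order below.  */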
6202 p = frag_more (prefix + 1 + 2 + size);
6204 if (i.prefix[DATA_PREFIX] != 0)
6205 *p++ = DATA_PREFIX_OPCODE;
6207 if (i.prefix[REX_PREFIX] != 0)
6208 *p++ = i.prefix[REX_PREFIX];
6210 *p++ = i.tm.base_opcode;
6211 if (i.op[1].imms->X_op == O_constant)
6213 offsetT n = i.op[1].imms->X_add_number;
6216 && !fits_in_unsigned_word (n)
6217 && !fits_in_signed_word (n))
6219 as_bad (_("16-bit jump out of range"));
6222 md_number_to_chars (p, n, size);
6225 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6226 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6227 if (i.op[0].imms->X_op != O_constant)
6228 as_bad (_("can't handle non absolute segment in `%s'"),
6230 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
6236 fragS *insn_start_frag;
6237 offsetT insn_start_off;
6239 /* Tie dwarf2 debug info to the address at the start of the insn.
6240 We can't do this after the insn has been output as the current
6241 frag may have been closed off. eg. by frag_var. */
6242 dwarf2_emit_insn (0);
6244 insn_start_frag = frag_now;
6245 insn_start_off = frag_now_fix ();
6248 if (i.tm.opcode_modifier.jump)
6250 else if (i.tm.opcode_modifier.jumpbyte
6251 || i.tm.opcode_modifier.jumpdword)
6253 else if (i.tm.opcode_modifier.jumpintersegment)
6254 output_interseg_jump ();
6257 /* Output normal instructions here. */
6261 unsigned int prefix;
6263 /* Since the VEX prefix contains the implicit prefix, we don't
6264 need the explicit prefix. */
6265 if (!i.tm.opcode_modifier.vex)
6267 switch (i.tm.opcode_length)
6270 if (i.tm.base_opcode & 0xff000000)
6272 prefix = (i.tm.base_opcode >> 24) & 0xff;
6277 if ((i.tm.base_opcode & 0xff0000) != 0)
6279 prefix = (i.tm.base_opcode >> 16) & 0xff;
6280 if (i.tm.cpu_flags.bitfield.cpupadlock)
6283 if (prefix != REPE_PREFIX_OPCODE
6284 || (i.prefix[REP_PREFIX]
6285 != REPE_PREFIX_OPCODE))
6286 add_prefix (prefix);
6289 add_prefix (prefix);
6298 /* The prefix bytes. */
6299 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6301 FRAG_APPEND_1_CHAR (*q);
6305 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6310 /* REX byte is encoded in VEX prefix. */
6314 FRAG_APPEND_1_CHAR (*q);
6317 /* There should be no other prefixes for instructions
6322 /* Now the VEX prefix. */
6323 p = frag_more (i.vex.length);
6324 for (j = 0; j < i.vex.length; j++)
6325 p[j] = i.vex.bytes[j];
6328 /* Now the opcode; be careful about word order here! */
6329 if (i.tm.opcode_length == 1)
6331 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6335 switch (i.tm.opcode_length)
6339 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6349 /* Put out high byte first: can't use md_number_to_chars! */
6350 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6351 *p = i.tm.base_opcode & 0xff;
6354 /* Now the modrm byte and sib byte (if present). */
6355 if (i.tm.opcode_modifier.modrm)
6357 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6360 /* If i.rm.regmem == ESP (4)
6361 && i.rm.mode != (Register mode)
6363 ==> need second modrm byte. */
6364 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6366 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6367 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6369 | i.sib.scale << 6));
6372 if (i.disp_operands)
6373 output_disp (insn_start_frag, insn_start_off);
6376 output_imm (insn_start_frag, insn_start_off);
6382 pi ("" /*line*/, &i);
6384 #endif /* DEBUG386 */
6387 /* Return the size of the displacement operand N. */
6390 disp_size (unsigned int n)
6393 if (i.types[n].bitfield.disp64)
6395 else if (i.types[n].bitfield.disp8)
6397 else if (i.types[n].bitfield.disp16)
6402 /* Return the size of the immediate operand N. */
6405 imm_size (unsigned int n)
6408 if (i.types[n].bitfield.imm64)
6410 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6412 else if (i.types[n].bitfield.imm16)
6418 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6423 for (n = 0; n < i.operands; n++)
6425 if (operand_type_check (i.types[n], disp))
6427 if (i.op[n].disps->X_op == O_constant)
6429 int size = disp_size (n);
6432 val = offset_in_range (i.op[n].disps->X_add_number,
6434 p = frag_more (size);
6435 md_number_to_chars (p, val, size);
6439 enum bfd_reloc_code_real reloc_type;
6440 int size = disp_size (n);
6441 int sign = i.types[n].bitfield.disp32s;
6442 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6444 /* We can't have 8 bit displacement here. */
6445 gas_assert (!i.types[n].bitfield.disp8);
6447 /* The PC relative address is computed relative
6448 to the instruction boundary, so in case immediate
6449 fields follow, we need to adjust the value. */
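/* Added example: in "testl $0x12345678, foo(%rip)" the 4-byte immediate
   follows the displacement, so the code below subtracts its size from
   the addend to keep the reference relative to the end of the whole
   instruction.  */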
6450 if (pcrel && i.imm_operands)
6455 for (n1 = 0; n1 < i.operands; n1++)
6456 if (operand_type_check (i.types[n1], imm))
6458 /* Only one immediate is allowed for PC
6459 relative address. */
6460 gas_assert (sz == 0);
6462 i.op[n].disps->X_add_number -= sz;
6464 /* We should find the immediate. */
6465 gas_assert (sz != 0);
6468 p = frag_more (size);
6469 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6471 && GOT_symbol == i.op[n].disps->X_add_symbol
6472 && (((reloc_type == BFD_RELOC_32
6473 || reloc_type == BFD_RELOC_X86_64_32S
6474 || (reloc_type == BFD_RELOC_64
6476 && (i.op[n].disps->X_op == O_symbol
6477 || (i.op[n].disps->X_op == O_add
6478 && ((symbol_get_value_expression
6479 (i.op[n].disps->X_op_symbol)->X_op)
6481 || reloc_type == BFD_RELOC_32_PCREL))
6485 if (insn_start_frag == frag_now)
6486 add = (p - frag_now->fr_literal) - insn_start_off;
6491 add = insn_start_frag->fr_fix - insn_start_off;
6492 for (fr = insn_start_frag->fr_next;
6493 fr && fr != frag_now; fr = fr->fr_next)
6495 add += p - frag_now->fr_literal;
6500 reloc_type = BFD_RELOC_386_GOTPC;
6501 i.op[n].imms->X_add_number += add;
6503 else if (reloc_type == BFD_RELOC_64)
6504 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6506 /* Don't do the adjustment for x86-64, as there
6507 the pcrel addressing is relative to the _next_
6508 insn, and that is taken care of in other code. */
6509 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6511 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6512 i.op[n].disps, pcrel, reloc_type);
6519 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6524 for (n = 0; n < i.operands; n++)
6526 if (operand_type_check (i.types[n], imm))
6528 if (i.op[n].imms->X_op == O_constant)
6530 int size = imm_size (n);
6533 val = offset_in_range (i.op[n].imms->X_add_number,
6535 p = frag_more (size);
6536 md_number_to_chars (p, val, size);
6540 /* Not absolute_section.
6541 Need a 32-bit fixup (don't support 8bit
6542 non-absolute imms). Try to support other
6544 enum bfd_reloc_code_real reloc_type;
6545 int size = imm_size (n);
6548 if (i.types[n].bitfield.imm32s
6549 && (i.suffix == QWORD_MNEM_SUFFIX
6550 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6555 p = frag_more (size);
6556 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6558 /* This is tough to explain. We end up with this one if we
6559 * have operands that look like
6560 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6561 * obtain the absolute address of the GOT, and it is strongly
6562 * preferable from a performance point of view to avoid using
6563 * a runtime relocation for this. The actual sequence of
6564 * instructions often looks something like:
6569 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6571 * The call and pop essentially return the absolute address
6572 * of the label .L66 and store it in %ebx. The linker itself
6573 * will ultimately change the first operand of the addl so
6574 * that %ebx points to the GOT, but to keep things simple, the
6575 * .o file must have this operand set so that it generates not
6576 * the absolute address of .L66, but the absolute address of
6577 * itself. This allows the linker itself to simply treat a GOTPC
6578 * relocation as asking for a pcrel offset to the GOT to be
6579 * added in, and the addend of the relocation is stored in the
6580 * operand field for the instruction itself.
6582 * Our job here is to fix the operand so that it would add
6583 * the correct offset so that %ebx would point to itself. The
6584 * thing that is tricky is that .-.L66 will point to the
6585 * beginning of the instruction, so we need to further modify
6586 * the operand so that it will point to itself. There are
6587 * other cases where you have something like:
6589 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6591 * and here no correction would be required. Internally in
6592 * the assembler we treat operands of this form as not being
6593 * pcrel since the '.' is explicitly mentioned, and I wonder
6594 * whether it would simplify matters to do it this way. Who
6595 * knows. In earlier versions of the PIC patches, the
6596 * pcrel_adjust field was used to store the correction, but
6597 * since the expression is not pcrel, I felt it would be
6598 * confusing to do it this way. */
6600 if ((reloc_type == BFD_RELOC_32
6601 || reloc_type == BFD_RELOC_X86_64_32S
6602 || reloc_type == BFD_RELOC_64)
6604 && GOT_symbol == i.op[n].imms->X_add_symbol
6605 && (i.op[n].imms->X_op == O_symbol
6606 || (i.op[n].imms->X_op == O_add
6607 && ((symbol_get_value_expression
6608 (i.op[n].imms->X_op_symbol)->X_op)
6613 if (insn_start_frag == frag_now)
6614 add = (p - frag_now->fr_literal) - insn_start_off;
6619 add = insn_start_frag->fr_fix - insn_start_off;
6620 for (fr = insn_start_frag->fr_next;
6621 fr && fr != frag_now; fr = fr->fr_next)
6623 add += p - frag_now->fr_literal;
6627 reloc_type = BFD_RELOC_386_GOTPC;
6629 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6631 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6632 i.op[n].imms->X_add_number += add;
6634 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6635 i.op[n].imms, 0, reloc_type);
6641 /* x86_cons_fix_new is called via the expression parsing code when a
6642 reloc is needed. We use this hook to get the correct .got reloc. */
6643 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6644 static int cons_sign = -1;
6647 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6650 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6652 got_reloc = NO_RELOC;
6655 if (exp->X_op == O_secrel)
6657 exp->X_op = O_symbol;
6658 r = BFD_RELOC_32_SECREL;
6662 fix_new_exp (frag, off, len, exp, 0, r);
6665 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
6666 purpose of the `.dc.a' internal pseudo-op. */
6669 x86_address_bytes (void)
6671 if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
6673 return stdoutput->arch_info->bits_per_address / 8;
6676 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6678 # define lex_got(reloc, adjust, types) NULL
6680 /* Parse operands of the form
6681 <symbol>@GOTOFF+<nnn>
6682 and similar .plt or .got references.
6684 If we find one, set up the correct relocation in RELOC and copy the
6685 input string, minus the `@GOTOFF' into a malloc'd buffer for
6686 parsing by the calling routine. Return this buffer, and if ADJUST
6687 is non-null set it to the length of the string we removed from the
6688 input line. Otherwise return NULL. */
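/* Added example of the rewrite (illustrative): for the operand
   "foo@GOTOFF+4" this records BFD_RELOC_386_GOTOFF (or the 64-bit
   counterpart) and hands back the buffer "foo +4", the reloc token
   having been replaced by a space.  */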
6690 lex_got (enum bfd_reloc_code_real *rel,
6692 i386_operand_type *types)
6694 /* Some of the relocations depend on the size of what field is to
6695 be relocated. But in our callers i386_immediate and i386_displacement
6696 we don't yet know the operand size (this will be set by insn
6697 matching). Hence we record the word32 relocation here,
6698 and adjust the reloc according to the real size in reloc(). */
6699 static const struct {
6702 const enum bfd_reloc_code_real rel[2];
6703 const i386_operand_type types64;
6705 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6706 BFD_RELOC_X86_64_PLTOFF64 },
6707 OPERAND_TYPE_IMM64 },
6708 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6709 BFD_RELOC_X86_64_PLT32 },
6710 OPERAND_TYPE_IMM32_32S_DISP32 },
6711 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6712 BFD_RELOC_X86_64_GOTPLT64 },
6713 OPERAND_TYPE_IMM64_DISP64 },
6714 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6715 BFD_RELOC_X86_64_GOTOFF64 },
6716 OPERAND_TYPE_IMM64_DISP64 },
6717 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6718 BFD_RELOC_X86_64_GOTPCREL },
6719 OPERAND_TYPE_IMM32_32S_DISP32 },
6720 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6721 BFD_RELOC_X86_64_TLSGD },
6722 OPERAND_TYPE_IMM32_32S_DISP32 },
6723 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6724 _dummy_first_bfd_reloc_code_real },
6725 OPERAND_TYPE_NONE },
6726 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6727 BFD_RELOC_X86_64_TLSLD },
6728 OPERAND_TYPE_IMM32_32S_DISP32 },
6729 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6730 BFD_RELOC_X86_64_GOTTPOFF },
6731 OPERAND_TYPE_IMM32_32S_DISP32 },
6732 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6733 BFD_RELOC_X86_64_TPOFF32 },
6734 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6735 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6736 _dummy_first_bfd_reloc_code_real },
6737 OPERAND_TYPE_NONE },
6738 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6739 BFD_RELOC_X86_64_DTPOFF32 },
6740 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6741 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6742 _dummy_first_bfd_reloc_code_real },
6743 OPERAND_TYPE_NONE },
6744 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6745 _dummy_first_bfd_reloc_code_real },
6746 OPERAND_TYPE_NONE },
6747 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6748 BFD_RELOC_X86_64_GOT32 },
6749 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6750 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6751 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6752 OPERAND_TYPE_IMM32_32S_DISP32 },
6753 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6754 BFD_RELOC_X86_64_TLSDESC_CALL },
6755 OPERAND_TYPE_IMM32_32S_DISP32 },
6760 #if defined (OBJ_MAYBE_ELF)
6765 for (cp = input_line_pointer; *cp != '@'; cp++)
6766 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6769 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6771 int len = gotrel[j].len;
6772 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6774 if (gotrel[j].rel[object_64bit] != 0)
6777 char *tmpbuf, *past_reloc;
6779 *rel = gotrel[j].rel[object_64bit];
6785 if (flag_code != CODE_64BIT)
6787 types->bitfield.imm32 = 1;
6788 types->bitfield.disp32 = 1;
6791 *types = gotrel[j].types64;
6794 if (GOT_symbol == NULL)
6795 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6797 /* The length of the first part of our input line. */
6798 first = cp - input_line_pointer;
6800 /* The second part goes from after the reloc token until
6801 (and including) an end_of_line char or comma. */
6802 past_reloc = cp + 1 + len;
6804 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6806 second = cp + 1 - past_reloc;
6808 /* Allocate and copy string. The trailing NUL shouldn't
6809 be necessary, but be safe. */
6810 tmpbuf = (char *) xmalloc (first + second + 2);
6811 memcpy (tmpbuf, input_line_pointer, first);
6812 if (second != 0 && *past_reloc != ' ')
6813 /* Replace the relocation token with ' ', so that
6814 errors like foo@GOTOFF1 will be detected. */
6815 tmpbuf[first++] = ' ';
6816 memcpy (tmpbuf + first, past_reloc, second);
6817 tmpbuf[first + second] = '\0';
6821 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6822 gotrel[j].str, 1 << (5 + object_64bit));
6827 /* Might be a symbol version string. Don't as_bad here. */
6836 /* Parse operands of the form
6837 <symbol>@SECREL32+<nnn>
6839 If we find one, set up the correct relocation in RELOC and copy the
6840 input string, minus the `@SECREL32' into a malloc'd buffer for
6841 parsing by the calling routine. Return this buffer, and if ADJUST
6842 is non-null set it to the length of the string we removed from the
6843 input line. Otherwise return NULL.
6845 This function is copied from the ELF version above, adjusted for PE targets. */
6848 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
6849 int *adjust ATTRIBUTE_UNUSED,
6850 i386_operand_type *types ATTRIBUTE_UNUSED)
6856 const enum bfd_reloc_code_real rel[2];
6857 const i386_operand_type types64;
6861 { STRING_COMMA_LEN ("SECREL32"), { BFD_RELOC_32_SECREL,
6862 BFD_RELOC_32_SECREL },
6863 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6869 for (cp = input_line_pointer; *cp != '@'; cp++)
6870 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6873 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6875 int len = gotrel[j].len;
6877 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6879 if (gotrel[j].rel[object_64bit] != 0)
6882 char *tmpbuf, *past_reloc;
6884 *rel = gotrel[j].rel[object_64bit];
6890 if (flag_code != CODE_64BIT)
6892 types->bitfield.imm32 = 1;
6893 types->bitfield.disp32 = 1;
6896 *types = gotrel[j].types64;
6899 /* The length of the first part of our input line. */
6900 first = cp - input_line_pointer;
6902 /* The second part goes from after the reloc token until
6903 (and including) an end_of_line char or comma. */
6904 past_reloc = cp + 1 + len;
6906 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6908 second = cp + 1 - past_reloc;
6910 /* Allocate and copy string. The trailing NUL shouldn't
6911 be necessary, but be safe. */
6912 tmpbuf = (char *) xmalloc (first + second + 2);
6913 memcpy (tmpbuf, input_line_pointer, first);
6914 if (second != 0 && *past_reloc != ' ')
6915 /* Replace the relocation token with ' ', so that
6916 errors like foo@SECREL321 will be detected. */
6917 tmpbuf[first++] = ' ';
6918 memcpy (tmpbuf + first, past_reloc, second);
6919 tmpbuf[first + second] = '\0';
6923 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6924 gotrel[j].str, 1 << (5 + object_64bit));
6929 /* Might be a symbol version string. Don't as_bad here. */
6936 x86_cons (expressionS *exp, int size)
6938 intel_syntax = -intel_syntax;
6941 if (size == 4 || (object_64bit && size == 8))
6943 /* Handle @GOTOFF and the like in an expression. */
6945 char *gotfree_input_line;
6948 save = input_line_pointer;
6949 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6950 if (gotfree_input_line)
6951 input_line_pointer = gotfree_input_line;
6955 if (gotfree_input_line)
6957 /* expression () has merrily parsed up to the end of line,
6958 or a comma - in the wrong buffer. Transfer how far
6959 input_line_pointer has moved to the right buffer. */
6960 input_line_pointer = (save
6961 + (input_line_pointer - gotfree_input_line)
6963 free (gotfree_input_line);
6964 if (exp->X_op == O_constant
6965 || exp->X_op == O_absent
6966 || exp->X_op == O_illegal
6967 || exp->X_op == O_register
6968 || exp->X_op == O_big)
6970 char c = *input_line_pointer;
6971 *input_line_pointer = 0;
6972 as_bad (_("missing or invalid expression `%s'"), save);
6973 *input_line_pointer = c;
6980 intel_syntax = -intel_syntax;
6983 i386_intel_simplify (exp);
6987 signed_cons (int size)
6989 if (flag_code == CODE_64BIT)
6997 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
7004 if (exp.X_op == O_symbol)
7005 exp.X_op = O_secrel;
7007 emit_expr (&exp, 4);
7009 while (*input_line_pointer++ == ',');
7011 input_line_pointer--;
7012 demand_empty_rest_of_line ();
7017 i386_immediate (char *imm_start)
7019 char *save_input_line_pointer;
7020 char *gotfree_input_line;
7023 i386_operand_type types;
7025 operand_type_set (&types, ~0);
7027 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
7029 as_bad (_("at most %d immediate operands are allowed"),
7030 MAX_IMMEDIATE_OPERANDS);
7034 exp = &im_expressions[i.imm_operands++];
7035 i.op[this_operand].imms = exp;
7037 if (is_space_char (*imm_start))
7040 save_input_line_pointer = input_line_pointer;
7041 input_line_pointer = imm_start;
7043 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7044 if (gotfree_input_line)
7045 input_line_pointer = gotfree_input_line;
7047 exp_seg = expression (exp);
7050 if (*input_line_pointer)
7051 as_bad (_("junk `%s' after expression"), input_line_pointer);
7053 input_line_pointer = save_input_line_pointer;
7054 if (gotfree_input_line)
7056 free (gotfree_input_line);
7058 if (exp->X_op == O_constant || exp->X_op == O_register)
7059 exp->X_op = O_illegal;
7062 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
7066 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7067 i386_operand_type types, const char *imm_start)
7069 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
7072 as_bad (_("missing or invalid immediate expression `%s'"),
7076 else if (exp->X_op == O_constant)
7078 /* Size it properly later. */
7079 i.types[this_operand].bitfield.imm64 = 1;
7080 /* If not 64bit, sign extend val. */
7081 if (flag_code != CODE_64BIT
7082 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
7084 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
7086 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7087 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
7088 && exp_seg != absolute_section
7089 && exp_seg != text_section
7090 && exp_seg != data_section
7091 && exp_seg != bss_section
7092 && exp_seg != undefined_section
7093 && !bfd_is_com_section (exp_seg))
7095 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7099 else if (!intel_syntax && exp->X_op == O_register)
7102 as_bad (_("illegal immediate register operand %s"), imm_start);
7107 /* This is an address. The size of the address will be
7108 determined later, depending on destination register,
7109 suffix, or the default for the section. */
7110 i.types[this_operand].bitfield.imm8 = 1;
7111 i.types[this_operand].bitfield.imm16 = 1;
7112 i.types[this_operand].bitfield.imm32 = 1;
7113 i.types[this_operand].bitfield.imm32s = 1;
7114 i.types[this_operand].bitfield.imm64 = 1;
7115 i.types[this_operand] = operand_type_and (i.types[this_operand],
7123 i386_scale (char *scale)
7126 char *save = input_line_pointer;
7128 input_line_pointer = scale;
7129 val = get_absolute_expression ();
7134 i.log2_scale_factor = 0;
7137 i.log2_scale_factor = 1;
7140 i.log2_scale_factor = 2;
7143 i.log2_scale_factor = 3;
7147 char sep = *input_line_pointer;
7149 *input_line_pointer = '\0';
7150 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
7152 *input_line_pointer = sep;
7153 input_line_pointer = save;
7157 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7159 as_warn (_("scale factor of %d without an index register"),
7160 1 << i.log2_scale_factor);
7161 i.log2_scale_factor = 0;
7163 scale = input_line_pointer;
7164 input_line_pointer = save;
7169 i386_displacement (char *disp_start, char *disp_end)
7173 char *save_input_line_pointer;
7174 char *gotfree_input_line;
7176 i386_operand_type bigdisp, types = anydisp;
7179 if (i.disp_operands == MAX_MEMORY_OPERANDS)
7181 as_bad (_("at most %d displacement operands are allowed"),
7182 MAX_MEMORY_OPERANDS);
7186 operand_type_set (&bigdisp, 0);
7187 if ((i.types[this_operand].bitfield.jumpabsolute)
7188 || (!current_templates->start->opcode_modifier.jump
7189 && !current_templates->start->opcode_modifier.jumpdword))
7191 bigdisp.bitfield.disp32 = 1;
7192 override = (i.prefix[ADDR_PREFIX] != 0);
7193 if (flag_code == CODE_64BIT)
7197 bigdisp.bitfield.disp32s = 1;
7198 bigdisp.bitfield.disp64 = 1;
7201 else if ((flag_code == CODE_16BIT) ^ override)
7203 bigdisp.bitfield.disp32 = 0;
7204 bigdisp.bitfield.disp16 = 1;
7209 /* For PC-relative branches, the width of the displacement
7210 is dependent upon data size, not address size. */
7211 override = (i.prefix[DATA_PREFIX] != 0);
7212 if (flag_code == CODE_64BIT)
7214 if (override || i.suffix == WORD_MNEM_SUFFIX)
7215 bigdisp.bitfield.disp16 = 1;
7218 bigdisp.bitfield.disp32 = 1;
7219 bigdisp.bitfield.disp32s = 1;
7225 override = (i.suffix == (flag_code != CODE_16BIT
7227 : LONG_MNEM_SUFFIX));
7228 bigdisp.bitfield.disp32 = 1;
7229 if ((flag_code == CODE_16BIT) ^ override)
7231 bigdisp.bitfield.disp32 = 0;
7232 bigdisp.bitfield.disp16 = 1;
7236 i.types[this_operand] = operand_type_or (i.types[this_operand],
7239 exp = &disp_expressions[i.disp_operands];
7240 i.op[this_operand].disps = exp;
7242 save_input_line_pointer = input_line_pointer;
7243 input_line_pointer = disp_start;
7244 END_STRING_AND_SAVE (disp_end);
7246 #ifndef GCC_ASM_O_HACK
7247 #define GCC_ASM_O_HACK 0
7250 END_STRING_AND_SAVE (disp_end + 1);
7251 if (i.types[this_operand].bitfield.baseIndex
7252 && displacement_string_end[-1] == '+')
7254 /* This hack is to avoid a warning when using the "o"
7255 constraint within gcc asm statements.
7258 #define _set_tssldt_desc(n,addr,limit,type) \
7259 __asm__ __volatile__ ( \
7261 "movw %w1,2+%0\n\t" \
7263 "movb %b1,4+%0\n\t" \
7264 "movb %4,5+%0\n\t" \
7265 "movb $0,6+%0\n\t" \
7266 "movb %h1,7+%0\n\t" \
7268 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7270 This works great except that the output assembler ends
7271 up looking a bit weird if it turns out that there is
7272 no offset. You end up producing code that looks like:
7285 So here we provide the missing zero. */
7287 *displacement_string_end = '0';
7290 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7291 if (gotfree_input_line)
7292 input_line_pointer = gotfree_input_line;
7294 exp_seg = expression (exp);
7297 if (*input_line_pointer)
7298 as_bad (_("junk `%s' after expression"), input_line_pointer);
7300 RESTORE_END_STRING (disp_end + 1);
7302 input_line_pointer = save_input_line_pointer;
7303 if (gotfree_input_line)
7305 free (gotfree_input_line);
7307 if (exp->X_op == O_constant || exp->X_op == O_register)
7308 exp->X_op = O_illegal;
7311 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7313 RESTORE_END_STRING (disp_end);
7319 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7320 i386_operand_type types, const char *disp_start)
7322 i386_operand_type bigdisp;
7325 /* We do this to make sure that the section symbol is in
7326 the symbol table. We will ultimately change the relocation
7327 to be relative to the beginning of the section. */
7328 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7329 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7330 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7332 if (exp->X_op != O_symbol)
7335 if (S_IS_LOCAL (exp->X_add_symbol)
7336 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7337 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7338 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7339 exp->X_op = O_subtract;
7340 exp->X_op_symbol = GOT_symbol;
7341 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7342 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7343 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7344 i.reloc[this_operand] = BFD_RELOC_64;
7346 i.reloc[this_operand] = BFD_RELOC_32;
7349 else if (exp->X_op == O_absent
7350 || exp->X_op == O_illegal
7351 || exp->X_op == O_big)
7354 as_bad (_("missing or invalid displacement expression `%s'"),
7359 else if (flag_code == CODE_64BIT
7360 && !i.prefix[ADDR_PREFIX]
7361 && exp->X_op == O_constant)
7363 /* Since displacement is sign-extended to 64bit, don't allow
7364 disp32 and turn off disp32s if they are out of range. */
7365 i.types[this_operand].bitfield.disp32 = 0;
7366 if (!fits_in_signed_long (exp->X_add_number))
7368 i.types[this_operand].bitfield.disp32s = 0;
7369 if (i.types[this_operand].bitfield.baseindex)
7371 as_bad (_("0x%lx out of range of signed 32bit displacement"),
7372 (long) exp->X_add_number);
7378 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7379 else if (exp->X_op != O_constant
7380 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7381 && exp_seg != absolute_section
7382 && exp_seg != text_section
7383 && exp_seg != data_section
7384 && exp_seg != bss_section
7385 && exp_seg != undefined_section
7386 && !bfd_is_com_section (exp_seg))
7388 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7393 /* Check if this is a displacement only operand. */
7394 bigdisp = i.types[this_operand];
7395 bigdisp.bitfield.disp8 = 0;
7396 bigdisp.bitfield.disp16 = 0;
7397 bigdisp.bitfield.disp32 = 0;
7398 bigdisp.bitfield.disp32s = 0;
7399 bigdisp.bitfield.disp64 = 0;
7400 if (operand_type_all_zero (&bigdisp))
7401 i.types[this_operand] = operand_type_and (i.types[this_operand],
7407 /* Make sure the memory operand we've been dealt is valid.
7408 Return 1 on success, 0 on a failure. */
7411 i386_index_check (const char *operand_string)
7414 const char *kind = "base/index";
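/* Roughly: with 32-bit or 64-bit addressing any suitably sized general
   register may act as base or index (e.g. "(%eax,%ecx,4)" or "(%rbp,%r12,8)"),
   whereas 16-bit addressing only allows (%bx), (%bp), (%si), (%di) and the
   combinations (%bx,%si), (%bx,%di), (%bp,%si), (%bp,%di), optionally with a
   displacement.  The checks below reject anything else for the current
   address size.  */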
7415 #if INFER_ADDR_PREFIX
7421 if (current_templates->start->opcode_modifier.isstring
7422 && !current_templates->start->opcode_modifier.immext
7423 && (current_templates->end[-1].opcode_modifier.isstring
7426 /* Memory operands of string insns are special in that they only allow
7427 a single register (rDI, rSI, or rBX) as their memory address. */
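/* For example, "movsw" implicitly addresses its operands through rSI and rDI
   and "xlat" through rBX, so "movsw %ds:(%esi), %es:(%edi)" is accepted while
   something like "movsw (%eax), (%edi)" draws the "string address" warning
   issued below.  */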
7428 unsigned int expected;
7430 kind = "string address";
7432 if (current_templates->start->opcode_modifier.w)
7434 i386_operand_type type = current_templates->end[-1].operand_types[0];
7436 if (!type.bitfield.baseindex
7437 || ((!i.mem_operands != !intel_syntax)
7438 && current_templates->end[-1].operand_types[1]
7439 .bitfield.baseindex))
7440 type = current_templates->end[-1].operand_types[1];
7441 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7444 expected = 3 /* rBX */;
7446 if (!i.base_reg || i.index_reg
7447 || operand_type_check (i.types[this_operand], disp))
7449 else if (!(flag_code == CODE_64BIT
7450 ? i.prefix[ADDR_PREFIX]
7451 ? i.base_reg->reg_type.bitfield.reg32
7452 : i.base_reg->reg_type.bitfield.reg64
7453 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7454 ? i.base_reg->reg_type.bitfield.reg32
7455 : i.base_reg->reg_type.bitfield.reg16))
7457 else if (register_number (i.base_reg) != expected)
7464 for (j = 0; j < i386_regtab_size; ++j)
7465 if ((flag_code == CODE_64BIT
7466 ? i.prefix[ADDR_PREFIX]
7467 ? i386_regtab[j].reg_type.bitfield.reg32
7468 : i386_regtab[j].reg_type.bitfield.reg64
7469 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7470 ? i386_regtab[j].reg_type.bitfield.reg32
7471 : i386_regtab[j].reg_type.bitfield.reg16)
7472 && register_number(i386_regtab + j) == expected)
7474 gas_assert (j < i386_regtab_size);
7475 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7477 intel_syntax ? '[' : '(',
7479 i386_regtab[j].reg_name,
7480 intel_syntax ? ']' : ')');
7484 else if (flag_code == CODE_64BIT)
7487 && ((i.prefix[ADDR_PREFIX] == 0
7488 && !i.base_reg->reg_type.bitfield.reg64)
7489 || (i.prefix[ADDR_PREFIX]
7490 && !i.base_reg->reg_type.bitfield.reg32))
7492 || i.base_reg->reg_num !=
7493 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7495 && !(i.index_reg->reg_type.bitfield.regxmm
7496 || i.index_reg->reg_type.bitfield.regymm)
7497 && (!i.index_reg->reg_type.bitfield.baseindex
7498 || (i.prefix[ADDR_PREFIX] == 0
7499 && i.index_reg->reg_num != RegRiz
7500 && !i.index_reg->reg_type.bitfield.reg64
7502 || (i.prefix[ADDR_PREFIX]
7503 && i.index_reg->reg_num != RegEiz
7504 && !i.index_reg->reg_type.bitfield.reg32))))
7509 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7513 && (!i.base_reg->reg_type.bitfield.reg16
7514 || !i.base_reg->reg_type.bitfield.baseindex))
7516 && (!i.index_reg->reg_type.bitfield.reg16
7517 || !i.index_reg->reg_type.bitfield.baseindex
7519 && i.base_reg->reg_num < 6
7520 && i.index_reg->reg_num >= 6
7521 && i.log2_scale_factor == 0))))
7528 && !i.base_reg->reg_type.bitfield.reg32)
7530 && !i.index_reg->reg_type.bitfield.regxmm
7531 && !i.index_reg->reg_type.bitfield.regymm
7532 && ((!i.index_reg->reg_type.bitfield.reg32
7533 && i.index_reg->reg_num != RegEiz)
7534 || !i.index_reg->reg_type.bitfield.baseindex)))
7540 #if INFER_ADDR_PREFIX
7541 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7543 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7545 /* Change the size of any displacement too. At most one of
7546 Disp16 or Disp32 is set.
7547 FIXME. There doesn't seem to be any real need for separate
7548 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7549 Removing them would probably clean up the code quite a lot. */
7550 if (flag_code != CODE_64BIT
7551 && (i.types[this_operand].bitfield.disp16
7552 || i.types[this_operand].bitfield.disp32))
7553 i.types[this_operand]
7554 = operand_type_xor (i.types[this_operand], disp16_32);
7559 as_bad (_("`%s' is not a valid %s expression"),
7564 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7566 flag_code_names[i.prefix[ADDR_PREFIX]
7567 ? flag_code == CODE_32BIT
7576 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero on error. */
7580 i386_att_operand (char *operand_string)
7584 char *op_string = operand_string;
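/* In AT&T syntax an operand is one of: a register ("%eax"), an immediate
   ("$42"), an absolute jump/call target ("*%eax" or "*foo"), or a memory
   reference, possibly with a segment override and a base/index part
   (e.g. "%es:-4(%ebp,%esi,2)").  The code below dispatches on the first
   character to tell these apart.  */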
7586 if (is_space_char (*op_string))
7589 /* We check for an absolute prefix (differentiating,
7590 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
7591 if (*op_string == ABSOLUTE_PREFIX)
7594 if (is_space_char (*op_string))
7596 i.types[this_operand].bitfield.jumpabsolute = 1;
7599 /* Check if operand is a register. */
7600 if ((r = parse_register (op_string, &end_op)) != NULL)
7602 i386_operand_type temp;
7604 /* Check for a segment override by searching for ':' after a
7605 segment register. */
7607 if (is_space_char (*op_string))
7609 if (*op_string == ':'
7610 && (r->reg_type.bitfield.sreg2
7611 || r->reg_type.bitfield.sreg3))
7616 i.seg[i.mem_operands] = &es;
7619 i.seg[i.mem_operands] = &cs;
7622 i.seg[i.mem_operands] = &ss;
7625 i.seg[i.mem_operands] = &ds;
7628 i.seg[i.mem_operands] = &fs;
7631 i.seg[i.mem_operands] = &gs;
7635 /* Skip the ':' and whitespace. */
7637 if (is_space_char (*op_string))
7640 if (!is_digit_char (*op_string)
7641 && !is_identifier_char (*op_string)
7642 && *op_string != '('
7643 && *op_string != ABSOLUTE_PREFIX)
7645 as_bad (_("bad memory operand `%s'"), op_string);
7648 /* Handle case of %es:*foo. */
7649 if (*op_string == ABSOLUTE_PREFIX)
7652 if (is_space_char (*op_string))
7654 i.types[this_operand].bitfield.jumpabsolute = 1;
7656 goto do_memory_reference;
7660 as_bad (_("junk `%s' after register"), op_string);
7664 temp.bitfield.baseindex = 0;
7665 i.types[this_operand] = operand_type_or (i.types[this_operand],
7667 i.types[this_operand].bitfield.unspecified = 0;
7668 i.op[this_operand].regs = r;
7671 else if (*op_string == REGISTER_PREFIX)
7673 as_bad (_("bad register name `%s'"), op_string);
7676 else if (*op_string == IMMEDIATE_PREFIX)
7679 if (i.types[this_operand].bitfield.jumpabsolute)
7681 as_bad (_("immediate operand illegal with absolute jump"));
7684 if (!i386_immediate (op_string))
7687 else if (is_digit_char (*op_string)
7688 || is_identifier_char (*op_string)
7689 || *op_string == '(')
7691 /* This is a memory reference of some sort. */
7694 /* Start and end of displacement string expression (if found). */
7695 char *displacement_string_start;
7696 char *displacement_string_end;
7698 do_memory_reference:
7699 if ((i.mem_operands == 1
7700 && !current_templates->start->opcode_modifier.isstring)
7701 || i.mem_operands == 2)
7703 as_bad (_("too many memory references for `%s'"),
7704 current_templates->start->name);
7708 /* Check for base index form. We detect the base index form by
7709 looking for an ')' at the end of the operand, searching
7710 for the '(' matching it, and finding a REGISTER_PREFIX or ',' after the '('. */
7712 base_string = op_string + strlen (op_string);
7715 if (is_space_char (*base_string))
7718 /* If we only have a displacement, set up for it to be parsed later. */
7719 displacement_string_start = op_string;
7720 displacement_string_end = base_string + 1;
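/* For an operand such as "-4(%ebp,%esi,2)" the loop below walks back from
   the final ')' to its matching '(': the text before the '(' ("-4") is the
   displacement, and the part inside the parentheses supplies the base
   register, the optional index register and the optional scale factor.  */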
7722 if (*base_string == ')')
7725 unsigned int parens_balanced = 1;
7726 /* We've already checked that the numbers of left & right ()'s are
7727 equal, so this loop will not be infinite. */
7731 if (*base_string == ')')
7733 if (*base_string == '(')
7736 while (parens_balanced);
7738 temp_string = base_string;
7740 /* Skip past '(' and whitespace. */
7742 if (is_space_char (*base_string))
7745 if (*base_string == ','
7746 || ((i.base_reg = parse_register (base_string, &end_op))
7749 displacement_string_end = temp_string;
7751 i.types[this_operand].bitfield.baseindex = 1;
7755 base_string = end_op;
7756 if (is_space_char (*base_string))
7760 /* There may be an index reg or scale factor here. */
7761 if (*base_string == ',')
7764 if (is_space_char (*base_string))
7767 if ((i.index_reg = parse_register (base_string, &end_op))
7770 base_string = end_op;
7771 if (is_space_char (*base_string))
7773 if (*base_string == ',')
7776 if (is_space_char (*base_string))
7779 else if (*base_string != ')')
7781 as_bad (_("expecting `,' or `)' "
7782 "after index register in `%s'"),
7787 else if (*base_string == REGISTER_PREFIX)
7789 as_bad (_("bad register name `%s'"), base_string);
7793 /* Check for scale factor. */
7794 if (*base_string != ')')
7796 char *end_scale = i386_scale (base_string);
7801 base_string = end_scale;
7802 if (is_space_char (*base_string))
7804 if (*base_string != ')')
7806 as_bad (_("expecting `)' "
7807 "after scale factor in `%s'"),
7812 else if (!i.index_reg)
7814 as_bad (_("expecting index register or scale factor "
7815 "after `,'; got '%c'"),
7820 else if (*base_string != ')')
7822 as_bad (_("expecting `,' or `)' "
7823 "after base register in `%s'"),
7828 else if (*base_string == REGISTER_PREFIX)
7830 as_bad (_("bad register name `%s'"), base_string);
7835 /* If there's an expression beginning the operand, parse it,
7836 assuming displacement_string_start and
7837 displacement_string_end are meaningful. */
7838 if (displacement_string_start != displacement_string_end)
7840 if (!i386_displacement (displacement_string_start,
7841 displacement_string_end))
7845 /* Special case for (%dx) while doing input/output op. */
7847 && operand_type_equal (&i.base_reg->reg_type,
7848 &reg16_inoutportreg)
7850 && i.log2_scale_factor == 0
7851 && i.seg[i.mem_operands] == 0
7852 && !operand_type_check (i.types[this_operand], disp))
7854 i.types[this_operand] = inoutportreg;
7858 if (i386_index_check (operand_string) == 0)
7860 i.types[this_operand].bitfield.mem = 1;
7865 /* It's not a memory operand; argh! */
7866 as_bad (_("invalid char %s beginning operand %d `%s'"),
7867 output_invalid (*op_string),
7872 return 1; /* Normal return. */
7875 /* Calculate the maximum variable size (i.e., excluding fr_fix)
7876 that an rs_machine_dependent frag may reach. */
7879 i386_frag_max_var (fragS *frag)
7881 /* The only relaxable frags are for jumps.
7882 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
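/* For example, a short "jmp" (opcode 0xEB with an 8-bit displacement) is
   rewritten with opcode 0xE9 and a 32-bit displacement, while a short
   conditional jump (opcode 0x7N) becomes the two-byte opcode 0x0F,0x8N
   followed by a 32-bit displacement; 16-bit code uses 16-bit displacements
   in the relaxed forms instead.  */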
7883 gas_assert (frag->fr_type == rs_machine_dependent);
7884 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
7887 /* md_estimate_size_before_relax()
7889 Called just before relax() for rs_machine_dependent frags. The x86
7890 assembler uses these frags to handle variable size jump instructions.
7893 Any symbol that is now undefined will not become defined.
7894 Return the correct fr_subtype in the frag.
7895 Return the initial "guess for variable size of frag" to caller.
7896 The guess is actually the growth beyond the fixed part. Whatever
7897 we do to grow the fixed or variable part contributes to our returned value. */
7901 md_estimate_size_before_relax (fragS *fragP, segT segment)
7903 /* We've already got fragP->fr_subtype right; all we have to do is
7904 check for un-relaxable symbols. On an ELF system, we can't relax
7905 an externally visible symbol, because it may be overridden by a
7907 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7908 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7910 && (S_IS_EXTERNAL (fragP->fr_symbol)
7911 || S_IS_WEAK (fragP->fr_symbol)
7912 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7913 & BSF_GNU_INDIRECT_FUNCTION))))
7915 #if defined (OBJ_COFF) && defined (TE_PE)
7916 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7917 && S_IS_WEAK (fragP->fr_symbol))
7921 /* Symbol is undefined in this segment, or we need to keep a
7922 reloc so that weak symbols can be overridden. */
7923 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7924 enum bfd_reloc_code_real reloc_type;
7925 unsigned char *opcode;
7928 if (fragP->fr_var != NO_RELOC)
7929 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7931 reloc_type = BFD_RELOC_16_PCREL;
7933 reloc_type = BFD_RELOC_32_PCREL;
7935 old_fr_fix = fragP->fr_fix;
7936 opcode = (unsigned char *) fragP->fr_opcode;
7938 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7941 /* Make jmp (0xeb) a (d)word displacement jump. */
7943 fragP->fr_fix += size;
7944 fix_new (fragP, old_fr_fix, size,
7946 fragP->fr_offset, 1,
7952 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7954 /* Negate the condition, and branch past an
7955 unconditional jump. */
7958 /* Insert an unconditional jump. */
7960 /* We added two extra opcode bytes, and have a two byte offset. */
7962 fragP->fr_fix += 2 + 2;
7963 fix_new (fragP, old_fr_fix + 2, 2,
7965 fragP->fr_offset, 1,
7972 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7977 fixP = fix_new (fragP, old_fr_fix, 1,
7979 fragP->fr_offset, 1,
7981 fixP->fx_signed = 1;
7985 /* This changes the byte-displacement jump 0x7N
7986 to the (d)word-displacement jump 0x0f,0x8N. */
7987 opcode[1] = opcode[0] + 0x10;
7988 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7989 /* We've added an opcode byte. */
7990 fragP->fr_fix += 1 + size;
7991 fix_new (fragP, old_fr_fix + 1, size,
7993 fragP->fr_offset, 1,
7998 BAD_CASE (fragP->fr_subtype);
8002 return fragP->fr_fix - old_fr_fix;
8005 /* Guess size depending on current relax state. Initially the relax
8006 state will correspond to a short jump and we return 1, because
8007 the variable part of the frag (the branch offset) is one byte
8008 long. However, we can relax a section more than once and in that
8009 case we must either set fr_subtype back to the unrelaxed state,
8010 or return the value for the appropriate branch. */
8011 return md_relax_table[fragP->fr_subtype].rlx_length;
8014 /* Called after relax() is finished.
8016 In: Address of frag.
8017 fr_type == rs_machine_dependent.
8018 fr_subtype is what the address relaxed to.
8020 Out: Any fixSs and constants are set up.
8021 Caller will turn frag into a ".space 0". */
8024 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
8027 unsigned char *opcode;
8028 unsigned char *where_to_put_displacement = NULL;
8029 offsetT target_address;
8030 offsetT opcode_address;
8031 unsigned int extension = 0;
8032 offsetT displacement_from_opcode_start;
8034 opcode = (unsigned char *) fragP->fr_opcode;
8036 /* Address we want to reach in file space. */
8037 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
8039 /* Address opcode resides at in file space. */
8040 opcode_address = fragP->fr_address + fragP->fr_fix;
8042 /* Displacement from opcode start to fill into instruction. */
8043 displacement_from_opcode_start = target_address - opcode_address;
8045 if ((fragP->fr_subtype & BIG) == 0)
8047 /* Don't have to change opcode. */
8048 extension = 1; /* 1 opcode + 1 displacement */
8049 where_to_put_displacement = &opcode[1];
8053 if (no_cond_jump_promotion
8054 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
8055 as_warn_where (fragP->fr_file, fragP->fr_line,
8056 _("long jump required"));
8058 switch (fragP->fr_subtype)
8060 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
8061 extension = 4; /* 1 opcode + 4 displacement */
8063 where_to_put_displacement = &opcode[1];
8066 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
8067 extension = 2; /* 1 opcode + 2 displacement */
8069 where_to_put_displacement = &opcode[1];
8072 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
8073 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
8074 extension = 5; /* 2 opcode + 4 displacement */
8075 opcode[1] = opcode[0] + 0x10;
8076 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8077 where_to_put_displacement = &opcode[2];
8080 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
8081 extension = 3; /* 2 opcode + 2 displacement */
8082 opcode[1] = opcode[0] + 0x10;
8083 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
8084 where_to_put_displacement = &opcode[2];
8087 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
8092 where_to_put_displacement = &opcode[3];
8096 BAD_CASE (fragP->fr_subtype);
8101 /* If size is less than four we are sure that the operand fits,
8102 but if it's 4, then it could be that the displacement is larger
8104 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
8106 && ((addressT) (displacement_from_opcode_start - extension
8107 + ((addressT) 1 << 31))
8108 > (((addressT) 2 << 31) - 1)))
8110 as_bad_where (fragP->fr_file, fragP->fr_line,
8111 _("jump target out of range"));
8112 /* Make us emit 0. */
8113 displacement_from_opcode_start = extension;
8115 /* Now put displacement after opcode. */
8116 md_number_to_chars ((char *) where_to_put_displacement,
8117 (valueT) (displacement_from_opcode_start - extension),
8118 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
8119 fragP->fr_fix += extension;
8122 /* Apply a fixup (fixP) to segment data, once it has been determined
8123 by our caller that we have all the info we need to fix it up.
8125 Parameter valP is the pointer to the value of the bits.
8127 On the 386, immediates, displacements, and data pointers are all in
8128 the same (little-endian) format, so we don't need to care about which we are handling. */
8132 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
8134 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
8135 valueT value = *valP;
8137 #if !defined (TE_Mach)
8140 switch (fixP->fx_r_type)
8146 fixP->fx_r_type = BFD_RELOC_64_PCREL;
8149 case BFD_RELOC_X86_64_32S:
8150 fixP->fx_r_type = BFD_RELOC_32_PCREL;
8153 fixP->fx_r_type = BFD_RELOC_16_PCREL;
8156 fixP->fx_r_type = BFD_RELOC_8_PCREL;
8161 if (fixP->fx_addsy != NULL
8162 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
8163 || fixP->fx_r_type == BFD_RELOC_64_PCREL
8164 || fixP->fx_r_type == BFD_RELOC_16_PCREL
8165 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
8166 && !use_rela_relocations)
8168 /* This is a hack. There should be a better way to handle this.
8169 This covers for the fact that bfd_install_relocation will
8170 subtract the current location (for partial_inplace, PC relative
8171 relocations); see more below. */
8175 || OUTPUT_FLAVOR == bfd_target_coff_flavour
8178 value += fixP->fx_where + fixP->fx_frag->fr_address;
8180 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8183 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
8186 || (symbol_section_p (fixP->fx_addsy)
8187 && sym_seg != absolute_section))
8188 && !generic_force_reloc (fixP))
8190 /* Yes, we add the values in twice. This is because
8191 bfd_install_relocation subtracts them out again. I think
8192 bfd_install_relocation is broken, but I don't dare change it. FIXME. */
8194 value += fixP->fx_where + fixP->fx_frag->fr_address;
8198 #if defined (OBJ_COFF) && defined (TE_PE)
8199 /* For some reason, the PE format does not store a
8200 section address offset for a PC relative symbol. */
8201 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
8202 || S_IS_WEAK (fixP->fx_addsy))
8203 value += md_pcrel_from (fixP);
8206 #if defined (OBJ_COFF) && defined (TE_PE)
8207 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8209 value -= S_GET_VALUE (fixP->fx_addsy);
8213 /* Fix a few things - the dynamic linker expects certain values here,
8214 and we must not disappoint it. */
8215 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8216 if (IS_ELF && fixP->fx_addsy)
8217 switch (fixP->fx_r_type)
8219 case BFD_RELOC_386_PLT32:
8220 case BFD_RELOC_X86_64_PLT32:
8221 /* Make the jump instruction point to the address of the operand. At
8222 runtime we merely add the offset to the actual PLT entry. */
8226 case BFD_RELOC_386_TLS_GD:
8227 case BFD_RELOC_386_TLS_LDM:
8228 case BFD_RELOC_386_TLS_IE_32:
8229 case BFD_RELOC_386_TLS_IE:
8230 case BFD_RELOC_386_TLS_GOTIE:
8231 case BFD_RELOC_386_TLS_GOTDESC:
8232 case BFD_RELOC_X86_64_TLSGD:
8233 case BFD_RELOC_X86_64_TLSLD:
8234 case BFD_RELOC_X86_64_GOTTPOFF:
8235 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8236 value = 0; /* Fully resolved at runtime. No addend. */
8238 case BFD_RELOC_386_TLS_LE:
8239 case BFD_RELOC_386_TLS_LDO_32:
8240 case BFD_RELOC_386_TLS_LE_32:
8241 case BFD_RELOC_X86_64_DTPOFF32:
8242 case BFD_RELOC_X86_64_DTPOFF64:
8243 case BFD_RELOC_X86_64_TPOFF32:
8244 case BFD_RELOC_X86_64_TPOFF64:
8245 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8248 case BFD_RELOC_386_TLS_DESC_CALL:
8249 case BFD_RELOC_X86_64_TLSDESC_CALL:
8250 value = 0; /* Fully resolved at runtime. No addend. */
8251 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8255 case BFD_RELOC_386_GOT32:
8256 case BFD_RELOC_X86_64_GOT32:
8257 value = 0; /* Fully resolved at runtime. No addend. */
8260 case BFD_RELOC_VTABLE_INHERIT:
8261 case BFD_RELOC_VTABLE_ENTRY:
8268 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8270 #endif /* !defined (TE_Mach) */
8272 /* Are we finished with this relocation now? */
8273 if (fixP->fx_addsy == NULL)
8275 #if defined (OBJ_COFF) && defined (TE_PE)
8276 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8279 /* Remember value for tc_gen_reloc. */
8280 fixP->fx_addnumber = value;
8281 /* Clear out the frag for now. */
8285 else if (use_rela_relocations)
8287 fixP->fx_no_overflow = 1;
8288 /* Remember value for tc_gen_reloc. */
8289 fixP->fx_addnumber = value;
8293 md_number_to_chars (p, value, fixP->fx_size);
8297 md_atof (int type, char *litP, int *sizeP)
8299 /* This outputs the LITTLENUMs in REVERSE order;
8300 in accord with the little-endian 386. */
8301 return ieee_md_atof (type, litP, sizeP, FALSE);
8304 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8307 output_invalid (int c)
8310 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8313 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8314 "(0x%x)", (unsigned char) c);
8315 return output_invalid_buf;
8318 /* REG_STRING starts *before* REGISTER_PREFIX. */
8320 static const reg_entry *
8321 parse_real_register (char *reg_string, char **end_op)
8323 char *s = reg_string;
8325 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8328 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8329 if (*s == REGISTER_PREFIX)
8332 if (is_space_char (*s))
8336 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8338 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8339 return (const reg_entry *) NULL;
8343 /* For naked regs, make sure that we are not dealing with an identifier.
8344 This prevents confusing an identifier like `eax_var' with register `eax'. */
8346 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8347 return (const reg_entry *) NULL;
8351 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8353 /* Handle floating point regs, allowing spaces in the (i) part. */
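/* E.g. "%st(3)" and even "%st ( 3 )" are accepted; the code looks up "st(0)"
   and returns the entry three slots further on.  A bare "%st" has already
   matched the first table entry above.  */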
8354 if (r == i386_regtab /* %st is first entry of table */)
8356 if (is_space_char (*s))
8361 if (is_space_char (*s))
8363 if (*s >= '0' && *s <= '7')
8367 if (is_space_char (*s))
8372 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8377 /* We have "%st(" then garbage. */
8378 return (const reg_entry *) NULL;
8382 if (r == NULL || allow_pseudo_reg)
8385 if (operand_type_all_zero (&r->reg_type))
8386 return (const reg_entry *) NULL;
8388 if ((r->reg_type.bitfield.reg32
8389 || r->reg_type.bitfield.sreg3
8390 || r->reg_type.bitfield.control
8391 || r->reg_type.bitfield.debug
8392 || r->reg_type.bitfield.test)
8393 && !cpu_arch_flags.bitfield.cpui386)
8394 return (const reg_entry *) NULL;
8396 if (r->reg_type.bitfield.floatreg
8397 && !cpu_arch_flags.bitfield.cpu8087
8398 && !cpu_arch_flags.bitfield.cpu287
8399 && !cpu_arch_flags.bitfield.cpu387)
8400 return (const reg_entry *) NULL;
8402 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8403 return (const reg_entry *) NULL;
8405 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8406 return (const reg_entry *) NULL;
8408 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8409 return (const reg_entry *) NULL;
8411 /* Only allow the fake index registers %eiz / %riz when allow_index_reg is non-zero. */
8412 if (!allow_index_reg
8413 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8414 return (const reg_entry *) NULL;
8416 if (((r->reg_flags & (RegRex64 | RegRex))
8417 || r->reg_type.bitfield.reg64)
8418 && (!cpu_arch_flags.bitfield.cpulm
8419 || !operand_type_equal (&r->reg_type, &control))
8420 && flag_code != CODE_64BIT)
8421 return (const reg_entry *) NULL;
8423 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8424 return (const reg_entry *) NULL;
8429 /* REG_STRING starts *before* REGISTER_PREFIX. */
8431 static const reg_entry *
8432 parse_register (char *reg_string, char **end_op)
8436 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8437 r = parse_real_register (reg_string, end_op);
8442 char *save = input_line_pointer;
8446 input_line_pointer = reg_string;
8447 c = get_symbol_end ();
8448 symbolP = symbol_find (reg_string);
8449 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8451 const expressionS *e = symbol_get_value_expression (symbolP);
8453 know (e->X_op == O_register);
8454 know (e->X_add_number >= 0
8455 && (valueT) e->X_add_number < i386_regtab_size);
8456 r = i386_regtab + e->X_add_number;
8457 *end_op = input_line_pointer;
8459 *input_line_pointer = c;
8460 input_line_pointer = save;
8466 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8469 char *end = input_line_pointer;
8472 r = parse_register (name, &input_line_pointer);
8473 if (r && end <= input_line_pointer)
8475 *nextcharP = *input_line_pointer;
8476 *input_line_pointer = 0;
8477 e->X_op = O_register;
8478 e->X_add_number = r - i386_regtab;
8481 input_line_pointer = end;
8483 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8487 md_operand (expressionS *e)
8492 switch (*input_line_pointer)
8494 case REGISTER_PREFIX:
8495 r = parse_real_register (input_line_pointer, &end);
8498 e->X_op = O_register;
8499 e->X_add_number = r - i386_regtab;
8500 input_line_pointer = end;
8505 gas_assert (intel_syntax);
8506 end = input_line_pointer++;
8508 if (*input_line_pointer == ']')
8510 ++input_line_pointer;
8511 e->X_op_symbol = make_expr_symbol (e);
8512 e->X_add_symbol = NULL;
8513 e->X_add_number = 0;
8519 input_line_pointer = end;
8526 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8527 const char *md_shortopts = "kVQ:sqn";
8529 const char *md_shortopts = "qn";
8532 #define OPTION_32 (OPTION_MD_BASE + 0)
8533 #define OPTION_64 (OPTION_MD_BASE + 1)
8534 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8535 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8536 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8537 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8538 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8539 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8540 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8541 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8542 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8543 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8544 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
8545 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
8546 #define OPTION_X32 (OPTION_MD_BASE + 14)
8548 struct option md_longopts[] =
8550 {"32", no_argument, NULL, OPTION_32},
8551 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8552 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8553 {"64", no_argument, NULL, OPTION_64},
8555 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8556 {"x32", no_argument, NULL, OPTION_X32},
8558 {"divide", no_argument, NULL, OPTION_DIVIDE},
8559 {"march", required_argument, NULL, OPTION_MARCH},
8560 {"mtune", required_argument, NULL, OPTION_MTUNE},
8561 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8562 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8563 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8564 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8565 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8566 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8567 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8568 {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
8569 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8570 {NULL, no_argument, NULL, 0}
8572 size_t md_longopts_size = sizeof (md_longopts);
8575 md_parse_option (int c, char *arg)
8583 optimize_align_code = 0;
8590 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8591 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8592 should be emitted or not. FIXME: Not implemented. */
8596 /* -V: SVR4 argument to print version ID. */
8598 print_version_id ();
8601 /* -k: Ignore for FreeBSD compatibility. */
8606 /* -s: On i386 Solaris, this tells the native assembler to use
8607 .stab instead of .stab.excl. We always use .stab anyhow. */
8610 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8611 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8614 const char **list, **l;
8616 list = bfd_target_list ();
8617 for (l = list; *l != NULL; l++)
8618 if (CONST_STRNEQ (*l, "elf64-x86-64")
8619 || strcmp (*l, "coff-x86-64") == 0
8620 || strcmp (*l, "pe-x86-64") == 0
8621 || strcmp (*l, "pei-x86-64") == 0
8622 || strcmp (*l, "mach-o-x86-64") == 0)
8624 default_arch = "x86_64";
8628 as_fatal (_("no compiled in support for x86_64"));
8634 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8638 const char **list, **l;
8640 list = bfd_target_list ();
8641 for (l = list; *l != NULL; l++)
8642 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8644 default_arch = "x86_64:32";
8648 as_fatal (_("no compiled in support for 32bit x86_64"));
8652 as_fatal (_("32bit x86_64 is only supported for ELF"));
8657 default_arch = "i386";
8661 #ifdef SVR4_COMMENT_CHARS
8666 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8668 for (s = i386_comment_chars; *s != '\0'; s++)
8672 i386_comment_chars = n;
8678 arch = xstrdup (arg);
8682 as_fatal (_("invalid -march= option: `%s'"), arg);
8683 next = strchr (arch, '+');
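/* E.g. "-march=corei7+avx" first matches the "corei7" processor entry and
   then ors in the AVX feature flags; extensions with a "no" prefix (such as
   "+nosse") use the negated table entries to mask features out.  Each
   '+'-separated token is handled by one pass of the surrounding loop.  */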
8686 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8688 if (strcmp (arch, cpu_arch [j].name) == 0)
8691 if (! cpu_arch[j].flags.bitfield.cpui386)
8694 cpu_arch_name = cpu_arch[j].name;
8695 cpu_sub_arch_name = NULL;
8696 cpu_arch_flags = cpu_arch[j].flags;
8697 cpu_arch_isa = cpu_arch[j].type;
8698 cpu_arch_isa_flags = cpu_arch[j].flags;
8699 if (!cpu_arch_tune_set)
8701 cpu_arch_tune = cpu_arch_isa;
8702 cpu_arch_tune_flags = cpu_arch_isa_flags;
8706 else if (*cpu_arch [j].name == '.'
8707 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8709 /* ISA extension. */
8710 i386_cpu_flags flags;
8712 if (!cpu_arch[j].negated)
8713 flags = cpu_flags_or (cpu_arch_flags,
8716 flags = cpu_flags_and_not (cpu_arch_flags,
8718 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8720 if (cpu_sub_arch_name)
8722 char *name = cpu_sub_arch_name;
8723 cpu_sub_arch_name = concat (name,
8725 (const char *) NULL);
8729 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8730 cpu_arch_flags = flags;
8731 cpu_arch_isa_flags = flags;
8737 if (j >= ARRAY_SIZE (cpu_arch))
8738 as_fatal (_("invalid -march= option: `%s'"), arg);
8742 while (next != NULL );
8747 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8748 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8750 if (strcmp (arg, cpu_arch [j].name) == 0)
8752 cpu_arch_tune_set = 1;
8753 cpu_arch_tune = cpu_arch [j].type;
8754 cpu_arch_tune_flags = cpu_arch[j].flags;
8758 if (j >= ARRAY_SIZE (cpu_arch))
8759 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8762 case OPTION_MMNEMONIC:
8763 if (strcasecmp (arg, "att") == 0)
8765 else if (strcasecmp (arg, "intel") == 0)
8768 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8771 case OPTION_MSYNTAX:
8772 if (strcasecmp (arg, "att") == 0)
8774 else if (strcasecmp (arg, "intel") == 0)
8777 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8780 case OPTION_MINDEX_REG:
8781 allow_index_reg = 1;
8784 case OPTION_MNAKED_REG:
8785 allow_naked_reg = 1;
8788 case OPTION_MOLD_GCC:
8792 case OPTION_MSSE2AVX:
8796 case OPTION_MSSE_CHECK:
8797 if (strcasecmp (arg, "error") == 0)
8798 sse_check = check_error;
8799 else if (strcasecmp (arg, "warning") == 0)
8800 sse_check = check_warning;
8801 else if (strcasecmp (arg, "none") == 0)
8802 sse_check = check_none;
8804 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8807 case OPTION_MOPERAND_CHECK:
8808 if (strcasecmp (arg, "error") == 0)
8809 operand_check = check_error;
8810 else if (strcasecmp (arg, "warning") == 0)
8811 operand_check = check_warning;
8812 else if (strcasecmp (arg, "none") == 0)
8813 operand_check = check_none;
8815 as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
8818 case OPTION_MAVXSCALAR:
8819 if (strcasecmp (arg, "128") == 0)
8821 else if (strcasecmp (arg, "256") == 0)
8824 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8833 #define MESSAGE_TEMPLATE \
8837 show_arch (FILE *stream, int ext, int check)
8839 static char message[] = MESSAGE_TEMPLATE;
8840 char *start = message + 27;
8842 int size = sizeof (MESSAGE_TEMPLATE);
8849 left = size - (start - message);
8850 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8852 /* Should it be skipped? */
8853 if (cpu_arch [j].skip)
8856 name = cpu_arch [j].name;
8857 len = cpu_arch [j].len;
8860 /* It is an extension. Skip if we aren't asked to show it. */
8871 /* It is a processor. Skip it if we only show extensions. */
8874 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8876 /* It is an impossible processor - skip. */
8880 /* Reserve 2 spaces for ", " or ",\0" */
8883 /* Check if there is any room. */
8891 p = mempcpy (p, name, len);
8895 /* Output the current message now and start a new one. */
8898 fprintf (stream, "%s\n", message);
8900 left = size - (start - message) - len - 2;
8902 gas_assert (left >= 0);
8904 p = mempcpy (p, name, len);
8909 fprintf (stream, "%s\n", message);
8913 md_show_usage (FILE *stream)
8915 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8916 fprintf (stream, _("\
8918 -V print assembler version number\n\
8921 fprintf (stream, _("\
8922 -n Do not optimize code alignment\n\
8923 -q quieten some warnings\n"));
8924 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8925 fprintf (stream, _("\
8928 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8929 || defined (TE_PE) || defined (TE_PEP))
8930 fprintf (stream, _("\
8931 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8933 #ifdef SVR4_COMMENT_CHARS
8934 fprintf (stream, _("\
8935 --divide do not treat `/' as a comment character\n"));
8937 fprintf (stream, _("\
8938 --divide ignored\n"));
8940 fprintf (stream, _("\
8941 -march=CPU[,+EXTENSION...]\n\
8942 generate code for CPU and EXTENSION, CPU is one of:\n"));
8943 show_arch (stream, 0, 1);
8944 fprintf (stream, _("\
8945 EXTENSION is combination of:\n"));
8946 show_arch (stream, 1, 0);
8947 fprintf (stream, _("\
8948 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8949 show_arch (stream, 0, 0);
8950 fprintf (stream, _("\
8951 -msse2avx encode SSE instructions with VEX prefix\n"));
8952 fprintf (stream, _("\
8953 -msse-check=[none|error|warning]\n\
8954 check SSE instructions\n"));
8955 fprintf (stream, _("\
8956 -moperand-check=[none|error|warning]\n\
8957 check operand combinations for validity\n"));
8958 fprintf (stream, _("\
8959 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8961 fprintf (stream, _("\
8962 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8963 fprintf (stream, _("\
8964 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8965 fprintf (stream, _("\
8966 -mindex-reg support pseudo index registers\n"));
8967 fprintf (stream, _("\
8968 -mnaked-reg don't require `%%' prefix for registers\n"));
8969 fprintf (stream, _("\
8970 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8973 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8974 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8975 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8977 /* Pick the target format to use. */
8980 i386_target_format (void)
8982 if (!strncmp (default_arch, "x86_64", 6))
8984 update_code_flag (CODE_64BIT, 1);
8985 if (default_arch[6] == '\0')
8986 x86_elf_abi = X86_64_ABI;
8988 x86_elf_abi = X86_64_X32_ABI;
8990 else if (!strcmp (default_arch, "i386"))
8991 update_code_flag (CODE_32BIT, 1);
8993 as_fatal (_("unknown architecture"));
8995 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8996 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8997 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8998 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
9000 switch (OUTPUT_FLAVOR)
9002 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
9003 case bfd_target_aout_flavour:
9004 return AOUT_TARGET_FORMAT;
9006 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
9007 # if defined (TE_PE) || defined (TE_PEP)
9008 case bfd_target_coff_flavour:
9009 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
9010 # elif defined (TE_GO32)
9011 case bfd_target_coff_flavour:
9014 case bfd_target_coff_flavour:
9018 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9019 case bfd_target_elf_flavour:
9023 switch (x86_elf_abi)
9026 format = ELF_TARGET_FORMAT;
9029 use_rela_relocations = 1;
9031 format = ELF_TARGET_FORMAT64;
9033 case X86_64_X32_ABI:
9034 use_rela_relocations = 1;
9036 disallow_64bit_reloc = 1;
9037 format = ELF_TARGET_FORMAT32;
9040 if (cpu_arch_isa == PROCESSOR_L1OM)
9042 if (x86_elf_abi != X86_64_ABI)
9043 as_fatal (_("Intel L1OM is 64bit only"));
9044 return ELF_TARGET_L1OM_FORMAT;
9046 if (cpu_arch_isa == PROCESSOR_K1OM)
9048 if (x86_elf_abi != X86_64_ABI)
9049 as_fatal (_("Intel K1OM is 64bit only"));
9050 return ELF_TARGET_K1OM_FORMAT;
9056 #if defined (OBJ_MACH_O)
9057 case bfd_target_mach_o_flavour:
9058 if (flag_code == CODE_64BIT)
9060 use_rela_relocations = 1;
9062 return "mach-o-x86-64";
9065 return "mach-o-i386";
9073 #endif /* OBJ_MAYBE_ more than one */
9075 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
9077 i386_elf_emit_arch_note (void)
9079 if (IS_ELF && cpu_arch_name != NULL)
9082 asection *seg = now_seg;
9083 subsegT subseg = now_subseg;
9084 Elf_Internal_Note i_note;
9085 Elf_External_Note e_note;
9086 asection *note_secp;
9089 /* Create the .note section. */
9090 note_secp = subseg_new (".note", 0);
9091 bfd_set_section_flags (stdoutput,
9093 SEC_HAS_CONTENTS | SEC_READONLY);
9095 /* Process the arch string. */
9096 len = strlen (cpu_arch_name);
9098 i_note.namesz = len + 1;
9100 i_note.type = NT_ARCH;
9101 p = frag_more (sizeof (e_note.namesz));
9102 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
9103 p = frag_more (sizeof (e_note.descsz));
9104 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
9105 p = frag_more (sizeof (e_note.type));
9106 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
9107 p = frag_more (len + 1);
9108 strcpy (p, cpu_arch_name);
9110 frag_align (2, 0, 0);
9112 subseg_set (seg, subseg);
9118 md_undefined_symbol (char *name)
9120 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
9121 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
9122 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
9123 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
9127 if (symbol_find (name))
9128 as_bad (_("GOT already in symbol table"));
9129 GOT_symbol = symbol_new (name, undefined_section,
9130 (valueT) 0, &zero_address_frag);
9137 /* Round up a section size to the appropriate boundary. */
9140 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
9142 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
9143 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
9145 /* For a.out, force the section size to be aligned. If we don't do
9146 this, BFD will align it for us, but it will not write out the
9147 final bytes of the section. This may be a bug in BFD, but it is
9148 easier to fix it here since that is how the other a.out targets
9152 align = bfd_get_section_alignment (stdoutput, segment);
9153 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
9160 /* On the i386, PC-relative offsets are relative to the start of the
9161 next instruction. That is, the address of the offset, plus its
9162 size, since the offset is always the last part of the insn. */
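/* For instance, for a 5-byte "call foo" at address A the 4-byte pc-relative
   field sits at A+1, so the value returned below is A+1+4 = A+5 and the
   field holds foo - (A+5).  */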
9165 md_pcrel_from (fixS *fixP)
9167 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
9173 s_bss (int ignore ATTRIBUTE_UNUSED)
9177 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9179 obj_elf_section_change_hook ();
9181 temp = get_absolute_expression ();
9182 subseg_set (bss_section, (subsegT) temp);
9183 demand_empty_rest_of_line ();
9189 i386_validate_fix (fixS *fixp)
9191 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
9193 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
9197 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
9202 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
9204 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
9211 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
9214 bfd_reloc_code_real_type code;
9216 switch (fixp->fx_r_type)
9218 case BFD_RELOC_X86_64_PLT32:
9219 case BFD_RELOC_X86_64_GOT32:
9220 case BFD_RELOC_X86_64_GOTPCREL:
9221 case BFD_RELOC_386_PLT32:
9222 case BFD_RELOC_386_GOT32:
9223 case BFD_RELOC_386_GOTOFF:
9224 case BFD_RELOC_386_GOTPC:
9225 case BFD_RELOC_386_TLS_GD:
9226 case BFD_RELOC_386_TLS_LDM:
9227 case BFD_RELOC_386_TLS_LDO_32:
9228 case BFD_RELOC_386_TLS_IE_32:
9229 case BFD_RELOC_386_TLS_IE:
9230 case BFD_RELOC_386_TLS_GOTIE:
9231 case BFD_RELOC_386_TLS_LE_32:
9232 case BFD_RELOC_386_TLS_LE:
9233 case BFD_RELOC_386_TLS_GOTDESC:
9234 case BFD_RELOC_386_TLS_DESC_CALL:
9235 case BFD_RELOC_X86_64_TLSGD:
9236 case BFD_RELOC_X86_64_TLSLD:
9237 case BFD_RELOC_X86_64_DTPOFF32:
9238 case BFD_RELOC_X86_64_DTPOFF64:
9239 case BFD_RELOC_X86_64_GOTTPOFF:
9240 case BFD_RELOC_X86_64_TPOFF32:
9241 case BFD_RELOC_X86_64_TPOFF64:
9242 case BFD_RELOC_X86_64_GOTOFF64:
9243 case BFD_RELOC_X86_64_GOTPC32:
9244 case BFD_RELOC_X86_64_GOT64:
9245 case BFD_RELOC_X86_64_GOTPCREL64:
9246 case BFD_RELOC_X86_64_GOTPC64:
9247 case BFD_RELOC_X86_64_GOTPLT64:
9248 case BFD_RELOC_X86_64_PLTOFF64:
9249 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9250 case BFD_RELOC_X86_64_TLSDESC_CALL:
9252 case BFD_RELOC_VTABLE_ENTRY:
9253 case BFD_RELOC_VTABLE_INHERIT:
9255 case BFD_RELOC_32_SECREL:
9257 code = fixp->fx_r_type;
9259 case BFD_RELOC_X86_64_32S:
9260 if (!fixp->fx_pcrel)
9262 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
9263 code = fixp->fx_r_type;
9269 switch (fixp->fx_size)
9272 as_bad_where (fixp->fx_file, fixp->fx_line,
9273 _("can not do %d byte pc-relative relocation"),
9275 code = BFD_RELOC_32_PCREL;
9277 case 1: code = BFD_RELOC_8_PCREL; break;
9278 case 2: code = BFD_RELOC_16_PCREL; break;
9279 case 4: code = BFD_RELOC_32_PCREL; break;
9281 case 8: code = BFD_RELOC_64_PCREL; break;
9287 switch (fixp->fx_size)
9290 as_bad_where (fixp->fx_file, fixp->fx_line,
9291 _("can not do %d byte relocation"),
9293 code = BFD_RELOC_32;
9295 case 1: code = BFD_RELOC_8; break;
9296 case 2: code = BFD_RELOC_16; break;
9297 case 4: code = BFD_RELOC_32; break;
9299 case 8: code = BFD_RELOC_64; break;
9306 if ((code == BFD_RELOC_32
9307 || code == BFD_RELOC_32_PCREL
9308 || code == BFD_RELOC_X86_64_32S)
9310 && fixp->fx_addsy == GOT_symbol)
9313 code = BFD_RELOC_386_GOTPC;
9315 code = BFD_RELOC_X86_64_GOTPC32;
9317 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9319 && fixp->fx_addsy == GOT_symbol)
9321 code = BFD_RELOC_X86_64_GOTPC64;
9324 rel = (arelent *) xmalloc (sizeof (arelent));
9325 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9326 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9328 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9330 if (!use_rela_relocations)
9332 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9333 vtable entry to be used in the relocation's section offset. */
9334 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9335 rel->address = fixp->fx_offset;
9336 #if defined (OBJ_COFF) && defined (TE_PE)
9337 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9338 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9343 /* Use the rela in 64bit mode. */
9346 if (disallow_64bit_reloc)
9349 case BFD_RELOC_X86_64_DTPOFF64:
9350 case BFD_RELOC_X86_64_TPOFF64:
9351 case BFD_RELOC_64_PCREL:
9352 case BFD_RELOC_X86_64_GOTOFF64:
9353 case BFD_RELOC_X86_64_GOT64:
9354 case BFD_RELOC_X86_64_GOTPCREL64:
9355 case BFD_RELOC_X86_64_GOTPC64:
9356 case BFD_RELOC_X86_64_GOTPLT64:
9357 case BFD_RELOC_X86_64_PLTOFF64:
9358 as_bad_where (fixp->fx_file, fixp->fx_line,
9359 _("cannot represent relocation type %s in x32 mode"),
9360 bfd_get_reloc_code_name (code));
9366 if (!fixp->fx_pcrel)
9367 rel->addend = fixp->fx_offset;
9371 case BFD_RELOC_X86_64_PLT32:
9372 case BFD_RELOC_X86_64_GOT32:
9373 case BFD_RELOC_X86_64_GOTPCREL:
9374 case BFD_RELOC_X86_64_TLSGD:
9375 case BFD_RELOC_X86_64_TLSLD:
9376 case BFD_RELOC_X86_64_GOTTPOFF:
9377 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9378 case BFD_RELOC_X86_64_TLSDESC_CALL:
9379 rel->addend = fixp->fx_offset - fixp->fx_size;
9382 rel->addend = (section->vma
9384 + fixp->fx_addnumber
9385 + md_pcrel_from (fixp));
9390 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9391 if (rel->howto == NULL)
9393 as_bad_where (fixp->fx_file, fixp->fx_line,
9394 _("cannot represent relocation type %s"),
9395 bfd_get_reloc_code_name (code));
9396 /* Set howto to a garbage value so that we can keep going. */
9397 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9398 gas_assert (rel->howto != NULL);
9404 #include "tc-i386-intel.c"
9407 tc_x86_parse_to_dw2regnum (expressionS *exp)
9409 int saved_naked_reg;
9410 char saved_register_dot;
9412 saved_naked_reg = allow_naked_reg;
9413 allow_naked_reg = 1;
9414 saved_register_dot = register_chars['.'];
9415 register_chars['.'] = '.';
9416 allow_pseudo_reg = 1;
9417 expression_and_evaluate (exp);
9418 allow_pseudo_reg = 0;
9419 register_chars['.'] = saved_register_dot;
9420 allow_naked_reg = saved_naked_reg;
9422 if (exp->X_op == O_register && exp->X_add_number >= 0)
9424 if ((addressT) exp->X_add_number < i386_regtab_size)
9426 exp->X_op = O_constant;
9427 exp->X_add_number = i386_regtab[exp->X_add_number]
9428 .dw2_regnum[flag_code >> 1];
9431 exp->X_op = O_illegal;
9436 tc_x86_frame_initial_instructions (void)
9438 static unsigned int sp_regno[2];
9440 if (!sp_regno[flag_code >> 1])
9442 char *saved_input = input_line_pointer;
9443 char sp[][4] = {"esp", "rsp"};
9446 input_line_pointer = sp[flag_code >> 1];
9447 tc_x86_parse_to_dw2regnum (&exp);
9448 gas_assert (exp.X_op == O_constant);
9449 sp_regno[flag_code >> 1] = exp.X_add_number;
9450 input_line_pointer = saved_input;
9453 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9454 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
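/* In other words, on entry the CFA is taken to be %esp+4 (or %rsp+8 in
   64-bit code, x86_cie_data_alignment being -4 or -8), with the return
   address saved at CFA-4 (respectively CFA-8); the two calls above record
   exactly that.  */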
9458 x86_dwarf2_addr_size (void)
9460 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9461 if (x86_elf_abi == X86_64_X32_ABI)
9464 return bfd_arch_bits_per_address (stdoutput) / 8;
9468 i386_elf_section_type (const char *str, size_t len)
9470 if (flag_code == CODE_64BIT
9471 && len == sizeof ("unwind") - 1
9472 && strncmp (str, "unwind", 6) == 0)
9473 return SHT_X86_64_UNWIND;
9480 i386_solaris_fix_up_eh_frame (segT sec)
9482 if (flag_code == CODE_64BIT)
9483 elf_section_type (sec) = SHT_X86_64_UNWIND;
9489 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9493 exp.X_op = O_secrel;
9494 exp.X_add_symbol = symbol;
9495 exp.X_add_number = 0;
9496 emit_expr (&exp, size);
9500 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9501 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
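/* E.g. in 64-bit code a directive such as

     .section .ldata,"awl",@progbits

   uses the extra 'l' flag letter handled here to request SHF_X86_64_LARGE;
   x86_64_section_word below handles the spelled-out "large" keyword
   similarly.  */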
9504 x86_64_section_letter (int letter, char **ptr_msg)
9506 if (flag_code == CODE_64BIT)
9509 return SHF_X86_64_LARGE;
9511 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9514 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9519 x86_64_section_word (char *str, size_t len)
9521 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9522 return SHF_X86_64_LARGE;
9528 handle_large_common (int small ATTRIBUTE_UNUSED)
9530 if (flag_code != CODE_64BIT)
9532 s_comm_internal (0, elf_common_parse);
9533 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9537 static segT lbss_section;
9538 asection *saved_com_section_ptr = elf_com_section_ptr;
9539 asection *saved_bss_section = bss_section;
9541 if (lbss_section == NULL)
9543 flagword applicable;
9545 subsegT subseg = now_subseg;
9547 /* The .lbss section is for local .largecomm symbols. */
9548 lbss_section = subseg_new (".lbss", 0);
9549 applicable = bfd_applicable_section_flags (stdoutput);
9550 bfd_set_section_flags (stdoutput, lbss_section,
9551 applicable & SEC_ALLOC);
9552 seg_info (lbss_section)->bss = 1;
9554 subseg_set (seg, subseg);
9557 elf_com_section_ptr = &_bfd_elf_large_com_section;
9558 bss_section = lbss_section;
9560 s_comm_internal (0, elf_common_parse);
9562 elf_com_section_ptr = saved_com_section_ptr;
9563 bss_section = saved_bss_section;
9566 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */