1 /* tc-i386.c -- Assemble code for the Intel 80386
2 Copyright 1989, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
3 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
5 Free Software Foundation, Inc.
7 This file is part of GAS, the GNU Assembler.
9 GAS is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
14 GAS is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GAS; see the file COPYING. If not, write to the Free
21 Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24 /* Intel 80386 machine specific gas.
25 Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
26 x86_64 support by Jan Hubicka (jh@suse.cz)
27 VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
28 Bugs & suggestions are completely welcome. This is free software.
29 Please help us make it better. */
32 #include "safe-ctype.h"
34 #include "dwarf2dbg.h"
35 #include "dw2gencfi.h"
36 #include "elf/x86-64.h"
37 #include "opcodes/i386-init.h"
39 #ifndef REGISTER_WARNINGS
40 #define REGISTER_WARNINGS 1
43 #ifndef INFER_ADDR_PREFIX
44 #define INFER_ADDR_PREFIX 1
48 #define DEFAULT_ARCH "i386"
53 #define INLINE __inline__
59 /* Prefixes will be emitted in the order defined below.
60 WAIT_PREFIX must be the first prefix since FWAIT really is an
61 instruction, and so must come before any prefixes.
62 The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
63 REP_PREFIX/HLE_PREFIX, LOCK_PREFIX. */
69 #define HLE_PREFIX REP_PREFIX
71 #define REX_PREFIX 6 /* must come last. */
72 #define MAX_PREFIXES 7 /* max prefixes per opcode */
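/* Illustrative note (an addition, not part of the original source): since
   the i.prefix[] slots are emitted in the index order above, a hypothetical
   "lock incl %fs:(%eax)" in 32-bit code fills i.prefix[SEG_PREFIX] with
   0x64 and i.prefix[LOCK_PREFIX] with 0xf0, and is written out as
   64 f0 ff 00 -- segment override before LOCK, matching the preferred
   order described in the comment above.  */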
74 /* we define the syntax here (modulo base,index,scale syntax) */
75 #define REGISTER_PREFIX '%'
76 #define IMMEDIATE_PREFIX '$'
77 #define ABSOLUTE_PREFIX '*'
79 /* these are the instruction mnemonic suffixes in AT&T syntax or
80 memory operand size in Intel syntax. */
81 #define WORD_MNEM_SUFFIX 'w'
82 #define BYTE_MNEM_SUFFIX 'b'
83 #define SHORT_MNEM_SUFFIX 's'
84 #define LONG_MNEM_SUFFIX 'l'
85 #define QWORD_MNEM_SUFFIX 'q'
86 #define XMMWORD_MNEM_SUFFIX 'x'
87 #define YMMWORD_MNEM_SUFFIX 'y'
88 /* Intel Syntax. Use a non-ascii letter since it never appears
90 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
92 #define END_OF_INSN '\0'
95 'templates' is for grouping together 'template' structures for opcodes
96 of the same name. This is only used for storing the insns in the grand
97 ole hash table of insns.
98 The templates themselves start at START and range up to (but not including)
103 const insn_template *start;
104 const insn_template *end;
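/* Added note: a mnemonic looked up in op_hash (declared further down)
   resolves to one of these records, and match_template () then scans the
   half-open range [start, end) for the variant whose operand types fit
   the operands that were parsed.  */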
108 /* 386 operand encoding bytes: see 386 book for details of this. */
111 unsigned int regmem; /* codes register or memory operand */
112 unsigned int reg; /* codes register operand (or extended opcode) */
113 unsigned int mode; /* how to interpret regmem & reg */
117 /* x86-64 extension prefix. */
118 typedef int rex_byte;
120 /* 386 opcode byte to code indirect addressing. */
129 /* x86 arch names, types and features */
132 const char *name; /* arch name */
133 unsigned int len; /* arch string length */
134 enum processor_type type; /* arch type */
135 i386_cpu_flags flags; /* cpu feature flags */
136 unsigned int skip; /* show_arch should skip this. */
137 unsigned int negated; /* turn off indicated flags. */
141 static void update_code_flag (int, int);
142 static void set_code_flag (int);
143 static void set_16bit_gcc_code_flag (int);
144 static void set_intel_syntax (int);
145 static void set_intel_mnemonic (int);
146 static void set_allow_index_reg (int);
147 static void set_sse_check (int);
148 static void set_cpu_arch (int);
150 static void pe_directive_secrel (int);
152 static void signed_cons (int);
153 static char *output_invalid (int c);
154 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
156 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
158 static int i386_att_operand (char *);
159 static int i386_intel_operand (char *, int);
160 static int i386_intel_simplify (expressionS *);
161 static int i386_intel_parse_name (const char *, expressionS *);
162 static const reg_entry *parse_register (char *, char **);
163 static char *parse_insn (char *, char *);
164 static char *parse_operands (char *, const char *);
165 static void swap_operands (void);
166 static void swap_2_operands (int, int);
167 static void optimize_imm (void);
168 static void optimize_disp (void);
169 static const insn_template *match_template (void);
170 static int check_string (void);
171 static int process_suffix (void);
172 static int check_byte_reg (void);
173 static int check_long_reg (void);
174 static int check_qword_reg (void);
175 static int check_word_reg (void);
176 static int finalize_imm (void);
177 static int process_operands (void);
178 static const seg_entry *build_modrm_byte (void);
179 static void output_insn (void);
180 static void output_imm (fragS *, offsetT);
181 static void output_disp (fragS *, offsetT);
183 static void s_bss (int);
185 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
186 static void handle_large_common (int small ATTRIBUTE_UNUSED);
189 static const char *default_arch = DEFAULT_ARCH;
194 /* The VEX prefix is either 2 or 3 bytes long. */
195 unsigned char bytes[3];
197 /* Destination or source register specifier. */
198 const reg_entry *register_specifier;
201 /* 'md_assemble ()' gathers together information and puts it into a
208 const reg_entry *regs;
213 operand_size_mismatch,
214 operand_type_mismatch,
215 register_type_mismatch,
216 number_of_operands_mismatch,
217 invalid_instruction_suffix,
220 unsupported_with_intel_mnemonic,
223 invalid_vsib_address,
224 unsupported_vector_index_register
229 /* TM holds the template for the insn we're currently assembling. */
232 /* SUFFIX holds the instruction size suffix for byte, word, dword
233 or qword, if given. */
236 /* OPERANDS gives the number of given operands. */
237 unsigned int operands;
239 /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
240 of given register, displacement, memory operands and immediate
242 unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
244 /* TYPES [i] is the type (see above #defines) which tells us how to
245 use OP[i] for the corresponding operand. */
246 i386_operand_type types[MAX_OPERANDS];
248 /* Displacement expression, immediate expression, or register for each
250 union i386_op op[MAX_OPERANDS];
252 /* Flags for operands. */
253 unsigned int flags[MAX_OPERANDS];
254 #define Operand_PCrel 1
256 /* Relocation type for operand */
257 enum bfd_reloc_code_real reloc[MAX_OPERANDS];
259 /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
260 the base index byte below. */
261 const reg_entry *base_reg;
262 const reg_entry *index_reg;
263 unsigned int log2_scale_factor;
265 /* SEG gives the seg_entries of this insn. They are zero unless
266 explicit segment overrides are given. */
267 const seg_entry *seg[2];
269 /* PREFIX holds all the given prefix opcodes (usually null).
270 PREFIXES is the number of prefix opcodes. */
271 unsigned int prefixes;
272 unsigned char prefix[MAX_PREFIXES];
274 /* RM and SIB are the modrm byte and the sib byte where the
275 addressing modes of this insn are encoded. */
281 /* Swap operand in encoding. */
282 unsigned int swap_operand;
284 /* Prefer 8bit or 32bit displacement in encoding. */
287 disp_encoding_default = 0,
292 /* Have HLE prefix. */
293 unsigned int have_hle;
296 enum i386_error error;
299 typedef struct _i386_insn i386_insn;
301 /* List of chars besides those in app.c:symbol_chars that can start an
302 operand. Used to prevent the scrubber eating vital white-space. */
303 const char extra_symbol_chars[] = "*%-(["
312 #if (defined (TE_I386AIX) \
313 || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)) \
314 && !defined (TE_GNU) \
315 && !defined (TE_LINUX) \
316 && !defined (TE_NACL) \
317 && !defined (TE_NETWARE) \
318 && !defined (TE_FreeBSD) \
319 && !defined (TE_DragonFly) \
320 && !defined (TE_NetBSD)))
321 /* This array holds the chars that always start a comment. If the
322 pre-processor is disabled, these aren't very useful. The option
323 --divide will remove '/' from this list. */
324 const char *i386_comment_chars = "#/";
325 #define SVR4_COMMENT_CHARS 1
326 #define PREFIX_SEPARATOR '\\'
329 const char *i386_comment_chars = "#";
330 #define PREFIX_SEPARATOR '/'
333 /* This array holds the chars that only start a comment at the beginning of
334 a line. If the line seems to have the form '# 123 filename'
335 .line and .file directives will appear in the pre-processed output.
336 Note that input_file.c hand checks for '#' at the beginning of the
337 first line of the input file. This is because the compiler outputs
338 #NO_APP at the beginning of its output.
339 Also note that comments started like this one will always work if
340 '/' isn't otherwise defined. */
341 const char line_comment_chars[] = "#/";
343 const char line_separator_chars[] = ";";
345 /* Chars that can be used to separate mant from exp in floating point
347 const char EXP_CHARS[] = "eE";
349 /* Chars that mean this number is a floating point constant
352 const char FLT_CHARS[] = "fFdDxX";
354 /* Tables for lexical analysis. */
355 static char mnemonic_chars[256];
356 static char register_chars[256];
357 static char operand_chars[256];
358 static char identifier_chars[256];
359 static char digit_chars[256];
361 /* Lexical macros. */
362 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
363 #define is_operand_char(x) (operand_chars[(unsigned char) x])
364 #define is_register_char(x) (register_chars[(unsigned char) x])
365 #define is_space_char(x) ((x) == ' ')
366 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
367 #define is_digit_char(x) (digit_chars[(unsigned char) x])
369 /* All non-digit non-letter characters that may occur in an operand. */
370 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
372 /* md_assemble() always leaves the strings it's passed unaltered. To
373 effect this we maintain a stack of saved characters that we've smashed
374 with '\0's (indicating end of strings for various sub-fields of the
375 assembler instruction). */
376 static char save_stack[32];
377 static char *save_stack_p;
378 #define END_STRING_AND_SAVE(s) \
379 do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
380 #define RESTORE_END_STRING(s) \
381 do { *(s) = *--save_stack_p; } while (0)
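/* Added sketch of how the pair above is meant to be used: temporarily
   NUL-terminate a token in place, work on it, then undo the damage.
   The caller shown here is hypothetical.

     END_STRING_AND_SAVE (end_op);             -- save *end_op, plant '\0'
     r = parse_register (op_string, &end_op);  -- work on the terminated token
     RESTORE_END_STRING (end_op);              -- put the saved byte back  */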
383 /* The instruction we're assembling. */
386 /* Possible templates for current insn. */
387 static const templates *current_templates;
389 /* Per instruction expressionS buffers: max displacements & immediates. */
390 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
391 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
393 /* Current operand we are working on. */
394 static int this_operand = -1;
396 /* We support four different modes. FLAG_CODE variable is used to distinguish
404 static enum flag_code flag_code;
405 static unsigned int object_64bit;
406 static unsigned int disallow_64bit_reloc;
407 static int use_rela_relocations = 0;
409 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
410 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
411 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
413 /* The ELF ABI to use. */
421 static enum x86_elf_abi x86_elf_abi = I386_ABI;
424 /* The names used to print error messages. */
425 static const char *flag_code_names[] =
432 /* 1 for intel syntax, 0 if att syntax. */
434 static int intel_syntax = 0;
436 /* 1 for intel mnemonic,
437 0 if att mnemonic. */
438 static int intel_mnemonic = !SYSV386_COMPAT;
440 /* 1 if support old (<= 2.8.1) versions of gcc. */
441 static int old_gcc = OLDGCC_COMPAT;
443 /* 1 if pseudo registers are permitted. */
444 static int allow_pseudo_reg = 0;
446 /* 1 if register prefix % not required. */
447 static int allow_naked_reg = 0;
449 /* 1 if pseudo index register, eiz/riz, is allowed. */
450 static int allow_index_reg = 0;
460 /* Register prefix used for error message. */
461 static const char *register_prefix = "%";
463 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
464 leave, push, and pop instructions so that gcc has the same stack
465 frame as in 32 bit mode. */
466 static char stackop_size = '\0';
468 /* Non-zero to optimize code alignment. */
469 int optimize_align_code = 1;
471 /* Non-zero to quieten some warnings. */
472 static int quiet_warnings = 0;
475 static const char *cpu_arch_name = NULL;
476 static char *cpu_sub_arch_name = NULL;
478 /* CPU feature flags. */
479 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
481 /* If we have selected a cpu we are generating instructions for. */
482 static int cpu_arch_tune_set = 0;
484 /* Cpu we are generating instructions for. */
485 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
487 /* CPU feature flags of cpu we are generating instructions for. */
488 static i386_cpu_flags cpu_arch_tune_flags;
490 /* CPU instruction set architecture used. */
491 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
493 /* CPU feature flags of instruction set architecture used. */
494 i386_cpu_flags cpu_arch_isa_flags;
496 /* If set, conditional jumps are not automatically promoted to handle
497 larger than a byte offset. */
498 static unsigned int no_cond_jump_promotion = 0;
500 /* Encode SSE instructions with VEX prefix. */
501 static unsigned int sse2avx;
503 /* Encode scalar AVX instructions with specific vector length. */
510 /* Pre-defined "_GLOBAL_OFFSET_TABLE_". */
511 static symbolS *GOT_symbol;
513 /* The dwarf2 return column, adjusted for 32 or 64 bit. */
514 unsigned int x86_dwarf2_return_column;
516 /* The dwarf2 data alignment, adjusted for 32 or 64 bit. */
517 int x86_cie_data_alignment;
519 /* Interface to relax_segment.
520 There are 3 major relax states for 386 jump insns because the
521 different types of jumps add different sizes to frags when we're
522 figuring out what sort of jump to choose to reach a given label. */
525 #define UNCOND_JUMP 0
527 #define COND_JUMP86 2
532 #define SMALL16 (SMALL | CODE16)
534 #define BIG16 (BIG | CODE16)
538 #define INLINE __inline__
544 #define ENCODE_RELAX_STATE(type, size) \
545 ((relax_substateT) (((type) << 2) | (size)))
546 #define TYPE_FROM_RELAX_STATE(s) \
548 #define DISP_SIZE_FROM_RELAX_STATE(s) \
549 ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
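/* Added worked example, assuming the usual values from the elided defines
   above (SMALL = 0, CODE16 = 1, BIG = 2, COND_JUMP = 1) and that
   TYPE_FROM_RELAX_STATE recovers the type with (s) >> 2:

     ENCODE_RELAX_STATE (COND_JUMP, BIG) == (1 << 2) | 2 == 6
     TYPE_FROM_RELAX_STATE (6)           == COND_JUMP
     DISP_SIZE_FROM_RELAX_STATE (6)      == 4   -- BIG, 32-bit displacement
     DISP_SIZE_FROM_RELAX_STATE (7)      == 2   -- BIG16, 16-bit displacement  */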
551 /* This table is used by relax_frag to promote short jumps to long
552 ones where necessary. SMALL (short) jumps may be promoted to BIG
553 (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long). We
554 don't allow a short jump in a 32 bit code segment to be promoted to
555 a 16 bit offset jump because it's slower (requires data size
556 prefix), and doesn't work unless the destination is in the bottom
557 64k of the code segment (the top 16 bits of eip are zeroed). */
559 const relax_typeS md_relax_table[] =
562 1) most positive reach of this state,
563 2) most negative reach of this state,
564 3) how many bytes this mode will have in the variable part of the frag
565 4) which index into the table to try if we can't fit into this one. */
567 /* UNCOND_JUMP states. */
568 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
569 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
570 /* dword jmp adds 4 bytes to frag:
571 0 extra opcode bytes, 4 displacement bytes. */
573 /* word jmp adds 2 bytes to frag:
574 0 extra opcode bytes, 2 displacement bytes. */
577 /* COND_JUMP states. */
578 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
579 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
580 /* dword conditionals add 5 bytes to frag:
581 1 extra opcode byte, 4 displacement bytes. */
583 /* word conditionals add 3 bytes to frag:
584 1 extra opcode byte, 2 displacement bytes. */
587 /* COND_JUMP86 states. */
588 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
589 {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
590 /* dword conditionals add 5 bytes to frag:
591 1 extra opcode byte, 4 displacement bytes. */
593 /* word conditionals add 4 bytes to frag:
594 1 displacement byte and a 3 byte long branch insn. */
598 static const arch_entry cpu_arch[] =
600 /* Do not replace the first two entries - i386_target_format()
601 relies on them being there in this order. */
602 { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
603 CPU_GENERIC32_FLAGS, 0, 0 },
604 { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
605 CPU_GENERIC64_FLAGS, 0, 0 },
606 { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
607 CPU_NONE_FLAGS, 0, 0 },
608 { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
609 CPU_I186_FLAGS, 0, 0 },
610 { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
611 CPU_I286_FLAGS, 0, 0 },
612 { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
613 CPU_I386_FLAGS, 0, 0 },
614 { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
615 CPU_I486_FLAGS, 0, 0 },
616 { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
617 CPU_I586_FLAGS, 0, 0 },
618 { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
619 CPU_I686_FLAGS, 0, 0 },
620 { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
621 CPU_I586_FLAGS, 0, 0 },
622 { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
623 CPU_PENTIUMPRO_FLAGS, 0, 0 },
624 { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
625 CPU_P2_FLAGS, 0, 0 },
626 { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
627 CPU_P3_FLAGS, 0, 0 },
628 { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
629 CPU_P4_FLAGS, 0, 0 },
630 { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
631 CPU_CORE_FLAGS, 0, 0 },
632 { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
633 CPU_NOCONA_FLAGS, 0, 0 },
634 { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
635 CPU_CORE_FLAGS, 1, 0 },
636 { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
637 CPU_CORE_FLAGS, 0, 0 },
638 { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
639 CPU_CORE2_FLAGS, 1, 0 },
640 { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
641 CPU_CORE2_FLAGS, 0, 0 },
642 { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
643 CPU_COREI7_FLAGS, 0, 0 },
644 { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
645 CPU_L1OM_FLAGS, 0, 0 },
646 { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
647 CPU_K1OM_FLAGS, 0, 0 },
648 { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
649 CPU_K6_FLAGS, 0, 0 },
650 { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
651 CPU_K6_2_FLAGS, 0, 0 },
652 { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
653 CPU_ATHLON_FLAGS, 0, 0 },
654 { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
655 CPU_K8_FLAGS, 1, 0 },
656 { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
657 CPU_K8_FLAGS, 0, 0 },
658 { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
659 CPU_K8_FLAGS, 0, 0 },
660 { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
661 CPU_AMDFAM10_FLAGS, 0, 0 },
662 { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
663 CPU_BDVER1_FLAGS, 0, 0 },
664 { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
665 CPU_BDVER2_FLAGS, 0, 0 },
666 { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
667 CPU_8087_FLAGS, 0, 0 },
668 { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
669 CPU_287_FLAGS, 0, 0 },
670 { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
671 CPU_387_FLAGS, 0, 0 },
672 { STRING_COMMA_LEN (".no87"), PROCESSOR_UNKNOWN,
673 CPU_ANY87_FLAGS, 0, 1 },
674 { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
675 CPU_MMX_FLAGS, 0, 0 },
676 { STRING_COMMA_LEN (".nommx"), PROCESSOR_UNKNOWN,
677 CPU_3DNOWA_FLAGS, 0, 1 },
678 { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
679 CPU_SSE_FLAGS, 0, 0 },
680 { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
681 CPU_SSE2_FLAGS, 0, 0 },
682 { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
683 CPU_SSE3_FLAGS, 0, 0 },
684 { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
685 CPU_SSSE3_FLAGS, 0, 0 },
686 { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
687 CPU_SSE4_1_FLAGS, 0, 0 },
688 { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
689 CPU_SSE4_2_FLAGS, 0, 0 },
690 { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
691 CPU_SSE4_2_FLAGS, 0, 0 },
692 { STRING_COMMA_LEN (".nosse"), PROCESSOR_UNKNOWN,
693 CPU_ANY_SSE_FLAGS, 0, 1 },
694 { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
695 CPU_AVX_FLAGS, 0, 0 },
696 { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
697 CPU_AVX2_FLAGS, 0, 0 },
698 { STRING_COMMA_LEN (".noavx"), PROCESSOR_UNKNOWN,
699 CPU_ANY_AVX_FLAGS, 0, 1 },
700 { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
701 CPU_VMX_FLAGS, 0, 0 },
702 { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
703 CPU_VMFUNC_FLAGS, 0, 0 },
704 { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
705 CPU_SMX_FLAGS, 0, 0 },
706 { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
707 CPU_XSAVE_FLAGS, 0, 0 },
708 { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
709 CPU_XSAVEOPT_FLAGS, 0, 0 },
710 { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
711 CPU_AES_FLAGS, 0, 0 },
712 { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
713 CPU_PCLMUL_FLAGS, 0, 0 },
714 { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
715 CPU_PCLMUL_FLAGS, 1, 0 },
716 { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
717 CPU_FSGSBASE_FLAGS, 0, 0 },
718 { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
719 CPU_RDRND_FLAGS, 0, 0 },
720 { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
721 CPU_F16C_FLAGS, 0, 0 },
722 { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
723 CPU_BMI2_FLAGS, 0, 0 },
724 { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
725 CPU_FMA_FLAGS, 0, 0 },
726 { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
727 CPU_FMA4_FLAGS, 0, 0 },
728 { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
729 CPU_XOP_FLAGS, 0, 0 },
730 { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
731 CPU_LWP_FLAGS, 0, 0 },
732 { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
733 CPU_MOVBE_FLAGS, 0, 0 },
734 { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
735 CPU_EPT_FLAGS, 0, 0 },
736 { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
737 CPU_LZCNT_FLAGS, 0, 0 },
738 { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
739 CPU_HLE_FLAGS, 0, 0 },
740 { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
741 CPU_RTM_FLAGS, 0, 0 },
742 { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
743 CPU_INVPCID_FLAGS, 0, 0 },
744 { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
745 CPU_CLFLUSH_FLAGS, 0, 0 },
746 { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
747 CPU_NOP_FLAGS, 0, 0 },
748 { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
749 CPU_SYSCALL_FLAGS, 0, 0 },
750 { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
751 CPU_RDTSCP_FLAGS, 0, 0 },
752 { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
753 CPU_3DNOW_FLAGS, 0, 0 },
754 { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
755 CPU_3DNOWA_FLAGS, 0, 0 },
756 { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
757 CPU_PADLOCK_FLAGS, 0, 0 },
758 { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
759 CPU_SVME_FLAGS, 1, 0 },
760 { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
761 CPU_SVME_FLAGS, 0, 0 },
762 { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
763 CPU_SSE4A_FLAGS, 0, 0 },
764 { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
765 CPU_ABM_FLAGS, 0, 0 },
766 { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
767 CPU_BMI_FLAGS, 0, 0 },
768 { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
769 CPU_TBM_FLAGS, 0, 0 },
773 /* Like s_lcomm_internal in gas/read.c but the alignment string
774 is allowed to be optional. */
777 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
784 && *input_line_pointer == ',')
786 align = parse_align (needs_align - 1);
788 if (align == (addressT) -1)
803 bss_alloc (symbolP, size, align);
808 pe_lcomm (int needs_align)
810 s_comm_internal (needs_align * 2, pe_lcomm_internal);
814 const pseudo_typeS md_pseudo_table[] =
816 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
817 {"align", s_align_bytes, 0},
819 {"align", s_align_ptwo, 0},
821 {"arch", set_cpu_arch, 0},
825 {"lcomm", pe_lcomm, 1},
827 {"ffloat", float_cons, 'f'},
828 {"dfloat", float_cons, 'd'},
829 {"tfloat", float_cons, 'x'},
831 {"slong", signed_cons, 4},
832 {"noopt", s_ignore, 0},
833 {"optim", s_ignore, 0},
834 {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
835 {"code16", set_code_flag, CODE_16BIT},
836 {"code32", set_code_flag, CODE_32BIT},
837 {"code64", set_code_flag, CODE_64BIT},
838 {"intel_syntax", set_intel_syntax, 1},
839 {"att_syntax", set_intel_syntax, 0},
840 {"intel_mnemonic", set_intel_mnemonic, 1},
841 {"att_mnemonic", set_intel_mnemonic, 0},
842 {"allow_index_reg", set_allow_index_reg, 1},
843 {"disallow_index_reg", set_allow_index_reg, 0},
844 {"sse_check", set_sse_check, 0},
845 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
846 {"largecomm", handle_large_common, 0},
848 {"file", (void (*) (int)) dwarf2_directive_file, 0},
849 {"loc", dwarf2_directive_loc, 0},
850 {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
853 {"secrel32", pe_directive_secrel, 0},
858 /* For interface with expression (). */
859 extern char *input_line_pointer;
861 /* Hash table for instruction mnemonic lookup. */
862 static struct hash_control *op_hash;
864 /* Hash table for register lookup. */
865 static struct hash_control *reg_hash;
868 i386_align_code (fragS *fragP, int count)
870 /* Various efficient no-op patterns for aligning code labels.
871 Note: Don't try to assemble the instructions in the comments.
872 0L and 0w are not legal. */
873 static const char f32_1[] =
875 static const char f32_2[] =
876 {0x66,0x90}; /* xchg %ax,%ax */
877 static const char f32_3[] =
878 {0x8d,0x76,0x00}; /* leal 0(%esi),%esi */
879 static const char f32_4[] =
880 {0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
881 static const char f32_5[] =
883 0x8d,0x74,0x26,0x00}; /* leal 0(%esi,1),%esi */
884 static const char f32_6[] =
885 {0x8d,0xb6,0x00,0x00,0x00,0x00}; /* leal 0L(%esi),%esi */
886 static const char f32_7[] =
887 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
888 static const char f32_8[] =
890 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00}; /* leal 0L(%esi,1),%esi */
891 static const char f32_9[] =
892 {0x89,0xf6, /* movl %esi,%esi */
893 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
894 static const char f32_10[] =
895 {0x8d,0x76,0x00, /* leal 0(%esi),%esi */
896 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
897 static const char f32_11[] =
898 {0x8d,0x74,0x26,0x00, /* leal 0(%esi,1),%esi */
899 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
900 static const char f32_12[] =
901 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
902 0x8d,0xbf,0x00,0x00,0x00,0x00}; /* leal 0L(%edi),%edi */
903 static const char f32_13[] =
904 {0x8d,0xb6,0x00,0x00,0x00,0x00, /* leal 0L(%esi),%esi */
905 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
906 static const char f32_14[] =
907 {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00, /* leal 0L(%esi,1),%esi */
908 0x8d,0xbc,0x27,0x00,0x00,0x00,0x00}; /* leal 0L(%edi,1),%edi */
909 static const char f16_3[] =
910 {0x8d,0x74,0x00}; /* lea 0(%esi),%esi */
911 static const char f16_4[] =
912 {0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
913 static const char f16_5[] =
915 0x8d,0xb4,0x00,0x00}; /* lea 0w(%si),%si */
916 static const char f16_6[] =
917 {0x89,0xf6, /* mov %si,%si */
918 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
919 static const char f16_7[] =
920 {0x8d,0x74,0x00, /* lea 0(%si),%si */
921 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
922 static const char f16_8[] =
923 {0x8d,0xb4,0x00,0x00, /* lea 0w(%si),%si */
924 0x8d,0xbd,0x00,0x00}; /* lea 0w(%di),%di */
925 static const char jump_31[] =
926 {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90, /* jmp .+31; lotsa nops */
927 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
928 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
929 0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
930 static const char *const f32_patt[] = {
931 f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
932 f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
934 static const char *const f16_patt[] = {
935 f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
938 static const char alt_3[] =
940 /* nopl 0(%[re]ax) */
941 static const char alt_4[] =
942 {0x0f,0x1f,0x40,0x00};
943 /* nopl 0(%[re]ax,%[re]ax,1) */
944 static const char alt_5[] =
945 {0x0f,0x1f,0x44,0x00,0x00};
946 /* nopw 0(%[re]ax,%[re]ax,1) */
947 static const char alt_6[] =
948 {0x66,0x0f,0x1f,0x44,0x00,0x00};
949 /* nopl 0L(%[re]ax) */
950 static const char alt_7[] =
951 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
952 /* nopl 0L(%[re]ax,%[re]ax,1) */
953 static const char alt_8[] =
954 {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
955 /* nopw 0L(%[re]ax,%[re]ax,1) */
956 static const char alt_9[] =
957 {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
958 /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
959 static const char alt_10[] =
960 {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
962 nopw %cs:0L(%[re]ax,%[re]ax,1) */
963 static const char alt_long_11[] =
965 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
968 nopw %cs:0L(%[re]ax,%[re]ax,1) */
969 static const char alt_long_12[] =
972 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
976 nopw %cs:0L(%[re]ax,%[re]ax,1) */
977 static const char alt_long_13[] =
981 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
986 nopw %cs:0L(%[re]ax,%[re]ax,1) */
987 static const char alt_long_14[] =
992 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
998 nopw %cs:0L(%[re]ax,%[re]ax,1) */
999 static const char alt_long_15[] =
1005 0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1006 /* nopl 0(%[re]ax,%[re]ax,1)
1007 nopw 0(%[re]ax,%[re]ax,1) */
1008 static const char alt_short_11[] =
1009 {0x0f,0x1f,0x44,0x00,0x00,
1010 0x66,0x0f,0x1f,0x44,0x00,0x00};
1011 /* nopw 0(%[re]ax,%[re]ax,1)
1012 nopw 0(%[re]ax,%[re]ax,1) */
1013 static const char alt_short_12[] =
1014 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1015 0x66,0x0f,0x1f,0x44,0x00,0x00};
1016 /* nopw 0(%[re]ax,%[re]ax,1)
1018 static const char alt_short_13[] =
1019 {0x66,0x0f,0x1f,0x44,0x00,0x00,
1020 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1023 static const char alt_short_14[] =
1024 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1025 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1027 nopl 0L(%[re]ax,%[re]ax,1) */
1028 static const char alt_short_15[] =
1029 {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00,
1030 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1031 static const char *const alt_short_patt[] = {
1032 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1033 alt_9, alt_10, alt_short_11, alt_short_12, alt_short_13,
1034 alt_short_14, alt_short_15
1036 static const char *const alt_long_patt[] = {
1037 f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1038 alt_9, alt_10, alt_long_11, alt_long_12, alt_long_13,
1039 alt_long_14, alt_long_15
1042 /* Only align for a positive, non-zero, in-range byte count. */
1043 if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1046 /* We need to decide which NOP sequence to use for 32bit and
1047 64bit. When -mtune= is used:
1049 1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1050 PROCESSOR_GENERIC32, f32_patt will be used.
1051 2. For PROCESSOR_PENTIUMPRO, PROCESSOR_PENTIUM4, PROCESSOR_NOCONA,
1052 PROCESSOR_CORE, PROCESSOR_CORE2, PROCESSOR_COREI7, and
1053 PROCESSOR_GENERIC64, alt_long_patt will be used.
1054 3. For PROCESSOR_ATHLON, PROCESSOR_K6, PROCESSOR_K8,
1055 PROCESSOR_AMDFAM10, and PROCESSOR_BD, alt_short_patt
1058 When -mtune= isn't used, alt_long_patt will be used if
1059 cpu_arch_isa_flags has CpuNop. Otherwise, f32_patt will
1062 When -march= or .arch is used, we can't use anything beyond
1063 cpu_arch_isa_flags. */
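  /* Added example: with a tuning that selects alt_long_patt (say
     -mtune=core2 with CpuNop available) and a 6-byte gap,
     alt_long_patt[6 - 1] is alt_6, i.e. 66 0f 1f 44 00 00, a single
     "nopw 0(%[re]ax,%[re]ax,1)"; the plain f32_patt[6 - 1] choice would
     instead be the leal-based f32_6 pattern.  */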
1065 if (flag_code == CODE_16BIT)
1069 memcpy (fragP->fr_literal + fragP->fr_fix,
1071 /* Adjust jump offset. */
1072 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1075 memcpy (fragP->fr_literal + fragP->fr_fix,
1076 f16_patt[count - 1], count);
1080 const char *const *patt = NULL;
1082 if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1084 /* PROCESSOR_UNKNOWN means that all ISAs may be used. */
1085 switch (cpu_arch_tune)
1087 case PROCESSOR_UNKNOWN:
1088 /* We use cpu_arch_isa_flags to check if we SHOULD
1089 optimize with nops. */
1090 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1091 patt = alt_long_patt;
1095 case PROCESSOR_PENTIUM4:
1096 case PROCESSOR_NOCONA:
1097 case PROCESSOR_CORE:
1098 case PROCESSOR_CORE2:
1099 case PROCESSOR_COREI7:
1100 case PROCESSOR_L1OM:
1101 case PROCESSOR_K1OM:
1102 case PROCESSOR_GENERIC64:
1103 patt = alt_long_patt;
1106 case PROCESSOR_ATHLON:
1108 case PROCESSOR_AMDFAM10:
1110 patt = alt_short_patt;
1112 case PROCESSOR_I386:
1113 case PROCESSOR_I486:
1114 case PROCESSOR_PENTIUM:
1115 case PROCESSOR_PENTIUMPRO:
1116 case PROCESSOR_GENERIC32:
1123 switch (fragP->tc_frag_data.tune)
1125 case PROCESSOR_UNKNOWN:
1126 /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1127 PROCESSOR_UNKNOWN. */
1131 case PROCESSOR_I386:
1132 case PROCESSOR_I486:
1133 case PROCESSOR_PENTIUM:
1135 case PROCESSOR_ATHLON:
1137 case PROCESSOR_AMDFAM10:
1139 case PROCESSOR_GENERIC32:
1140 /* We use cpu_arch_isa_flags to check if we CAN optimize with nops. */
1142 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1143 patt = alt_short_patt;
1147 case PROCESSOR_PENTIUMPRO:
1148 case PROCESSOR_PENTIUM4:
1149 case PROCESSOR_NOCONA:
1150 case PROCESSOR_CORE:
1151 case PROCESSOR_CORE2:
1152 case PROCESSOR_COREI7:
1153 case PROCESSOR_L1OM:
1154 case PROCESSOR_K1OM:
1155 if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1156 patt = alt_long_patt;
1160 case PROCESSOR_GENERIC64:
1161 patt = alt_long_patt;
1166 if (patt == f32_patt)
1168 /* If the padding is less than 15 bytes, we use the normal
1169 ones. Otherwise, we use a jump instruction and adjust
1173 /* For 64bit, the limit is 3 bytes. */
1174 if (flag_code == CODE_64BIT
1175 && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1180 memcpy (fragP->fr_literal + fragP->fr_fix,
1181 patt[count - 1], count);
1184 memcpy (fragP->fr_literal + fragP->fr_fix,
1186 /* Adjust jump offset. */
1187 fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1192 /* Maximum length of an instruction is 15 bytes. If the
1193 padding is greater than 15 bytes and we don't use a jump,
1194 we have to break it into smaller pieces. */
1195 int padding = count;
1196 while (padding > 15)
1199 memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1204 memcpy (fragP->fr_literal + fragP->fr_fix,
1205 patt [padding - 1], padding);
1208 fragP->fr_var = count;
1212 operand_type_all_zero (const union i386_operand_type *x)
1214 switch (ARRAY_SIZE(x->array))
1223 return !x->array[0];
1230 operand_type_set (union i386_operand_type *x, unsigned int v)
1232 switch (ARRAY_SIZE(x->array))
1247 operand_type_equal (const union i386_operand_type *x,
1248 const union i386_operand_type *y)
1250 switch (ARRAY_SIZE(x->array))
1253 if (x->array[2] != y->array[2])
1256 if (x->array[1] != y->array[1])
1259 return x->array[0] == y->array[0];
1267 cpu_flags_all_zero (const union i386_cpu_flags *x)
1269 switch (ARRAY_SIZE(x->array))
1278 return !x->array[0];
1285 cpu_flags_set (union i386_cpu_flags *x, unsigned int v)
1287 switch (ARRAY_SIZE(x->array))
1302 cpu_flags_equal (const union i386_cpu_flags *x,
1303 const union i386_cpu_flags *y)
1305 switch (ARRAY_SIZE(x->array))
1308 if (x->array[2] != y->array[2])
1311 if (x->array[1] != y->array[1])
1314 return x->array[0] == y->array[0];
1322 cpu_flags_check_cpu64 (i386_cpu_flags f)
1324 return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1325 || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1328 static INLINE i386_cpu_flags
1329 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1331 switch (ARRAY_SIZE (x.array))
1334 x.array [2] &= y.array [2];
1336 x.array [1] &= y.array [1];
1338 x.array [0] &= y.array [0];
1346 static INLINE i386_cpu_flags
1347 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1349 switch (ARRAY_SIZE (x.array))
1352 x.array [2] |= y.array [2];
1354 x.array [1] |= y.array [1];
1356 x.array [0] |= y.array [0];
1364 static INLINE i386_cpu_flags
1365 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1367 switch (ARRAY_SIZE (x.array))
1370 x.array [2] &= ~y.array [2];
1372 x.array [1] &= ~y.array [1];
1374 x.array [0] &= ~y.array [0];
1382 #define CPU_FLAGS_ARCH_MATCH 0x1
1383 #define CPU_FLAGS_64BIT_MATCH 0x2
1384 #define CPU_FLAGS_AES_MATCH 0x4
1385 #define CPU_FLAGS_PCLMUL_MATCH 0x8
1386 #define CPU_FLAGS_AVX_MATCH 0x10
1388 #define CPU_FLAGS_32BIT_MATCH \
1389 (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1390 | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1391 #define CPU_FLAGS_PERFECT_MATCH \
1392 (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
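/* Added note: with the bit values above, CPU_FLAGS_32BIT_MATCH works out
   to 0x1 | 0x4 | 0x8 | 0x10 == 0x1d and CPU_FLAGS_PERFECT_MATCH to 0x1f,
   so cpu_flags_match () below reports a perfect match by OR-ing the
   individual *_MATCH bits together.  */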
1394 /* Return CPU flags match bits. */
1397 cpu_flags_match (const insn_template *t)
1399 i386_cpu_flags x = t->cpu_flags;
1400 int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1402 x.bitfield.cpu64 = 0;
1403 x.bitfield.cpuno64 = 0;
1405 if (cpu_flags_all_zero (&x))
1407 /* This instruction is available on all archs. */
1408 match |= CPU_FLAGS_32BIT_MATCH;
1412 /* This instruction is available only on some archs. */
1413 i386_cpu_flags cpu = cpu_arch_flags;
1415 cpu.bitfield.cpu64 = 0;
1416 cpu.bitfield.cpuno64 = 0;
1417 cpu = cpu_flags_and (x, cpu);
1418 if (!cpu_flags_all_zero (&cpu))
1420 if (x.bitfield.cpuavx)
1422 /* We only need to check AES/PCLMUL/SSE2AVX with AVX. */
1423 if (cpu.bitfield.cpuavx)
1425 /* Check SSE2AVX. */
1426 if (!t->opcode_modifier.sse2avx || sse2avx)
1428 match |= (CPU_FLAGS_ARCH_MATCH
1429 | CPU_FLAGS_AVX_MATCH);
1431 if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1432 match |= CPU_FLAGS_AES_MATCH;
1434 if (!x.bitfield.cpupclmul
1435 || cpu.bitfield.cpupclmul)
1436 match |= CPU_FLAGS_PCLMUL_MATCH;
1440 match |= CPU_FLAGS_ARCH_MATCH;
1443 match |= CPU_FLAGS_32BIT_MATCH;
1449 static INLINE i386_operand_type
1450 operand_type_and (i386_operand_type x, i386_operand_type y)
1452 switch (ARRAY_SIZE (x.array))
1455 x.array [2] &= y.array [2];
1457 x.array [1] &= y.array [1];
1459 x.array [0] &= y.array [0];
1467 static INLINE i386_operand_type
1468 operand_type_or (i386_operand_type x, i386_operand_type y)
1470 switch (ARRAY_SIZE (x.array))
1473 x.array [2] |= y.array [2];
1475 x.array [1] |= y.array [1];
1477 x.array [0] |= y.array [0];
1485 static INLINE i386_operand_type
1486 operand_type_xor (i386_operand_type x, i386_operand_type y)
1488 switch (ARRAY_SIZE (x.array))
1491 x.array [2] ^= y.array [2];
1493 x.array [1] ^= y.array [1];
1495 x.array [0] ^= y.array [0];
1503 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1504 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1505 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1506 static const i386_operand_type inoutportreg
1507 = OPERAND_TYPE_INOUTPORTREG;
1508 static const i386_operand_type reg16_inoutportreg
1509 = OPERAND_TYPE_REG16_INOUTPORTREG;
1510 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1511 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1512 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1513 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1514 static const i386_operand_type anydisp
1515 = OPERAND_TYPE_ANYDISP;
1516 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1517 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1518 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1519 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1520 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1521 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1522 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1523 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1524 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1525 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1526 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1527 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1538 operand_type_check (i386_operand_type t, enum operand_type c)
1543 return (t.bitfield.reg8
1546 || t.bitfield.reg64);
1549 return (t.bitfield.imm8
1553 || t.bitfield.imm32s
1554 || t.bitfield.imm64);
1557 return (t.bitfield.disp8
1558 || t.bitfield.disp16
1559 || t.bitfield.disp32
1560 || t.bitfield.disp32s
1561 || t.bitfield.disp64);
1564 return (t.bitfield.disp8
1565 || t.bitfield.disp16
1566 || t.bitfield.disp32
1567 || t.bitfield.disp32s
1568 || t.bitfield.disp64
1569 || t.bitfield.baseindex);
1578 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1579 operand J for instruction template T. */
1582 match_reg_size (const insn_template *t, unsigned int j)
1584 return !((i.types[j].bitfield.byte
1585 && !t->operand_types[j].bitfield.byte)
1586 || (i.types[j].bitfield.word
1587 && !t->operand_types[j].bitfield.word)
1588 || (i.types[j].bitfield.dword
1589 && !t->operand_types[j].bitfield.dword)
1590 || (i.types[j].bitfield.qword
1591 && !t->operand_types[j].bitfield.qword));
1594 /* Return 1 if there is no conflict in any size on operand J for
1595 instruction template T. */
1598 match_mem_size (const insn_template *t, unsigned int j)
1600 return (match_reg_size (t, j)
1601 && !((i.types[j].bitfield.unspecified
1602 && !t->operand_types[j].bitfield.unspecified)
1603 || (i.types[j].bitfield.fword
1604 && !t->operand_types[j].bitfield.fword)
1605 || (i.types[j].bitfield.tbyte
1606 && !t->operand_types[j].bitfield.tbyte)
1607 || (i.types[j].bitfield.xmmword
1608 && !t->operand_types[j].bitfield.xmmword)
1609 || (i.types[j].bitfield.ymmword
1610 && !t->operand_types[j].bitfield.ymmword)));
1613 /* Return 1 if there is no size conflict on any operands for
1614 instruction template T. */
1617 operand_size_match (const insn_template *t)
1622 /* Don't check jump instructions. */
1623 if (t->opcode_modifier.jump
1624 || t->opcode_modifier.jumpbyte
1625 || t->opcode_modifier.jumpdword
1626 || t->opcode_modifier.jumpintersegment)
1629 /* Check memory and accumulator operand size. */
1630 for (j = 0; j < i.operands; j++)
1632 if (t->operand_types[j].bitfield.anysize)
1635 if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1641 if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1650 else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1653 i.error = operand_size_mismatch;
1657 /* Check reverse. */
1658 gas_assert (i.operands == 2);
1661 for (j = 0; j < 2; j++)
1663 if (t->operand_types[j].bitfield.acc
1664 && !match_reg_size (t, j ? 0 : 1))
1667 if (i.types[j].bitfield.mem
1668 && !match_mem_size (t, j ? 0 : 1))
1676 operand_type_match (i386_operand_type overlap,
1677 i386_operand_type given)
1679 i386_operand_type temp = overlap;
1681 temp.bitfield.jumpabsolute = 0;
1682 temp.bitfield.unspecified = 0;
1683 temp.bitfield.byte = 0;
1684 temp.bitfield.word = 0;
1685 temp.bitfield.dword = 0;
1686 temp.bitfield.fword = 0;
1687 temp.bitfield.qword = 0;
1688 temp.bitfield.tbyte = 0;
1689 temp.bitfield.xmmword = 0;
1690 temp.bitfield.ymmword = 0;
1691 if (operand_type_all_zero (&temp))
1694 if (given.bitfield.baseindex == overlap.bitfield.baseindex
1695 && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1699 i.error = operand_type_mismatch;
1703 /* If given types g0 and g1 are registers, they must be of the same type
1704 unless the expected operand type register overlap is null.
1705 Note that Acc in a template matches every size of reg. */
1708 operand_type_register_match (i386_operand_type m0,
1709 i386_operand_type g0,
1710 i386_operand_type t0,
1711 i386_operand_type m1,
1712 i386_operand_type g1,
1713 i386_operand_type t1)
1715 if (!operand_type_check (g0, reg))
1718 if (!operand_type_check (g1, reg))
1721 if (g0.bitfield.reg8 == g1.bitfield.reg8
1722 && g0.bitfield.reg16 == g1.bitfield.reg16
1723 && g0.bitfield.reg32 == g1.bitfield.reg32
1724 && g0.bitfield.reg64 == g1.bitfield.reg64)
1727 if (m0.bitfield.acc)
1729 t0.bitfield.reg8 = 1;
1730 t0.bitfield.reg16 = 1;
1731 t0.bitfield.reg32 = 1;
1732 t0.bitfield.reg64 = 1;
1735 if (m1.bitfield.acc)
1737 t1.bitfield.reg8 = 1;
1738 t1.bitfield.reg16 = 1;
1739 t1.bitfield.reg32 = 1;
1740 t1.bitfield.reg64 = 1;
1743 if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1744 && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1745 && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1746 && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1749 i.error = register_type_mismatch;
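/* Added example: for a hypothetical "xchg %ax,%bx" both given operands
   are Reg16, so the size-equality check above succeeds immediately;
   pairing a Reg32 with a Reg8 operand only matches when, after the Acc
   widening above, the two template overlaps still share some register
   size bit.  */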
1754 static INLINE unsigned int
1755 mode_from_disp_size (i386_operand_type t)
1757 if (t.bitfield.disp8)
1759 else if (t.bitfield.disp16
1760 || t.bitfield.disp32
1761 || t.bitfield.disp32s)
1768 fits_in_signed_byte (offsetT num)
1770 return (num >= -128) && (num <= 127);
1774 fits_in_unsigned_byte (offsetT num)
1776 return (num & 0xff) == num;
1780 fits_in_unsigned_word (offsetT num)
1782 return (num & 0xffff) == num;
1786 fits_in_signed_word (offsetT num)
1788 return (-32768 <= num) && (num <= 32767);
1792 fits_in_signed_long (offsetT num ATTRIBUTE_UNUSED)
1797 return (!(((offsetT) -1 << 31) & num)
1798 || (((offsetT) -1 << 31) & num) == ((offsetT) -1 << 31));
1800 } /* fits_in_signed_long() */
1803 fits_in_unsigned_long (offsetT num ATTRIBUTE_UNUSED)
1808 return (num & (((offsetT) 2 << 31) - 1)) == num;
1810 } /* fits_in_unsigned_long() */
1813 fits_in_imm4 (offsetT num)
1815 return (num & 0xf) == num;
1818 static i386_operand_type
1819 smallest_imm_type (offsetT num)
1821 i386_operand_type t;
1823 operand_type_set (&t, 0);
1824 t.bitfield.imm64 = 1;
1826 if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
1828 /* This code is disabled on the 486 because all the Imm1 forms
1829 in the opcode table are slower on the i486. They're the
1830 versions with the implicitly specified single-position
1831 displacement, which has another syntax if you really want to use it. */
1833 t.bitfield.imm1 = 1;
1834 t.bitfield.imm8 = 1;
1835 t.bitfield.imm8s = 1;
1836 t.bitfield.imm16 = 1;
1837 t.bitfield.imm32 = 1;
1838 t.bitfield.imm32s = 1;
1840 else if (fits_in_signed_byte (num))
1842 t.bitfield.imm8 = 1;
1843 t.bitfield.imm8s = 1;
1844 t.bitfield.imm16 = 1;
1845 t.bitfield.imm32 = 1;
1846 t.bitfield.imm32s = 1;
1848 else if (fits_in_unsigned_byte (num))
1850 t.bitfield.imm8 = 1;
1851 t.bitfield.imm16 = 1;
1852 t.bitfield.imm32 = 1;
1853 t.bitfield.imm32s = 1;
1855 else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
1857 t.bitfield.imm16 = 1;
1858 t.bitfield.imm32 = 1;
1859 t.bitfield.imm32s = 1;
1861 else if (fits_in_signed_long (num))
1863 t.bitfield.imm32 = 1;
1864 t.bitfield.imm32s = 1;
1866 else if (fits_in_unsigned_long (num))
1867 t.bitfield.imm32 = 1;
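/* Added examples of the classification above (the constants are only
   illustrative): 1 normally gets Imm1|Imm8|Imm8S|Imm16|Imm32|Imm32S,
   200 fits an unsigned but not a signed byte and gets
   Imm8|Imm16|Imm32|Imm32S, and -70000 only fits 32 bits signed and gets
   Imm32|Imm32S; Imm64 is set unconditionally on entry.  */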
1873 offset_in_range (offsetT val, int size)
1879 case 1: mask = ((addressT) 1 << 8) - 1; break;
1880 case 2: mask = ((addressT) 1 << 16) - 1; break;
1881 case 4: mask = ((addressT) 2 << 31) - 1; break;
1883 case 8: mask = ((addressT) 2 << 63) - 1; break;
1889 /* If BFD64, sign extend val for 32bit address mode. */
1890 if (flag_code != CODE_64BIT
1891 || i.prefix[ADDR_PREFIX])
1892 if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
1893 val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
1896 if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
1898 char buf1[40], buf2[40];
1900 sprint_value (buf1, val);
1901 sprint_value (buf2, val & mask);
1902 as_warn (_("%s shortened to %s"), buf1, buf2);
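/* Added example: offset_in_range (0x12345, 2) masks the value down to
   0x2345 and warns "... shortened to ...", because the discarded high
   bits are neither all zeros nor all ones; the exact message text is
   produced by sprint_value above.  */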
1916 a. PREFIX_EXIST if attempting to add a prefix where one from the
1917 same class already exists.
1918 b. PREFIX_LOCK if lock prefix is added.
1919 c. PREFIX_REP if rep/repne prefix is added.
1920 d. PREFIX_OTHER if other prefix is added.
1923 static enum PREFIX_GROUP
1924 add_prefix (unsigned int prefix)
1926 enum PREFIX_GROUP ret = PREFIX_OTHER;
1929 if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
1930 && flag_code == CODE_64BIT)
1932 if ((i.prefix[REX_PREFIX] & prefix & REX_W)
1933 || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
1934 && (prefix & (REX_R | REX_X | REX_B))))
1945 case CS_PREFIX_OPCODE:
1946 case DS_PREFIX_OPCODE:
1947 case ES_PREFIX_OPCODE:
1948 case FS_PREFIX_OPCODE:
1949 case GS_PREFIX_OPCODE:
1950 case SS_PREFIX_OPCODE:
1954 case REPNE_PREFIX_OPCODE:
1955 case REPE_PREFIX_OPCODE:
1960 case LOCK_PREFIX_OPCODE:
1969 case ADDR_PREFIX_OPCODE:
1973 case DATA_PREFIX_OPCODE:
1977 if (i.prefix[q] != 0)
1985 i.prefix[q] |= prefix;
1988 as_bad (_("same type of prefix used twice"));
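/* Added example: the first add_prefix (LOCK_PREFIX_OPCODE) stores the
   byte in i.prefix[LOCK_PREFIX] and returns PREFIX_LOCK; a second one on
   the same insn finds that slot already occupied, so it reaches the
   "same type of prefix used twice" diagnostic and returns PREFIX_EXIST.  */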
1994 update_code_flag (int value, int check)
1996 PRINTF_LIKE ((*as_error));
1998 flag_code = (enum flag_code) value;
1999 if (flag_code == CODE_64BIT)
2001 cpu_arch_flags.bitfield.cpu64 = 1;
2002 cpu_arch_flags.bitfield.cpuno64 = 0;
2006 cpu_arch_flags.bitfield.cpu64 = 0;
2007 cpu_arch_flags.bitfield.cpuno64 = 1;
2009 if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
2012 as_error = as_fatal;
2015 (*as_error) (_("64bit mode not supported on `%s'."),
2016 cpu_arch_name ? cpu_arch_name : default_arch);
2018 if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2021 as_error = as_fatal;
2024 (*as_error) (_("32bit mode not supported on `%s'."),
2025 cpu_arch_name ? cpu_arch_name : default_arch);
2027 stackop_size = '\0';
2031 set_code_flag (int value)
2033 update_code_flag (value, 0);
2037 set_16bit_gcc_code_flag (int new_code_flag)
2039 flag_code = (enum flag_code) new_code_flag;
2040 if (flag_code != CODE_16BIT)
2042 cpu_arch_flags.bitfield.cpu64 = 0;
2043 cpu_arch_flags.bitfield.cpuno64 = 1;
2044 stackop_size = LONG_MNEM_SUFFIX;
2048 set_intel_syntax (int syntax_flag)
2050 /* Find out if register prefixing is specified. */
2051 int ask_naked_reg = 0;
2054 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2056 char *string = input_line_pointer;
2057 int e = get_symbol_end ();
2059 if (strcmp (string, "prefix") == 0)
2061 else if (strcmp (string, "noprefix") == 0)
2064 as_bad (_("bad argument to syntax directive."));
2065 *input_line_pointer = e;
2067 demand_empty_rest_of_line ();
2069 intel_syntax = syntax_flag;
2071 if (ask_naked_reg == 0)
2072 allow_naked_reg = (intel_syntax
2073 && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2075 allow_naked_reg = (ask_naked_reg < 0);
2077 expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2079 identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2080 identifier_chars['$'] = intel_syntax ? '$' : 0;
2081 register_prefix = allow_naked_reg ? "" : "%";
2085 set_intel_mnemonic (int mnemonic_flag)
2087 intel_mnemonic = mnemonic_flag;
2091 set_allow_index_reg (int flag)
2093 allow_index_reg = flag;
2097 set_sse_check (int dummy ATTRIBUTE_UNUSED)
2101 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2103 char *string = input_line_pointer;
2104 int e = get_symbol_end ();
2106 if (strcmp (string, "none") == 0)
2107 sse_check = sse_check_none;
2108 else if (strcmp (string, "warning") == 0)
2109 sse_check = sse_check_warning;
2110 else if (strcmp (string, "error") == 0)
2111 sse_check = sse_check_error;
2113 as_bad (_("bad argument to sse_check directive."));
2114 *input_line_pointer = e;
2117 as_bad (_("missing argument for sse_check directive"));
2119 demand_empty_rest_of_line ();
2123 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2124 i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2126 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2127 static const char *arch;
2129 /* Intel L1OM and K1OM are only supported on ELF. */
2135 /* Use cpu_arch_name if it is set in md_parse_option. Otherwise
2136 use default_arch. */
2137 arch = cpu_arch_name;
2139 arch = default_arch;
2142 /* If we are targeting Intel L1OM, we must enable it. */
2143 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2144 || new_flag.bitfield.cpul1om)
2147 /* If we are targeting Intel K1OM, we must enable it. */
2148 if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2149 || new_flag.bitfield.cpuk1om)
2152 as_bad (_("`%s' is not supported on `%s'"), name, arch);
2157 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2161 if (!is_end_of_line[(unsigned char) *input_line_pointer])
2163 char *string = input_line_pointer;
2164 int e = get_symbol_end ();
2166 i386_cpu_flags flags;
2168 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2170 if (strcmp (string, cpu_arch[j].name) == 0)
2172 check_cpu_arch_compatible (string, cpu_arch[j].flags);
2176 cpu_arch_name = cpu_arch[j].name;
2177 cpu_sub_arch_name = NULL;
2178 cpu_arch_flags = cpu_arch[j].flags;
2179 if (flag_code == CODE_64BIT)
2181 cpu_arch_flags.bitfield.cpu64 = 1;
2182 cpu_arch_flags.bitfield.cpuno64 = 0;
2186 cpu_arch_flags.bitfield.cpu64 = 0;
2187 cpu_arch_flags.bitfield.cpuno64 = 1;
2189 cpu_arch_isa = cpu_arch[j].type;
2190 cpu_arch_isa_flags = cpu_arch[j].flags;
2191 if (!cpu_arch_tune_set)
2193 cpu_arch_tune = cpu_arch_isa;
2194 cpu_arch_tune_flags = cpu_arch_isa_flags;
2199 if (!cpu_arch[j].negated)
2200 flags = cpu_flags_or (cpu_arch_flags,
2203 flags = cpu_flags_and_not (cpu_arch_flags,
2205 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2207 if (cpu_sub_arch_name)
2209 char *name = cpu_sub_arch_name;
2210 cpu_sub_arch_name = concat (name,
2212 (const char *) NULL);
2216 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2217 cpu_arch_flags = flags;
2218 cpu_arch_isa_flags = flags;
2220 *input_line_pointer = e;
2221 demand_empty_rest_of_line ();
2225 if (j >= ARRAY_SIZE (cpu_arch))
2226 as_bad (_("no such architecture: `%s'"), string);
2228 *input_line_pointer = e;
2231 as_bad (_("missing cpu architecture"));
2233 no_cond_jump_promotion = 0;
2234 if (*input_line_pointer == ','
2235 && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2237 char *string = ++input_line_pointer;
2238 int e = get_symbol_end ();
2240 if (strcmp (string, "nojumps") == 0)
2241 no_cond_jump_promotion = 1;
2242 else if (strcmp (string, "jumps") == 0)
2245 as_bad (_("no such architecture modifier: `%s'"), string);
2247 *input_line_pointer = e;
2250 demand_empty_rest_of_line ();
2253 enum bfd_architecture
2256 if (cpu_arch_isa == PROCESSOR_L1OM)
2258 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2259 || flag_code != CODE_64BIT)
2260 as_fatal (_("Intel L1OM is 64bit ELF only"));
2261 return bfd_arch_l1om;
2263 else if (cpu_arch_isa == PROCESSOR_K1OM)
2265 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2266 || flag_code != CODE_64BIT)
2267 as_fatal (_("Intel K1OM is 64bit ELF only"));
2268 return bfd_arch_k1om;
2271 return bfd_arch_i386;
2277 if (!strncmp (default_arch, "x86_64", 6))
2279 if (cpu_arch_isa == PROCESSOR_L1OM)
2281 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2282 || default_arch[6] != '\0')
2283 as_fatal (_("Intel L1OM is 64bit ELF only"));
2284 return bfd_mach_l1om;
2286 else if (cpu_arch_isa == PROCESSOR_K1OM)
2288 if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2289 || default_arch[6] != '\0')
2290 as_fatal (_("Intel K1OM is 64bit ELF only"));
2291 return bfd_mach_k1om;
2293 else if (default_arch[6] == '\0')
2294 return bfd_mach_x86_64;
2296 return bfd_mach_x64_32;
2298 else if (!strcmp (default_arch, "i386"))
2299 return bfd_mach_i386_i386;
2301 as_fatal (_("unknown architecture"));
2307 const char *hash_err;
2309 /* Initialize op_hash hash table. */
2310 op_hash = hash_new ();
2313 const insn_template *optab;
2314 templates *core_optab;
2316 /* Setup for loop. */
2318 core_optab = (templates *) xmalloc (sizeof (templates));
2319 core_optab->start = optab;
2324 if (optab->name == NULL
2325 || strcmp (optab->name, (optab - 1)->name) != 0)
2327 /* different name --> ship out current template list;
2328 add to hash table; & begin anew. */
2329 core_optab->end = optab;
2330 hash_err = hash_insert (op_hash,
2332 (void *) core_optab);
2335 as_fatal (_("internal Error: Can't hash %s: %s"),
2339 if (optab->name == NULL)
2341 core_optab = (templates *) xmalloc (sizeof (templates));
2342 core_optab->start = optab;
2347 /* Initialize reg_hash hash table. */
2348 reg_hash = hash_new ();
2350 const reg_entry *regtab;
2351 unsigned int regtab_size = i386_regtab_size;
2353 for (regtab = i386_regtab; regtab_size--; regtab++)
2355 hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2357 as_fatal (_("internal Error: Can't hash %s: %s"),
2363 /* Fill in lexical tables: mnemonic_chars, operand_chars. */
2368 for (c = 0; c < 256; c++)
2373 mnemonic_chars[c] = c;
2374 register_chars[c] = c;
2375 operand_chars[c] = c;
2377 else if (ISLOWER (c))
2379 mnemonic_chars[c] = c;
2380 register_chars[c] = c;
2381 operand_chars[c] = c;
2383 else if (ISUPPER (c))
2385 mnemonic_chars[c] = TOLOWER (c);
2386 register_chars[c] = mnemonic_chars[c];
2387 operand_chars[c] = c;
2390 if (ISALPHA (c) || ISDIGIT (c))
2391 identifier_chars[c] = c;
2394 identifier_chars[c] = c;
2395 operand_chars[c] = c;
2400 identifier_chars['@'] = '@';
2403 identifier_chars['?'] = '?';
2404 operand_chars['?'] = '?';
2406 digit_chars['-'] = '-';
2407 mnemonic_chars['_'] = '_';
2408 mnemonic_chars['-'] = '-';
2409 mnemonic_chars['.'] = '.';
2410 identifier_chars['_'] = '_';
2411 identifier_chars['.'] = '.';
2413 for (p = operand_special_chars; *p != '\0'; p++)
2414 operand_chars[(unsigned char) *p] = *p;
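/* After the loops above, the mnemonic and register lookup tables fold
   upper case to lower case, while the identifier and operand character
   sets additionally admit digits and the punctuation set up just
   before this point. */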
2417 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2420 record_alignment (text_section, 2);
2421 record_alignment (data_section, 2);
2422 record_alignment (bss_section, 2);
2426 if (flag_code == CODE_64BIT)
2428 #if defined (OBJ_COFF) && defined (TE_PE)
2429 x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2432 x86_dwarf2_return_column = 16;
2434 x86_cie_data_alignment = -8;
2438 x86_dwarf2_return_column = 8;
2439 x86_cie_data_alignment = -4;
2444 i386_print_statistics (FILE *file)
2446 hash_print_statistics (file, "i386 opcode", op_hash);
2447 hash_print_statistics (file, "i386 register", reg_hash);
2452 /* Debugging routines for md_assemble. */
2453 static void pte (insn_template *);
2454 static void pt (i386_operand_type);
2455 static void pe (expressionS *);
2456 static void ps (symbolS *);
2459 pi (char *line, i386_insn *x)
2463 fprintf (stdout, "%s: template ", line);
2465 fprintf (stdout, " address: base %s index %s scale %x\n",
2466 x->base_reg ? x->base_reg->reg_name : "none",
2467 x->index_reg ? x->index_reg->reg_name : "none",
2468 x->log2_scale_factor);
2469 fprintf (stdout, " modrm: mode %x reg %x reg/mem %x\n",
2470 x->rm.mode, x->rm.reg, x->rm.regmem);
2471 fprintf (stdout, " sib: base %x index %x scale %x\n",
2472 x->sib.base, x->sib.index, x->sib.scale);
2473 fprintf (stdout, " rex: 64bit %x extX %x extY %x extZ %x\n",
2474 (x->rex & REX_W) != 0,
2475 (x->rex & REX_R) != 0,
2476 (x->rex & REX_X) != 0,
2477 (x->rex & REX_B) != 0);
2478 for (j = 0; j < x->operands; j++)
2480 fprintf (stdout, " #%d: ", j + 1);
2482 fprintf (stdout, "\n");
2483 if (x->types[j].bitfield.reg8
2484 || x->types[j].bitfield.reg16
2485 || x->types[j].bitfield.reg32
2486 || x->types[j].bitfield.reg64
2487 || x->types[j].bitfield.regmmx
2488 || x->types[j].bitfield.regxmm
2489 || x->types[j].bitfield.regymm
2490 || x->types[j].bitfield.sreg2
2491 || x->types[j].bitfield.sreg3
2492 || x->types[j].bitfield.control
2493 || x->types[j].bitfield.debug
2494 || x->types[j].bitfield.test)
2495 fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2496 if (operand_type_check (x->types[j], imm))
2498 if (operand_type_check (x->types[j], disp))
2499 pe (x->op[j].disps);
2504 pte (insn_template *t)
2507 fprintf (stdout, " %d operands ", t->operands);
2508 fprintf (stdout, "opcode %x ", t->base_opcode);
2509 if (t->extension_opcode != None)
2510 fprintf (stdout, "ext %x ", t->extension_opcode);
2511 if (t->opcode_modifier.d)
2512 fprintf (stdout, "D");
2513 if (t->opcode_modifier.w)
2514 fprintf (stdout, "W");
2515 fprintf (stdout, "\n");
2516 for (j = 0; j < t->operands; j++)
2518 fprintf (stdout, " #%d type ", j + 1);
2519 pt (t->operand_types[j]);
2520 fprintf (stdout, "\n");
2527 fprintf (stdout, " operation %d\n", e->X_op);
2528 fprintf (stdout, " add_number %ld (%lx)\n",
2529 (long) e->X_add_number, (long) e->X_add_number);
2530 if (e->X_add_symbol)
2532 fprintf (stdout, " add_symbol ");
2533 ps (e->X_add_symbol);
2534 fprintf (stdout, "\n");
2538 fprintf (stdout, " op_symbol ");
2539 ps (e->X_op_symbol);
2540 fprintf (stdout, "\n");
2547 fprintf (stdout, "%s type %s%s",
2549 S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2550 segment_name (S_GET_SEGMENT (s)));
2553 static struct type_name
2555 i386_operand_type mask;
2558 const type_names[] =
2560 { OPERAND_TYPE_REG8, "r8" },
2561 { OPERAND_TYPE_REG16, "r16" },
2562 { OPERAND_TYPE_REG32, "r32" },
2563 { OPERAND_TYPE_REG64, "r64" },
2564 { OPERAND_TYPE_IMM8, "i8" },
2565 { OPERAND_TYPE_IMM8, "i8s" },
2566 { OPERAND_TYPE_IMM16, "i16" },
2567 { OPERAND_TYPE_IMM32, "i32" },
2568 { OPERAND_TYPE_IMM32S, "i32s" },
2569 { OPERAND_TYPE_IMM64, "i64" },
2570 { OPERAND_TYPE_IMM1, "i1" },
2571 { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2572 { OPERAND_TYPE_DISP8, "d8" },
2573 { OPERAND_TYPE_DISP16, "d16" },
2574 { OPERAND_TYPE_DISP32, "d32" },
2575 { OPERAND_TYPE_DISP32S, "d32s" },
2576 { OPERAND_TYPE_DISP64, "d64" },
2577 { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2578 { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2579 { OPERAND_TYPE_CONTROL, "control reg" },
2580 { OPERAND_TYPE_TEST, "test reg" },
2581 { OPERAND_TYPE_DEBUG, "debug reg" },
2582 { OPERAND_TYPE_FLOATREG, "FReg" },
2583 { OPERAND_TYPE_FLOATACC, "FAcc" },
2584 { OPERAND_TYPE_SREG2, "SReg2" },
2585 { OPERAND_TYPE_SREG3, "SReg3" },
2586 { OPERAND_TYPE_ACC, "Acc" },
2587 { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2588 { OPERAND_TYPE_REGMMX, "rMMX" },
2589 { OPERAND_TYPE_REGXMM, "rXMM" },
2590 { OPERAND_TYPE_REGYMM, "rYMM" },
2591 { OPERAND_TYPE_ESSEG, "es" },
2595 pt (i386_operand_type t)
2598 i386_operand_type a;
2600 for (j = 0; j < ARRAY_SIZE (type_names); j++)
2602 a = operand_type_and (t, type_names[j].mask);
2603 if (!operand_type_all_zero (&a))
2604 fprintf (stdout, "%s, ", type_names[j].name);
2609 #endif /* DEBUG386 */
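/* reloc () below maps a (size, pcrel, sign) triple onto a BFD
   relocation code. When an explicit relocation was requested on the
   operand (the "other" argument), it is instead checked for
   compatibility with the field being relocated. */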
2611 static bfd_reloc_code_real_type
2612 reloc (unsigned int size,
2615 bfd_reloc_code_real_type other)
2617 if (other != NO_RELOC)
2619 reloc_howto_type *rel;
2624 case BFD_RELOC_X86_64_GOT32:
2625 return BFD_RELOC_X86_64_GOT64;
2627 case BFD_RELOC_X86_64_PLTOFF64:
2628 return BFD_RELOC_X86_64_PLTOFF64;
2630 case BFD_RELOC_X86_64_GOTPC32:
2631 other = BFD_RELOC_X86_64_GOTPC64;
2633 case BFD_RELOC_X86_64_GOTPCREL:
2634 other = BFD_RELOC_X86_64_GOTPCREL64;
2636 case BFD_RELOC_X86_64_TPOFF32:
2637 other = BFD_RELOC_X86_64_TPOFF64;
2639 case BFD_RELOC_X86_64_DTPOFF32:
2640 other = BFD_RELOC_X86_64_DTPOFF64;
2646 /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless. */
2647 if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2650 rel = bfd_reloc_type_lookup (stdoutput, other);
2652 as_bad (_("unknown relocation (%u)"), other);
2653 else if (size != bfd_get_reloc_size (rel))
2654 as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2655 bfd_get_reloc_size (rel),
2657 else if (pcrel && !rel->pc_relative)
2658 as_bad (_("non-pc-relative relocation for pc-relative field"));
2659 else if ((rel->complain_on_overflow == complain_overflow_signed
2661 || (rel->complain_on_overflow == complain_overflow_unsigned
2663 as_bad (_("relocated field and relocation type differ in signedness"));
2672 as_bad (_("there are no unsigned pc-relative relocations"));
2675 case 1: return BFD_RELOC_8_PCREL;
2676 case 2: return BFD_RELOC_16_PCREL;
2677 case 4: return BFD_RELOC_32_PCREL;
2678 case 8: return BFD_RELOC_64_PCREL;
2680 as_bad (_("cannot do %u byte pc-relative relocation"), size);
2687 case 4: return BFD_RELOC_X86_64_32S;
2692 case 1: return BFD_RELOC_8;
2693 case 2: return BFD_RELOC_16;
2694 case 4: return BFD_RELOC_32;
2695 case 8: return BFD_RELOC_64;
2697 as_bad (_("cannot do %s %u byte relocation"),
2698 sign > 0 ? "signed" : "unsigned", size);
2704 /* Here we decide which fixups can be adjusted to make them relative to
2705 the beginning of the section instead of the symbol. Basically we need
2706 to make sure that the dynamic relocations are done correctly, so in
2707 some cases we force the original symbol to be used. */
2710 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2712 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2716 /* Don't adjust pc-relative references to merge sections in 64-bit mode. */
2718 if (use_rela_relocations
2719 && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
2723 /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
2724 and changed later by validate_fix. */
2725 if (GOT_symbol && fixP->fx_subsy == GOT_symbol
2726 && fixP->fx_r_type == BFD_RELOC_32_PCREL)
2729 /* adjust_reloc_syms doesn't know about the GOT. */
2730 if (fixP->fx_r_type == BFD_RELOC_386_GOTOFF
2731 || fixP->fx_r_type == BFD_RELOC_386_PLT32
2732 || fixP->fx_r_type == BFD_RELOC_386_GOT32
2733 || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
2734 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
2735 || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
2736 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
2737 || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
2738 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
2739 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
2740 || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
2741 || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
2742 || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
2743 || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
2744 || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
2745 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
2746 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
2747 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
2748 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
2749 || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
2750 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
2751 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
2752 || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
2753 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
2754 || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
2755 || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
2756 || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
2757 || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
2764 intel_float_operand (const char *mnemonic)
2766 /* Note that the value returned is meaningful only for opcodes with (memory)
2767 operands, hence the code here is free to improperly handle opcodes that
2768 have no operands (for better performance and smaller code). */
2770 if (mnemonic[0] != 'f')
2771 return 0; /* non-math */
2773 switch (mnemonic[1])
2775 /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
2776 the fs segment override prefix are not currently handled, because no
2777 call path for opcodes without operands reaches this code. */
2779 return 2 /* integer op */;
2781 if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
2782 return 3; /* fldcw/fldenv */
2785 if (mnemonic[2] != 'o' /* fnop */)
2786 return 3; /* non-waiting control op */
2789 if (mnemonic[2] == 's')
2790 return 3; /* frstor/frstpm */
2793 if (mnemonic[2] == 'a')
2794 return 3; /* fsave */
2795 if (mnemonic[2] == 't')
2797 switch (mnemonic[3])
2799 case 'c': /* fstcw */
2800 case 'd': /* fstdw */
2801 case 'e': /* fstenv */
2802 case 's': /* fsts[gw] */
2808 if (mnemonic[2] == 'r' || mnemonic[2] == 's')
2809 return 0; /* fxsave/fxrstor are not really math ops */
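/* Anything else starting with 'f' is treated as ordinary floating
   point math. Callers interpret the result as: 0 = not an FP
   operation, 1 = plain FP math, 2 = FP integer op, 3 = control or
   environment op (fldcw, fstenv, fsave, ...). */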
2816 /* Build the VEX prefix. */
2819 build_vex_prefix (const insn_template *t)
2821 unsigned int register_specifier;
2822 unsigned int implied_prefix;
2823 unsigned int vector_length;
2825 /* Check register specifier. */
2826 if (i.vex.register_specifier)
2828 register_specifier = i.vex.register_specifier->reg_num;
2829 if ((i.vex.register_specifier->reg_flags & RegRex))
2830 register_specifier += 8;
2831 register_specifier = ~register_specifier & 0xf;
2834 register_specifier = 0xf;
2836 /* Use 2-byte VEX prefix by swapping destination and source operands. */
2839 && i.operands == i.reg_operands
2840 && i.tm.opcode_modifier.vexopcode == VEX0F
2841 && i.tm.opcode_modifier.s
2844 unsigned int xchg = i.operands - 1;
2845 union i386_op temp_op;
2846 i386_operand_type temp_type;
2848 temp_type = i.types[xchg];
2849 i.types[xchg] = i.types[0];
2850 i.types[0] = temp_type;
2851 temp_op = i.op[xchg];
2852 i.op[xchg] = i.op[0];
2855 gas_assert (i.rm.mode == 3);
2859 i.rm.regmem = i.rm.reg;
2862 /* Use the next insn. */
2866 if (i.tm.opcode_modifier.vex == VEXScalar)
2867 vector_length = avxscalar;
2869 vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
2871 switch ((i.tm.base_opcode >> 8) & 0xff)
2876 case DATA_PREFIX_OPCODE:
2879 case REPE_PREFIX_OPCODE:
2882 case REPNE_PREFIX_OPCODE:
2889 /* Use 2-byte VEX prefix if possible. */
2890 if (i.tm.opcode_modifier.vexopcode == VEX0F
2891 && i.tm.opcode_modifier.vexw != VEXW1
2892 && (i.rex & (REX_W | REX_X | REX_B)) == 0)
2894 /* 2-byte VEX prefix. */
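/* Layout: 0xc5, then ~R in bit 7, ~vvvv in bits 6-3, L in bit 2 and
   the implied-prefix pp field in bits 1-0. */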
2898 i.vex.bytes[0] = 0xc5;
2900 /* Check the REX.R bit. */
2901 r = (i.rex & REX_R) ? 0 : 1;
2902 i.vex.bytes[1] = (r << 7
2903 | register_specifier << 3
2904 | vector_length << 2
2909 /* 3-byte VEX prefix. */
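/* Layout: 0xc4 (or 0x8f for XOP), then ~R ~X ~B and the 5-bit opcode
   map select in byte 1, then W, ~vvvv, L and pp in byte 2. */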
2914 switch (i.tm.opcode_modifier.vexopcode)
2918 i.vex.bytes[0] = 0xc4;
2922 i.vex.bytes[0] = 0xc4;
2926 i.vex.bytes[0] = 0xc4;
2930 i.vex.bytes[0] = 0x8f;
2934 i.vex.bytes[0] = 0x8f;
2938 i.vex.bytes[0] = 0x8f;
2944 /* The high 3 bits of the second VEX byte are the 1's complement
2945 of RXB bits from REX. */
2946 i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
2948 /* Check the REX.W bit. */
2949 w = (i.rex & REX_W) ? 1 : 0;
2950 if (i.tm.opcode_modifier.vexw)
2955 if (i.tm.opcode_modifier.vexw == VEXW1)
2959 i.vex.bytes[2] = (w << 7
2960 | register_specifier << 3
2961 | vector_length << 2
2967 process_immext (void)
2971 if (i.tm.cpu_flags.bitfield.cpusse3 && i.operands > 0)
2973 /* SSE3 Instructions have the fixed operands with an opcode
2974 suffix which is coded in the same place as an 8-bit immediate
2975 field would be. Here we check those operands and remove them afterwards. */
2979 for (x = 0; x < i.operands; x++)
2980 if (i.op[x].regs->reg_num != x)
2981 as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
2982 register_prefix, i.op[x].regs->reg_name, x + 1,
2988 /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
2989 which is coded in the same place as an 8-bit immediate field
2990 would be. Here we fake an 8-bit immediate operand from the
2991 opcode suffix stored in tm.extension_opcode.
2993 AVX instructions also use this encoding for some
2994 3-argument instructions. */
2996 gas_assert (i.imm_operands == 0
2998 || (i.tm.opcode_modifier.vex
2999 && i.operands <= 4)));
3001 exp = &im_expressions[i.imm_operands++];
3002 i.op[i.operands].imms = exp;
3003 i.types[i.operands] = imm8;
3005 exp->X_op = O_constant;
3006 exp->X_add_number = i.tm.extension_opcode;
3007 i.tm.extension_opcode = None;
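/* check_hle () decides whether an xacquire/xrelease prefix is
   acceptable for the current template: some instructions take either
   prefix only together with a lock prefix, xrelease additionally
   allows a few memory-destination forms, and everything else is
   rejected. */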
3014 switch (i.tm.opcode_modifier.hleprefixok)
3019 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3020 as_bad (_("invalid instruction `%s' after `xacquire'"),
3023 as_bad (_("invalid instruction `%s' after `xrelease'"),
3027 if (i.prefix[LOCK_PREFIX])
3029 if (i.prefix[HLE_PREFIX] == XACQUIRE_PREFIX_OPCODE)
3030 as_bad (_("missing `lock' with `xacquire'"));
3032 as_bad (_("missing `lock' with `xrelease'"));
3036 case HLEPrefixRelease:
3037 if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3039 as_bad (_("instruction `%s' after `xacquire' not allowed"),
3043 if (i.mem_operands == 0
3044 || !operand_type_check (i.types[i.operands - 1], anymem))
3046 as_bad (_("memory destination needed for instruction `%s'"
3047 " after `xrelease'"), i.tm.name);
3054 /* This is the guts of the machine-dependent assembler. LINE points to a
3055 machine dependent instruction. This function is supposed to emit
3056 the frags/bytes it assembles to. */
3059 md_assemble (char *line)
3062 char mnemonic[MAX_MNEM_SIZE];
3063 const insn_template *t;
3065 /* Initialize globals. */
3066 memset (&i, '\0', sizeof (i));
3067 for (j = 0; j < MAX_OPERANDS; j++)
3068 i.reloc[j] = NO_RELOC;
3069 memset (disp_expressions, '\0', sizeof (disp_expressions));
3070 memset (im_expressions, '\0', sizeof (im_expressions));
3071 save_stack_p = save_stack;
3073 /* First parse an instruction mnemonic & call i386_operand for the operands.
3074 We assume that the scrubber has arranged it so that line[0] is the valid
3075 start of a (possibly prefixed) mnemonic. */
3077 line = parse_insn (line, mnemonic);
3081 line = parse_operands (line, mnemonic);
3086 /* Now we've parsed the mnemonic into a set of templates, and have the
3087 operands at hand. */
3089 /* All intel opcodes have reversed operands except for "bound" and
3090 "enter". We also don't reverse intersegment "jmp" and "call"
3091 instructions with 2 immediate operands so that the immediate segment
3092 precedes the offset, as it does when in AT&T mode. */
3095 && (strcmp (mnemonic, "bound") != 0)
3096 && (strcmp (mnemonic, "invlpga") != 0)
3097 && !(operand_type_check (i.types[0], imm)
3098 && operand_type_check (i.types[1], imm)))
3101 /* The order of the immediates should be reversed
3102 for the two-immediate instructions extrq and insertq. */
3103 if (i.imm_operands == 2
3104 && (strcmp (mnemonic, "extrq") == 0
3105 || strcmp (mnemonic, "insertq") == 0))
3106 swap_2_operands (0, 1);
3111 /* Don't optimize displacement for movabs since it only takes 64bit displacement. */
3114 && i.disp_encoding != disp_encoding_32bit
3115 && (flag_code != CODE_64BIT
3116 || strcmp (mnemonic, "movabs") != 0))
3119 /* Next, we find a template that matches the given insn,
3120 making sure the overlap of the given operands types is consistent
3121 with the template operand types. */
3123 if (!(t = match_template ()))
3126 if (sse_check != sse_check_none
3127 && !i.tm.opcode_modifier.noavx
3128 && (i.tm.cpu_flags.bitfield.cpusse
3129 || i.tm.cpu_flags.bitfield.cpusse2
3130 || i.tm.cpu_flags.bitfield.cpusse3
3131 || i.tm.cpu_flags.bitfield.cpussse3
3132 || i.tm.cpu_flags.bitfield.cpusse4_1
3133 || i.tm.cpu_flags.bitfield.cpusse4_2))
3135 (sse_check == sse_check_warning
3137 : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3140 /* Zap movzx and movsx suffix. The suffix has been set from
3141 "word ptr" or "byte ptr" on the source operand in Intel syntax
3142 or extracted from mnemonic in AT&T syntax. But we'll use
3143 the destination register to choose the suffix for encoding. */
3144 if ((i.tm.base_opcode & ~9) == 0x0fb6)
3146 /* In Intel syntax, there must be a suffix. In AT&T syntax, if
3147 there is no suffix, the default will be byte extension. */
3148 if (i.reg_operands != 2
3151 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3156 if (i.tm.opcode_modifier.fwait)
3157 if (!add_prefix (FWAIT_OPCODE))
3160 /* Check for lock without a lockable instruction. Destination operand
3161 must be memory unless it is xchg (0x86). */
3162 if (i.prefix[LOCK_PREFIX]
3163 && (!i.tm.opcode_modifier.islockable
3164 || i.mem_operands == 0
3165 || (i.tm.base_opcode != 0x86
3166 && !operand_type_check (i.types[i.operands - 1], anymem))))
3168 as_bad (_("expecting lockable instruction after `lock'"));
3172 /* Check if HLE prefix is OK. */
3173 if (i.have_hle && !check_hle ())
3176 /* Check string instruction segment overrides. */
3177 if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3179 if (!check_string ())
3181 i.disp_operands = 0;
3184 if (!process_suffix ())
3187 /* Update operand types. */
3188 for (j = 0; j < i.operands; j++)
3189 i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3191 /* Make still unresolved immediate matches conform to size of immediate
3192 given in i.suffix. */
3193 if (!finalize_imm ())
3196 if (i.types[0].bitfield.imm1)
3197 i.imm_operands = 0; /* kludge for shift insns. */
3199 /* We only need to check those implicit registers for instructions
3200 with 3 operands or less. */
3201 if (i.operands <= 3)
3202 for (j = 0; j < i.operands; j++)
3203 if (i.types[j].bitfield.inoutportreg
3204 || i.types[j].bitfield.shiftcount
3205 || i.types[j].bitfield.acc
3206 || i.types[j].bitfield.floatacc)
3209 /* ImmExt should be processed after SSE2AVX. */
3210 if (!i.tm.opcode_modifier.sse2avx
3211 && i.tm.opcode_modifier.immext)
3214 /* For insns with operands there are more diddles to do to the opcode. */
3217 if (!process_operands ())
3220 else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3222 /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc. */
3223 as_warn (_("translating to `%sp'"), i.tm.name);
3226 if (i.tm.opcode_modifier.vex)
3227 build_vex_prefix (t);
3229 /* Handle conversion of 'int $3' --> special int3 insn. XOP or FMA4
3230 instructions may define INT_OPCODE as well, so avoid this corner
3231 case for those instructions that use MODRM. */
3232 if (i.tm.base_opcode == INT_OPCODE
3233 && !i.tm.opcode_modifier.modrm
3234 && i.op[0].imms->X_add_number == 3)
3236 i.tm.base_opcode = INT3_OPCODE;
3240 if ((i.tm.opcode_modifier.jump
3241 || i.tm.opcode_modifier.jumpbyte
3242 || i.tm.opcode_modifier.jumpdword)
3243 && i.op[0].disps->X_op == O_constant)
3245 /* Convert "jmp constant" (and "call constant") to a jump (call) to
3246 the absolute address given by the constant. Since ix86 jumps and
3247 calls are pc relative, we need to generate a reloc. */
3248 i.op[0].disps->X_add_symbol = &abs_symbol;
3249 i.op[0].disps->X_op = O_symbol;
3252 if (i.tm.opcode_modifier.rex64)
3255 /* For 8 bit registers we need an empty rex prefix. Also if the
3256 instruction already has a prefix, we need to convert old
3257 registers to new ones. */
3259 if ((i.types[0].bitfield.reg8
3260 && (i.op[0].regs->reg_flags & RegRex64) != 0)
3261 || (i.types[1].bitfield.reg8
3262 && (i.op[1].regs->reg_flags & RegRex64) != 0)
3263 || ((i.types[0].bitfield.reg8
3264 || i.types[1].bitfield.reg8)
3269 i.rex |= REX_OPCODE;
3270 for (x = 0; x < 2; x++)
3272 /* Look for 8 bit operand that uses old registers. */
3273 if (i.types[x].bitfield.reg8
3274 && (i.op[x].regs->reg_flags & RegRex64) == 0)
3276 /* In case it is "hi" register, give up. */
3277 if (i.op[x].regs->reg_num > 3)
3278 as_bad (_("can't encode register '%s%s' in an "
3279 "instruction requiring REX prefix."),
3280 register_prefix, i.op[x].regs->reg_name);
3282 /* Otherwise it is equivalent to the extended register.
3283 Since the encoding doesn't change this is merely
3284 cosmetic cleanup for debug output. */
3286 i.op[x].regs = i.op[x].regs + 8;
3292 add_prefix (REX_OPCODE | i.rex);
3294 /* We are ready to output the insn. */
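/* parse_insn: scan the mnemonic (including any prefixes written in
   front of it and any trailing .s/.d8/.d32 encoding pseudo-suffixes),
   look it up in op_hash and leave the matching templates in
   current_templates. Returns a pointer past the mnemonic, or NULL on
   error. */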
3299 parse_insn (char *line, char *mnemonic)
3302 char *token_start = l;
3305 const insn_template *t;
3308 /* Non-zero if we found a prefix only acceptable with string insns. */
3309 const char *expecting_string_instruction = NULL;
3314 while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3319 if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3321 as_bad (_("no such instruction: `%s'"), token_start);
3326 if (!is_space_char (*l)
3327 && *l != END_OF_INSN
3329 || (*l != PREFIX_SEPARATOR
3332 as_bad (_("invalid character %s in mnemonic"),
3333 output_invalid (*l));
3336 if (token_start == l)
3338 if (!intel_syntax && *l == PREFIX_SEPARATOR)
3339 as_bad (_("expecting prefix; got nothing"));
3341 as_bad (_("expecting mnemonic; got nothing"));
3345 /* Look up instruction (or prefix) via hash table. */
3346 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3348 if (*l != END_OF_INSN
3349 && (!is_space_char (*l) || l[1] != END_OF_INSN)
3350 && current_templates
3351 && current_templates->start->opcode_modifier.isprefix)
3353 if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3355 as_bad ((flag_code != CODE_64BIT
3356 ? _("`%s' is only supported in 64-bit mode")
3357 : _("`%s' is not supported in 64-bit mode")),
3358 current_templates->start->name);
3361 /* If we are in 16-bit mode, do not allow addr16 or data16.
3362 Similarly, in 32-bit mode, do not allow addr32 or data32. */
3363 if ((current_templates->start->opcode_modifier.size16
3364 || current_templates->start->opcode_modifier.size32)
3365 && flag_code != CODE_64BIT
3366 && (current_templates->start->opcode_modifier.size32
3367 ^ (flag_code == CODE_16BIT)))
3369 as_bad (_("redundant %s prefix"),
3370 current_templates->start->name);
3373 /* Add prefix, checking for repeated prefixes. */
3374 switch (add_prefix (current_templates->start->base_opcode))
3379 if (current_templates->start->cpu_flags.bitfield.cpuhle)
3382 expecting_string_instruction = current_templates->start->name;
3387 /* Skip past PREFIX_SEPARATOR and reset token_start. */
3394 if (!current_templates)
3396 /* Check if we should swap operand or force 32bit displacement in encoding. */
3398 if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3400 else if (mnem_p - 3 == dot_p
3403 i.disp_encoding = disp_encoding_8bit;
3404 else if (mnem_p - 4 == dot_p
3408 i.disp_encoding = disp_encoding_32bit;
3413 current_templates = (const templates *) hash_find (op_hash, mnemonic);
3416 if (!current_templates)
3419 /* See if we can get a match by trimming off a suffix. */
3422 case WORD_MNEM_SUFFIX:
3423 if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3424 i.suffix = SHORT_MNEM_SUFFIX;
3426 case BYTE_MNEM_SUFFIX:
3427 case QWORD_MNEM_SUFFIX:
3428 i.suffix = mnem_p[-1];
3430 current_templates = (const templates *) hash_find (op_hash,
3433 case SHORT_MNEM_SUFFIX:
3434 case LONG_MNEM_SUFFIX:
3437 i.suffix = mnem_p[-1];
3439 current_templates = (const templates *) hash_find (op_hash,
3448 if (intel_float_operand (mnemonic) == 1)
3449 i.suffix = SHORT_MNEM_SUFFIX;
3451 i.suffix = LONG_MNEM_SUFFIX;
3453 current_templates = (const templates *) hash_find (op_hash,
3458 if (!current_templates)
3460 as_bad (_("no such instruction: `%s'"), token_start);
3465 if (current_templates->start->opcode_modifier.jump
3466 || current_templates->start->opcode_modifier.jumpbyte)
3468 /* Check for a branch hint. We allow ",pt" and ",pn" for
3469 predict taken and predict not taken respectively.
3470 I'm not sure that branch hints actually do anything on loop
3471 and jcxz insns (JumpByte) for current Pentium4 chips. They
3472 may work in the future and it doesn't hurt to accept them in the meantime. */
3474 if (l[0] == ',' && l[1] == 'p')
3478 if (!add_prefix (DS_PREFIX_OPCODE))
3482 else if (l[2] == 'n')
3484 if (!add_prefix (CS_PREFIX_OPCODE))
3490 /* Any other comma loses. */
3493 as_bad (_("invalid character %s in mnemonic"),
3494 output_invalid (*l));
3498 /* Check if instruction is supported on specified architecture. */
3500 for (t = current_templates->start; t < current_templates->end; ++t)
3502 supported |= cpu_flags_match (t);
3503 if (supported == CPU_FLAGS_PERFECT_MATCH)
3507 if (!(supported & CPU_FLAGS_64BIT_MATCH))
3509 as_bad (flag_code == CODE_64BIT
3510 ? _("`%s' is not supported in 64-bit mode")
3511 : _("`%s' is only supported in 64-bit mode"),
3512 current_templates->start->name);
3515 if (supported != CPU_FLAGS_PERFECT_MATCH)
3517 as_bad (_("`%s' is not supported on `%s%s'"),
3518 current_templates->start->name,
3519 cpu_arch_name ? cpu_arch_name : default_arch,
3520 cpu_sub_arch_name ? cpu_sub_arch_name : "");
3525 if (!cpu_arch_flags.bitfield.cpui386
3526 && (flag_code != CODE_16BIT))
3528 as_warn (_("use .code16 to ensure correct addressing mode"));
3531 /* Check for rep/repne without a string instruction. */
3532 if (expecting_string_instruction)
3534 static templates override;
3536 for (t = current_templates->start; t < current_templates->end; ++t)
3537 if (t->opcode_modifier.isstring)
3539 if (t >= current_templates->end)
3541 as_bad (_("expecting string instruction after `%s'"),
3542 expecting_string_instruction);
3545 for (override.start = t; t < current_templates->end; ++t)
3546 if (!t->opcode_modifier.isstring)
3549 current_templates = &override;
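/* parse_operands: split the rest of the line at top-level commas
   (parentheses and brackets may nest) and hand each piece to
   i386_att_operand or i386_intel_operand, which record the operand
   types, registers and expressions in i. */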
3556 parse_operands (char *l, const char *mnemonic)
3560 /* 1 if operand is pending after ','. */
3561 unsigned int expecting_operand = 0;
3563 /* Non-zero if operand parens not balanced. */
3564 unsigned int paren_not_balanced;
3566 while (*l != END_OF_INSN)
3568 /* Skip optional white space before operand. */
3569 if (is_space_char (*l))
3571 if (!is_operand_char (*l) && *l != END_OF_INSN)
3573 as_bad (_("invalid character %s before operand %d"),
3574 output_invalid (*l),
3578 token_start = l; /* after white space */
3579 paren_not_balanced = 0;
3580 while (paren_not_balanced || *l != ',')
3582 if (*l == END_OF_INSN)
3584 if (paren_not_balanced)
3587 as_bad (_("unbalanced parenthesis in operand %d."),
3590 as_bad (_("unbalanced brackets in operand %d."),
3595 break; /* we are done */
3597 else if (!is_operand_char (*l) && !is_space_char (*l))
3599 as_bad (_("invalid character %s in operand %d"),
3600 output_invalid (*l),
3607 ++paren_not_balanced;
3609 --paren_not_balanced;
3614 ++paren_not_balanced;
3616 --paren_not_balanced;
3620 if (l != token_start)
3621 { /* Yes, we've read in another operand. */
3622 unsigned int operand_ok;
3623 this_operand = i.operands++;
3624 i.types[this_operand].bitfield.unspecified = 1;
3625 if (i.operands > MAX_OPERANDS)
3627 as_bad (_("spurious operands; (%d operands/instruction max)"),
3631 /* Now parse operand adding info to 'i' as we go along. */
3632 END_STRING_AND_SAVE (l);
3636 i386_intel_operand (token_start,
3637 intel_float_operand (mnemonic));
3639 operand_ok = i386_att_operand (token_start);
3641 RESTORE_END_STRING (l);
3647 if (expecting_operand)
3649 expecting_operand_after_comma:
3650 as_bad (_("expecting operand after ','; got nothing"));
3655 as_bad (_("expecting operand before ','; got nothing"));
3660 /* Now *l must be either ',' or END_OF_INSN. */
3663 if (*++l == END_OF_INSN)
3665 /* Just skip it, if it's \n complain. */
3666 goto expecting_operand_after_comma;
3668 expecting_operand = 1;
3675 swap_2_operands (int xchg1, int xchg2)
3677 union i386_op temp_op;
3678 i386_operand_type temp_type;
3679 enum bfd_reloc_code_real temp_reloc;
3681 temp_type = i.types[xchg2];
3682 i.types[xchg2] = i.types[xchg1];
3683 i.types[xchg1] = temp_type;
3684 temp_op = i.op[xchg2];
3685 i.op[xchg2] = i.op[xchg1];
3686 i.op[xchg1] = temp_op;
3687 temp_reloc = i.reloc[xchg2];
3688 i.reloc[xchg2] = i.reloc[xchg1];
3689 i.reloc[xchg1] = temp_reloc;
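/* swap_operands reverses the whole operand list (first with last and,
   for 4/5 operand forms, second with second-to-last), including the
   recorded segment overrides when both operands are in memory. */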
3693 swap_operands (void)
3699 swap_2_operands (1, i.operands - 2);
3702 swap_2_operands (0, i.operands - 1);
3708 if (i.mem_operands == 2)
3710 const seg_entry *temp_seg;
3711 temp_seg = i.seg[0];
3712 i.seg[0] = i.seg[1];
3713 i.seg[1] = temp_seg;
3717 /* Try to ensure constant immediates are represented in the smallest size possible. */
3722 char guess_suffix = 0;
3726 guess_suffix = i.suffix;
3727 else if (i.reg_operands)
3729 /* Figure out a suffix from the last register operand specified.
3730 We can't do this properly yet, ie. excluding InOutPortReg,
3731 but the following works for instructions with immediates.
3732 In any case, we can't set i.suffix yet. */
3733 for (op = i.operands; --op >= 0;)
3734 if (i.types[op].bitfield.reg8)
3736 guess_suffix = BYTE_MNEM_SUFFIX;
3739 else if (i.types[op].bitfield.reg16)
3741 guess_suffix = WORD_MNEM_SUFFIX;
3744 else if (i.types[op].bitfield.reg32)
3746 guess_suffix = LONG_MNEM_SUFFIX;
3749 else if (i.types[op].bitfield.reg64)
3751 guess_suffix = QWORD_MNEM_SUFFIX;
3755 else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
3756 guess_suffix = WORD_MNEM_SUFFIX;
3758 for (op = i.operands; --op >= 0;)
3759 if (operand_type_check (i.types[op], imm))
3761 switch (i.op[op].imms->X_op)
3764 /* If a suffix is given, this operand may be shortened. */
3765 switch (guess_suffix)
3767 case LONG_MNEM_SUFFIX:
3768 i.types[op].bitfield.imm32 = 1;
3769 i.types[op].bitfield.imm64 = 1;
3771 case WORD_MNEM_SUFFIX:
3772 i.types[op].bitfield.imm16 = 1;
3773 i.types[op].bitfield.imm32 = 1;
3774 i.types[op].bitfield.imm32s = 1;
3775 i.types[op].bitfield.imm64 = 1;
3777 case BYTE_MNEM_SUFFIX:
3778 i.types[op].bitfield.imm8 = 1;
3779 i.types[op].bitfield.imm8s = 1;
3780 i.types[op].bitfield.imm16 = 1;
3781 i.types[op].bitfield.imm32 = 1;
3782 i.types[op].bitfield.imm32s = 1;
3783 i.types[op].bitfield.imm64 = 1;
3787 /* If this operand is at most 16 bits, convert it
3788 to a signed 16 bit number before trying to see
3789 whether it will fit in an even smaller size.
3790 This allows a 16-bit operand such as $0xffe0 to
3791 be recognised as within Imm8S range. */
3792 if ((i.types[op].bitfield.imm16)
3793 && (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
3795 i.op[op].imms->X_add_number =
3796 (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
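/* For example, with a 'w' suffix $0xffe0 becomes -32 here and can
   therefore still match Imm8S templates via smallest_imm_type ()
   below. */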
3798 if ((i.types[op].bitfield.imm32)
3799 && ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
3802 i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
3803 ^ ((offsetT) 1 << 31))
3804 - ((offsetT) 1 << 31));
3807 = operand_type_or (i.types[op],
3808 smallest_imm_type (i.op[op].imms->X_add_number));
3810 /* We must avoid matching Imm32 templates when only a 64bit
3811 immediate is available. */
3812 if (guess_suffix == QWORD_MNEM_SUFFIX)
3813 i.types[op].bitfield.imm32 = 0;
3820 /* Symbols and expressions. */
3822 /* Convert symbolic operand to proper sizes for matching, but don't
3823 prevent matching a set of insns that only supports sizes other
3824 than those matching the insn suffix. */
3826 i386_operand_type mask, allowed;
3827 const insn_template *t;
3829 operand_type_set (&mask, 0);
3830 operand_type_set (&allowed, 0);
3832 for (t = current_templates->start;
3833 t < current_templates->end;
3835 allowed = operand_type_or (allowed,
3836 t->operand_types[op]);
3837 switch (guess_suffix)
3839 case QWORD_MNEM_SUFFIX:
3840 mask.bitfield.imm64 = 1;
3841 mask.bitfield.imm32s = 1;
3843 case LONG_MNEM_SUFFIX:
3844 mask.bitfield.imm32 = 1;
3846 case WORD_MNEM_SUFFIX:
3847 mask.bitfield.imm16 = 1;
3849 case BYTE_MNEM_SUFFIX:
3850 mask.bitfield.imm8 = 1;
3855 allowed = operand_type_and (mask, allowed);
3856 if (!operand_type_all_zero (&allowed))
3857 i.types[op] = operand_type_and (i.types[op], mask);
3864 /* Try to use the smallest displacement type too. */
3866 optimize_disp (void)
3870 for (op = i.operands; --op >= 0;)
3871 if (operand_type_check (i.types[op], disp))
3873 if (i.op[op].disps->X_op == O_constant)
3875 offsetT op_disp = i.op[op].disps->X_add_number;
3877 if (i.types[op].bitfield.disp16
3878 && (op_disp & ~(offsetT) 0xffff) == 0)
3880 /* If this operand is at most 16 bits, convert
3881 to a signed 16 bit number and don't use 64bit displacement. */
3883 op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
3884 i.types[op].bitfield.disp64 = 0;
3886 if (i.types[op].bitfield.disp32
3887 && (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
3889 /* If this operand is at most 32 bits, convert
3890 to a signed 32 bit number and don't use 64bit displacement. */
3892 op_disp &= (((offsetT) 2 << 31) - 1);
3893 op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
3894 i.types[op].bitfield.disp64 = 0;
3896 if (!op_disp && i.types[op].bitfield.baseindex)
3898 i.types[op].bitfield.disp8 = 0;
3899 i.types[op].bitfield.disp16 = 0;
3900 i.types[op].bitfield.disp32 = 0;
3901 i.types[op].bitfield.disp32s = 0;
3902 i.types[op].bitfield.disp64 = 0;
3906 else if (flag_code == CODE_64BIT)
3908 if (fits_in_signed_long (op_disp))
3910 i.types[op].bitfield.disp64 = 0;
3911 i.types[op].bitfield.disp32s = 1;
3913 if (i.prefix[ADDR_PREFIX]
3914 && fits_in_unsigned_long (op_disp))
3915 i.types[op].bitfield.disp32 = 1;
3917 if ((i.types[op].bitfield.disp32
3918 || i.types[op].bitfield.disp32s
3919 || i.types[op].bitfield.disp16)
3920 && fits_in_signed_byte (op_disp))
3921 i.types[op].bitfield.disp8 = 1;
3923 else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
3924 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
3926 fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
3927 i.op[op].disps, 0, i.reloc[op]);
3928 i.types[op].bitfield.disp8 = 0;
3929 i.types[op].bitfield.disp16 = 0;
3930 i.types[op].bitfield.disp32 = 0;
3931 i.types[op].bitfield.disp32s = 0;
3932 i.types[op].bitfield.disp64 = 0;
3935 /* We only support 64bit displacement on constants. */
3936 i.types[op].bitfield.disp64 = 0;
3940 /* Check if operands are valid for the instruction. */
3943 check_VecOperands (const insn_template *t)
3945 /* Without VSIB byte, we can't have a vector register for index. */
3946 if (!t->opcode_modifier.vecsib
3948 && (i.index_reg->reg_type.bitfield.regxmm
3949 || i.index_reg->reg_type.bitfield.regymm))
3951 i.error = unsupported_vector_index_register;
3955 /* For VSIB byte, we need a vector register for index and no PC
3956 relative addressing is allowed. */
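/* AVX2 gathers are the typical case: their memory operand must use an
   xmm/ymm index register matching VecSIB128/VecSIB256 and may not be
   RIP-relative. */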
3957 if (t->opcode_modifier.vecsib
3959 || !((t->opcode_modifier.vecsib == VecSIB128
3960 && i.index_reg->reg_type.bitfield.regxmm)
3961 || (t->opcode_modifier.vecsib == VecSIB256
3962 && i.index_reg->reg_type.bitfield.regymm))
3963 || (i.base_reg && i.base_reg->reg_num == RegRip)))
3965 i.error = invalid_vsib_address;
3972 /* Check if operands are valid for the instruction. Update VEX
3976 VEX_check_operands (const insn_template *t)
3978 if (!t->opcode_modifier.vex)
3981 /* Only check VEX_Imm4, which must be the first operand. */
3982 if (t->operand_types[0].bitfield.vec_imm4)
3984 if (i.op[0].imms->X_op != O_constant
3985 || !fits_in_imm4 (i.op[0].imms->X_add_number))
3991 /* Turn off Imm8 so that update_imm won't complain. */
3992 i.types[0] = vec_imm4;
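/* match_template walks current_templates looking for an entry whose
   operand count, operand types, suffix and CPU feature requirements
   all agree with what was parsed; on success the winning template is
   copied into i.tm, otherwise i.error records why and an error is
   reported. */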
3998 static const insn_template *
3999 match_template (void)
4001 /* Points to template once we've found it. */
4002 const insn_template *t;
4003 i386_operand_type overlap0, overlap1, overlap2, overlap3;
4004 i386_operand_type overlap4;
4005 unsigned int found_reverse_match;
4006 i386_opcode_modifier suffix_check;
4007 i386_operand_type operand_types [MAX_OPERANDS];
4008 int addr_prefix_disp;
4010 unsigned int found_cpu_match;
4011 unsigned int check_register;
4013 #if MAX_OPERANDS != 5
4014 # error "MAX_OPERANDS must be 5."
4017 found_reverse_match = 0;
4018 addr_prefix_disp = -1;
4020 memset (&suffix_check, 0, sizeof (suffix_check));
4021 if (i.suffix == BYTE_MNEM_SUFFIX)
4022 suffix_check.no_bsuf = 1;
4023 else if (i.suffix == WORD_MNEM_SUFFIX)
4024 suffix_check.no_wsuf = 1;
4025 else if (i.suffix == SHORT_MNEM_SUFFIX)
4026 suffix_check.no_ssuf = 1;
4027 else if (i.suffix == LONG_MNEM_SUFFIX)
4028 suffix_check.no_lsuf = 1;
4029 else if (i.suffix == QWORD_MNEM_SUFFIX)
4030 suffix_check.no_qsuf = 1;
4031 else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4032 suffix_check.no_ldsuf = 1;
4034 /* Must have right number of operands. */
4035 i.error = number_of_operands_mismatch;
4037 for (t = current_templates->start; t < current_templates->end; t++)
4039 addr_prefix_disp = -1;
4041 if (i.operands != t->operands)
4044 /* Check processor support. */
4045 i.error = unsupported;
4046 found_cpu_match = (cpu_flags_match (t)
4047 == CPU_FLAGS_PERFECT_MATCH);
4048 if (!found_cpu_match)
4051 /* Check old gcc support. */
4052 i.error = old_gcc_only;
4053 if (!old_gcc && t->opcode_modifier.oldgcc)
4056 /* Check AT&T mnemonic. */
4057 i.error = unsupported_with_intel_mnemonic;
4058 if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4061 /* Check AT&T/Intel syntax. */
4062 i.error = unsupported_syntax;
4063 if ((intel_syntax && t->opcode_modifier.attsyntax)
4064 || (!intel_syntax && t->opcode_modifier.intelsyntax))
4067 /* Check the suffix, except for some instructions in intel mode. */
4068 i.error = invalid_instruction_suffix;
4069 if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4070 && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4071 || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4072 || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4073 || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4074 || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4075 || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4078 if (!operand_size_match (t))
4081 for (j = 0; j < MAX_OPERANDS; j++)
4082 operand_types[j] = t->operand_types[j];
4084 /* In general, don't allow 64-bit operands in 32-bit mode. */
4085 if (i.suffix == QWORD_MNEM_SUFFIX
4086 && flag_code != CODE_64BIT
4088 ? (!t->opcode_modifier.ignoresize
4089 && !intel_float_operand (t->name))
4090 : intel_float_operand (t->name) != 2)
4091 && ((!operand_types[0].bitfield.regmmx
4092 && !operand_types[0].bitfield.regxmm
4093 && !operand_types[0].bitfield.regymm)
4094 || (!operand_types[t->operands > 1].bitfield.regmmx
4095 && !!operand_types[t->operands > 1].bitfield.regxmm
4096 && !!operand_types[t->operands > 1].bitfield.regymm))
4097 && (t->base_opcode != 0x0fc7
4098 || t->extension_opcode != 1 /* cmpxchg8b */))
4101 /* In general, don't allow 32-bit operands on pre-386. */
4102 else if (i.suffix == LONG_MNEM_SUFFIX
4103 && !cpu_arch_flags.bitfield.cpui386
4105 ? (!t->opcode_modifier.ignoresize
4106 && !intel_float_operand (t->name))
4107 : intel_float_operand (t->name) != 2)
4108 && ((!operand_types[0].bitfield.regmmx
4109 && !operand_types[0].bitfield.regxmm)
4110 || (!operand_types[t->operands > 1].bitfield.regmmx
4111 && !!operand_types[t->operands > 1].bitfield.regxmm)))
4114 /* Do not verify operands when there are none. */
4118 /* We've found a match; break out of loop. */
4122 /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4123 into Disp32/Disp16/Disp32 operand. */
4124 if (i.prefix[ADDR_PREFIX] != 0)
4126 /* There should be only one Disp operand. */
4130 for (j = 0; j < MAX_OPERANDS; j++)
4132 if (operand_types[j].bitfield.disp16)
4134 addr_prefix_disp = j;
4135 operand_types[j].bitfield.disp32 = 1;
4136 operand_types[j].bitfield.disp16 = 0;
4142 for (j = 0; j < MAX_OPERANDS; j++)
4144 if (operand_types[j].bitfield.disp32)
4146 addr_prefix_disp = j;
4147 operand_types[j].bitfield.disp32 = 0;
4148 operand_types[j].bitfield.disp16 = 1;
4154 for (j = 0; j < MAX_OPERANDS; j++)
4156 if (operand_types[j].bitfield.disp64)
4158 addr_prefix_disp = j;
4159 operand_types[j].bitfield.disp64 = 0;
4160 operand_types[j].bitfield.disp32 = 1;
4168 /* We check register size if needed. */
4169 check_register = t->opcode_modifier.checkregsize;
4170 overlap0 = operand_type_and (i.types[0], operand_types[0]);
4171 switch (t->operands)
4174 if (!operand_type_match (overlap0, i.types[0]))
4178 /* xchg %eax, %eax is a special case. It is an alias for nop
4179 only in 32bit mode and we can use opcode 0x90. In 64bit
4180 mode, we can't use 0x90 for xchg %eax, %eax since it should
4181 zero-extend %eax to %rax. */
4182 if (flag_code == CODE_64BIT
4183 && t->base_opcode == 0x90
4184 && operand_type_equal (&i.types [0], &acc32)
4185 && operand_type_equal (&i.types [1], &acc32))
4189 /* If we swap operand in encoding, we either match
4190 the next one or reverse direction of operands. */
4191 if (t->opcode_modifier.s)
4193 else if (t->opcode_modifier.d)
4198 /* If we swap operand in encoding, we match the next one. */
4199 if (i.swap_operand && t->opcode_modifier.s)
4203 overlap1 = operand_type_and (i.types[1], operand_types[1]);
4204 if (!operand_type_match (overlap0, i.types[0])
4205 || !operand_type_match (overlap1, i.types[1])
4207 && !operand_type_register_match (overlap0, i.types[0],
4209 overlap1, i.types[1],
4212 /* Check if other direction is valid ... */
4213 if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4217 /* Try reversing direction of operands. */
4218 overlap0 = operand_type_and (i.types[0], operand_types[1]);
4219 overlap1 = operand_type_and (i.types[1], operand_types[0]);
4220 if (!operand_type_match (overlap0, i.types[0])
4221 || !operand_type_match (overlap1, i.types[1])
4223 && !operand_type_register_match (overlap0,
4230 /* Does not match either direction. */
4233 /* found_reverse_match holds which of D or FloatDR we've found. */
4235 if (t->opcode_modifier.d)
4236 found_reverse_match = Opcode_D;
4237 else if (t->opcode_modifier.floatd)
4238 found_reverse_match = Opcode_FloatD;
4240 found_reverse_match = 0;
4241 if (t->opcode_modifier.floatr)
4242 found_reverse_match |= Opcode_FloatR;
4246 /* Found a forward 2 operand match here. */
4247 switch (t->operands)
4250 overlap4 = operand_type_and (i.types[4],
4253 overlap3 = operand_type_and (i.types[3],
4256 overlap2 = operand_type_and (i.types[2],
4261 switch (t->operands)
4264 if (!operand_type_match (overlap4, i.types[4])
4265 || !operand_type_register_match (overlap3,
4273 if (!operand_type_match (overlap3, i.types[3])
4275 && !operand_type_register_match (overlap2,
4283 /* Here we make use of the fact that there are no
4284 reverse match 3 operand instructions, and all 3
4285 operand instructions only need to be checked for
4286 register consistency between operands 2 and 3. */
4287 if (!operand_type_match (overlap2, i.types[2])
4289 && !operand_type_register_match (overlap1,
4299 /* Found either forward/reverse 2, 3 or 4 operand match here:
4300 slip through to break. */
4302 if (!found_cpu_match)
4304 found_reverse_match = 0;
4308 /* Check if vector operands are valid. */
4309 if (check_VecOperands (t))
4312 /* Check if VEX operands are valid. */
4313 if (VEX_check_operands (t))
4316 /* We've found a match; break out of loop. */
4320 if (t == current_templates->end)
4322 /* We found no match. */
4323 const char *err_msg;
4328 case operand_size_mismatch:
4329 err_msg = _("operand size mismatch");
4331 case operand_type_mismatch:
4332 err_msg = _("operand type mismatch");
4334 case register_type_mismatch:
4335 err_msg = _("register type mismatch");
4337 case number_of_operands_mismatch:
4338 err_msg = _("number of operands mismatch");
4340 case invalid_instruction_suffix:
4341 err_msg = _("invalid instruction suffix");
4344 err_msg = _("Imm4 isn't the first operand");
4347 err_msg = _("only supported with old gcc");
4349 case unsupported_with_intel_mnemonic:
4350 err_msg = _("unsupported with Intel mnemonic");
4352 case unsupported_syntax:
4353 err_msg = _("unsupported syntax");
4356 as_bad (_("unsupported `%s'"),
4357 current_templates->start->name);
4359 case invalid_vsib_address:
4360 err_msg = _("invalid VSIB address");
4362 case unsupported_vector_index_register:
4363 err_msg = _("unsupported vector index register");
4366 as_bad (_("%s for `%s'"), err_msg,
4367 current_templates->start->name);
4371 if (!quiet_warnings)
4374 && (i.types[0].bitfield.jumpabsolute
4375 != operand_types[0].bitfield.jumpabsolute))
4377 as_warn (_("indirect %s without `*'"), t->name);
4380 if (t->opcode_modifier.isprefix
4381 && t->opcode_modifier.ignoresize)
4383 /* Warn them that a data or address size prefix doesn't
4384 affect assembly of the next line of code. */
4385 as_warn (_("stand-alone `%s' prefix"), t->name);
4389 /* Copy the template we found. */
4392 if (addr_prefix_disp != -1)
4393 i.tm.operand_types[addr_prefix_disp]
4394 = operand_types[addr_prefix_disp];
4396 if (found_reverse_match)
4398 /* If we found a reverse match we must alter the opcode
4399 direction bit. found_reverse_match holds bits to change
4400 (different for int & float insns). */
4402 i.tm.base_opcode ^= found_reverse_match;
4404 i.tm.operand_types[0] = operand_types[1];
4405 i.tm.operand_types[1] = operand_types[0];
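/* check_string () below validates segment overrides on string
   instructions: the operand that is architecturally tied to %es may
   not carry a different override, and any override on the other
   operand is moved into i.seg[0], where the non-string code expects
   it. */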
4414 int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
4415 if (i.tm.operand_types[mem_op].bitfield.esseg)
4417 if (i.seg[0] != NULL && i.seg[0] != &es)
4419 as_bad (_("`%s' operand %d must use `%ses' segment"),
4425 /* There's only ever one segment override allowed per instruction.
4426 This instruction possibly has a legal segment override on the
4427 second operand, so copy the segment to where non-string
4428 instructions store it, allowing common code. */
4429 i.seg[0] = i.seg[1];
4431 else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
4433 if (i.seg[1] != NULL && i.seg[1] != &es)
4435 as_bad (_("`%s' operand %d must use `%ses' segment"),
4446 process_suffix (void)
4448 /* If matched instruction specifies an explicit instruction mnemonic suffix, use it. */
4450 if (i.tm.opcode_modifier.size16)
4451 i.suffix = WORD_MNEM_SUFFIX;
4452 else if (i.tm.opcode_modifier.size32)
4453 i.suffix = LONG_MNEM_SUFFIX;
4454 else if (i.tm.opcode_modifier.size64)
4455 i.suffix = QWORD_MNEM_SUFFIX;
4456 else if (i.reg_operands)
4458 /* If there's no instruction mnemonic suffix we try to invent one
4459 based on register operands. */
4462 /* We take i.suffix from the last register operand specified.
4463 Destination register type is more significant than source
4464 register type. crc32 in SSE4.2 prefers source register
4466 if (i.tm.base_opcode == 0xf20f38f1)
4468 if (i.types[0].bitfield.reg16)
4469 i.suffix = WORD_MNEM_SUFFIX;
4470 else if (i.types[0].bitfield.reg32)
4471 i.suffix = LONG_MNEM_SUFFIX;
4472 else if (i.types[0].bitfield.reg64)
4473 i.suffix = QWORD_MNEM_SUFFIX;
4475 else if (i.tm.base_opcode == 0xf20f38f0)
4477 if (i.types[0].bitfield.reg8)
4478 i.suffix = BYTE_MNEM_SUFFIX;
4485 if (i.tm.base_opcode == 0xf20f38f1
4486 || i.tm.base_opcode == 0xf20f38f0)
4488 /* We have to know the operand size for crc32. */
4489 as_bad (_("ambiguous memory operand size for `%s'"),
4494 for (op = i.operands; --op >= 0;)
4495 if (!i.tm.operand_types[op].bitfield.inoutportreg)
4497 if (i.types[op].bitfield.reg8)
4499 i.suffix = BYTE_MNEM_SUFFIX;
4502 else if (i.types[op].bitfield.reg16)
4504 i.suffix = WORD_MNEM_SUFFIX;
4507 else if (i.types[op].bitfield.reg32)
4509 i.suffix = LONG_MNEM_SUFFIX;
4512 else if (i.types[op].bitfield.reg64)
4514 i.suffix = QWORD_MNEM_SUFFIX;
4520 else if (i.suffix == BYTE_MNEM_SUFFIX)
4523 && i.tm.opcode_modifier.ignoresize
4524 && i.tm.opcode_modifier.no_bsuf)
4526 else if (!check_byte_reg ())
4529 else if (i.suffix == LONG_MNEM_SUFFIX)
4532 && i.tm.opcode_modifier.ignoresize
4533 && i.tm.opcode_modifier.no_lsuf)
4535 else if (!check_long_reg ())
4538 else if (i.suffix == QWORD_MNEM_SUFFIX)
4541 && i.tm.opcode_modifier.ignoresize
4542 && i.tm.opcode_modifier.no_qsuf)
4544 else if (!check_qword_reg ())
4547 else if (i.suffix == WORD_MNEM_SUFFIX)
4550 && i.tm.opcode_modifier.ignoresize
4551 && i.tm.opcode_modifier.no_wsuf)
4553 else if (!check_word_reg ())
4556 else if (i.suffix == XMMWORD_MNEM_SUFFIX
4557 || i.suffix == YMMWORD_MNEM_SUFFIX)
4559 /* Skip if the instruction has x/y suffix. match_template
4560 should check if it is a valid suffix. */
4562 else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
4563 /* Do nothing if the instruction is going to ignore the prefix. */
4568 else if (i.tm.opcode_modifier.defaultsize
4570 /* exclude fldenv/frstor/fsave/fstenv */
4571 && i.tm.opcode_modifier.no_ssuf)
4573 i.suffix = stackop_size;
4575 else if (intel_syntax
4577 && (i.tm.operand_types[0].bitfield.jumpabsolute
4578 || i.tm.opcode_modifier.jumpbyte
4579 || i.tm.opcode_modifier.jumpintersegment
4580 || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
4581 && i.tm.extension_opcode <= 3)))
4586 if (!i.tm.opcode_modifier.no_qsuf)
4588 i.suffix = QWORD_MNEM_SUFFIX;
4592 if (!i.tm.opcode_modifier.no_lsuf)
4593 i.suffix = LONG_MNEM_SUFFIX;
4596 if (!i.tm.opcode_modifier.no_wsuf)
4597 i.suffix = WORD_MNEM_SUFFIX;
4606 if (i.tm.opcode_modifier.w)
4608 as_bad (_("no instruction mnemonic suffix given and "
4609 "no register operands; can't size instruction"));
4615 unsigned int suffixes;
4617 suffixes = !i.tm.opcode_modifier.no_bsuf;
4618 if (!i.tm.opcode_modifier.no_wsuf)
4620 if (!i.tm.opcode_modifier.no_lsuf)
4622 if (!i.tm.opcode_modifier.no_ldsuf)
4624 if (!i.tm.opcode_modifier.no_ssuf)
4626 if (!i.tm.opcode_modifier.no_qsuf)
4629 /* There is more than one suffix match. */
4630 if (i.tm.opcode_modifier.w
4631 || ((suffixes & (suffixes - 1))
4632 && !i.tm.opcode_modifier.defaultsize
4633 && !i.tm.opcode_modifier.ignoresize))
4635 as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
4641 /* Change the opcode based on the operand size given by i.suffix;
4642 We don't need to change things for byte insns. */
4645 && i.suffix != BYTE_MNEM_SUFFIX
4646 && i.suffix != XMMWORD_MNEM_SUFFIX
4647 && i.suffix != YMMWORD_MNEM_SUFFIX)
4649 /* It's not a byte, select word/dword operation. */
4650 if (i.tm.opcode_modifier.w)
4652 if (i.tm.opcode_modifier.shortform)
4653 i.tm.base_opcode |= 8;
4655 i.tm.base_opcode |= 1;
4658 /* Now select between word & dword operations via the operand
4659 size prefix, except for instructions that will ignore this prefix anyway. */
4661 if (i.tm.opcode_modifier.addrprefixop0)
4663 /* The address size override prefix changes the size of the first operand. */
4665 if ((flag_code == CODE_32BIT
4666 && i.op->regs[0].reg_type.bitfield.reg16)
4667 || (flag_code != CODE_32BIT
4668 && i.op->regs[0].reg_type.bitfield.reg32))
4669 if (!add_prefix (ADDR_PREFIX_OPCODE))
4672 else if (i.suffix != QWORD_MNEM_SUFFIX
4673 && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
4674 && !i.tm.opcode_modifier.ignoresize
4675 && !i.tm.opcode_modifier.floatmf
4676 && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
4677 || (flag_code == CODE_64BIT
4678 && i.tm.opcode_modifier.jumpbyte)))
4680 unsigned int prefix = DATA_PREFIX_OPCODE;
4682 if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
4683 prefix = ADDR_PREFIX_OPCODE;
4685 if (!add_prefix (prefix))
4689 /* Set mode64 for an operand. */
4690 if (i.suffix == QWORD_MNEM_SUFFIX
4691 && flag_code == CODE_64BIT
4692 && !i.tm.opcode_modifier.norex64)
4694 /* Special case for xchg %rax,%rax. It is NOP and doesn't
4695 need rex64. cmpxchg8b is also a special case. */
4696 if (! (i.operands == 2
4697 && i.tm.base_opcode == 0x90
4698 && i.tm.extension_opcode == None
4699 && operand_type_equal (&i.types [0], &acc64)
4700 && operand_type_equal (&i.types [1], &acc64))
4701 && ! (i.operands == 1
4702 && i.tm.base_opcode == 0xfc7
4703 && i.tm.extension_opcode == 1
4704 && !operand_type_check (i.types [0], reg)
4705 && operand_type_check (i.types [0], anymem)))
4709 /* Size floating point instruction. */
4710 if (i.suffix == LONG_MNEM_SUFFIX)
4711 if (i.tm.opcode_modifier.floatmf)
4712 i.tm.base_opcode ^= 4;
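/* check_byte_reg, check_long_reg, check_qword_reg and check_word_reg
   below verify that each register operand agrees with the suffix
   chosen above, warning when a wider register is silently narrowed
   and rejecting combinations that cannot be encoded (notably in
   64-bit mode). */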
4719 check_byte_reg (void)
4723 for (op = i.operands; --op >= 0;)
4725 /* If this is an eight bit register, it's OK. If it's the 16 or
4726 32 bit version of an eight bit register, we will just use the
4727 low portion, and that's OK too. */
4728 if (i.types[op].bitfield.reg8)
4731 /* crc32 doesn't generate this warning. */
4732 if (i.tm.base_opcode == 0xf20f38f0)
4735 if ((i.types[op].bitfield.reg16
4736 || i.types[op].bitfield.reg32
4737 || i.types[op].bitfield.reg64)
4738 && i.op[op].regs->reg_num < 4)
4740 /* Prohibit these changes in the 64bit mode, since the
4741 lowering is more complicated. */
4742 if (flag_code == CODE_64BIT
4743 && !i.tm.operand_types[op].bitfield.inoutportreg)
4745 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4746 register_prefix, i.op[op].regs->reg_name,
4750 #if REGISTER_WARNINGS
4752 && !i.tm.operand_types[op].bitfield.inoutportreg)
4753 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4755 (i.op[op].regs + (i.types[op].bitfield.reg16
4756 ? REGNAM_AL - REGNAM_AX
4757 : REGNAM_AL - REGNAM_EAX))->reg_name,
4759 i.op[op].regs->reg_name,
4764 /* Any other register is bad. */
4765 if (i.types[op].bitfield.reg16
4766 || i.types[op].bitfield.reg32
4767 || i.types[op].bitfield.reg64
4768 || i.types[op].bitfield.regmmx
4769 || i.types[op].bitfield.regxmm
4770 || i.types[op].bitfield.regymm
4771 || i.types[op].bitfield.sreg2
4772 || i.types[op].bitfield.sreg3
4773 || i.types[op].bitfield.control
4774 || i.types[op].bitfield.debug
4775 || i.types[op].bitfield.test
4776 || i.types[op].bitfield.floatreg
4777 || i.types[op].bitfield.floatacc)
4779 as_bad (_("`%s%s' not allowed with `%s%c'"),
4781 i.op[op].regs->reg_name,
4791 check_long_reg (void)
4795 for (op = i.operands; --op >= 0;)
4796 /* Reject eight bit registers, except where the template requires
4797 them. (eg. movzb) */
4798 if (i.types[op].bitfield.reg8
4799 && (i.tm.operand_types[op].bitfield.reg16
4800 || i.tm.operand_types[op].bitfield.reg32
4801 || i.tm.operand_types[op].bitfield.acc))
4803 as_bad (_("`%s%s' not allowed with `%s%c'"),
4805 i.op[op].regs->reg_name,
4810 /* Warn if the e prefix on a general reg is missing. */
4811 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4812 && i.types[op].bitfield.reg16
4813 && (i.tm.operand_types[op].bitfield.reg32
4814 || i.tm.operand_types[op].bitfield.acc))
4816 /* Prohibit these changes in the 64bit mode, since the
4817 lowering is more complicated. */
4818 if (flag_code == CODE_64BIT)
4820 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4821 register_prefix, i.op[op].regs->reg_name,
4825 #if REGISTER_WARNINGS
4827 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4829 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
4831 i.op[op].regs->reg_name,
4835 /* Warn if the r prefix on a general reg is missing. */
4836 else if (i.types[op].bitfield.reg64
4837 && (i.tm.operand_types[op].bitfield.reg32
4838 || i.tm.operand_types[op].bitfield.acc))
4841 && i.tm.opcode_modifier.toqword
4842 && !i.types[0].bitfield.regxmm)
4844 /* Convert to QWORD. We want REX byte. */
4845 i.suffix = QWORD_MNEM_SUFFIX;
4849 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4850 register_prefix, i.op[op].regs->reg_name,
4859 check_qword_reg (void)
4863 for (op = i.operands; --op >= 0; )
4864 /* Reject eight bit registers, except where the template requires
4865 them. (eg. movzb) */
4866 if (i.types[op].bitfield.reg8
4867 && (i.tm.operand_types[op].bitfield.reg16
4868 || i.tm.operand_types[op].bitfield.reg32
4869 || i.tm.operand_types[op].bitfield.acc))
4871 as_bad (_("`%s%s' not allowed with `%s%c'"),
4873 i.op[op].regs->reg_name,
4878 /* Warn if the e prefix on a general reg is missing. */
4879 else if ((i.types[op].bitfield.reg16
4880 || i.types[op].bitfield.reg32)
4881 && (i.tm.operand_types[op].bitfield.reg32
4882 || i.tm.operand_types[op].bitfield.acc))
4884 /* Prohibit these changes in the 64bit mode, since the
4885 lowering is more complicated. */
4887 && i.tm.opcode_modifier.todword
4888 && !i.types[0].bitfield.regxmm)
4890 /* Convert to DWORD. We don't want REX byte. */
4891 i.suffix = LONG_MNEM_SUFFIX;
4895 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4896 register_prefix, i.op[op].regs->reg_name,
4905 check_word_reg (void)
4908 for (op = i.operands; --op >= 0;)
4909 /* Reject eight bit registers, except where the template requires
4910 them. (eg. movzb) */
4911 if (i.types[op].bitfield.reg8
4912 && (i.tm.operand_types[op].bitfield.reg16
4913 || i.tm.operand_types[op].bitfield.reg32
4914 || i.tm.operand_types[op].bitfield.acc))
4916 as_bad (_("`%s%s' not allowed with `%s%c'"),
4918 i.op[op].regs->reg_name,
4923 /* Warn if the e prefix on a general reg is present. */
4924 else if ((!quiet_warnings || flag_code == CODE_64BIT)
4925 && i.types[op].bitfield.reg32
4926 && (i.tm.operand_types[op].bitfield.reg16
4927 || i.tm.operand_types[op].bitfield.acc))
4929 /* Prohibit these changes in the 64bit mode, since the
4930 lowering is more complicated. */
4931 if (flag_code == CODE_64BIT)
4933 as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
4934 register_prefix, i.op[op].regs->reg_name,
4939 #if REGISTER_WARNINGS
4940 as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
4942 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
4944 i.op[op].regs->reg_name,
4952 update_imm (unsigned int j)
4954 i386_operand_type overlap = i.types[j];
4955 if ((overlap.bitfield.imm8
4956 || overlap.bitfield.imm8s
4957 || overlap.bitfield.imm16
4958 || overlap.bitfield.imm32
4959 || overlap.bitfield.imm32s
4960 || overlap.bitfield.imm64)
4961 && !operand_type_equal (&overlap, &imm8)
4962 && !operand_type_equal (&overlap, &imm8s)
4963 && !operand_type_equal (&overlap, &imm16)
4964 && !operand_type_equal (&overlap, &imm32)
4965 && !operand_type_equal (&overlap, &imm32s)
4966 && !operand_type_equal (&overlap, &imm64))
4970 i386_operand_type temp;
4972 operand_type_set (&temp, 0);
4973 if (i.suffix == BYTE_MNEM_SUFFIX)
4975 temp.bitfield.imm8 = overlap.bitfield.imm8;
4976 temp.bitfield.imm8s = overlap.bitfield.imm8s;
4978 else if (i.suffix == WORD_MNEM_SUFFIX)
4979 temp.bitfield.imm16 = overlap.bitfield.imm16;
4980 else if (i.suffix == QWORD_MNEM_SUFFIX)
4982 temp.bitfield.imm64 = overlap.bitfield.imm64;
4983 temp.bitfield.imm32s = overlap.bitfield.imm32s;
4986 temp.bitfield.imm32 = overlap.bitfield.imm32;
4989 else if (operand_type_equal (&overlap, &imm16_32_32s)
4990 || operand_type_equal (&overlap, &imm16_32)
4991 || operand_type_equal (&overlap, &imm16_32s))
4993 if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4998 if (!operand_type_equal (&overlap, &imm8)
4999 && !operand_type_equal (&overlap, &imm8s)
5000 && !operand_type_equal (&overlap, &imm16)
5001 && !operand_type_equal (&overlap, &imm32)
5002 && !operand_type_equal (&overlap, &imm32s)
5003 && !operand_type_equal (&overlap, &imm64))
5005 as_bad (_("no instruction mnemonic suffix given; "
5006 "can't determine immediate size"));
5010 i.types[j] = overlap;
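/* To recap the narrowing above: when an immediate still matches
   more than one width after template matching (say Imm16|Imm32),
   the suffix decides - `w' keeps only Imm16, `q' keeps
   Imm32S/Imm64, otherwise Imm32 is kept; with no suffix at all
   and an ambiguous width, the error above is reported. */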
5020 /* Update the first 2 immediate operands. */
5021 n = i.operands > 2 ? 2 : i.operands;
5024 for (j = 0; j < n; j++)
5025 if (update_imm (j) == 0)
5028 /* The 3rd operand can't be immediate operand. */
5029 gas_assert (operand_type_check (i.types[2], imm) == 0);
5036 bad_implicit_operand (int xmm)
5038 const char *ireg = xmm ? "xmm0" : "ymm0";
5041 as_bad (_("the last operand of `%s' must be `%s%s'"),
5042 i.tm.name, register_prefix, ireg);
5044 as_bad (_("the first operand of `%s' must be `%s%s'"),
5045 i.tm.name, register_prefix, ireg);
5050 process_operands (void)
5052 /* Default segment register this instruction will use for memory
5053 accesses. 0 means unknown. This is only for optimizing out
5054 unnecessary segment overrides. */
5055 const seg_entry *default_seg = 0;
5057 if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5059 unsigned int dupl = i.operands;
5060 unsigned int dest = dupl - 1;
5063 /* The destination must be an xmm register. */
5064 gas_assert (i.reg_operands
5065 && MAX_OPERANDS > dupl
5066 && operand_type_equal (&i.types[dest], &regxmm));
5068 if (i.tm.opcode_modifier.firstxmm0)
5070 /* The first operand is implicit and must be xmm0. */
5071 gas_assert (operand_type_equal (&i.types[0], &regxmm));
5072 if (i.op[0].regs->reg_num != 0)
5073 return bad_implicit_operand (1);
5075 if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5077 /* Keep xmm0 for instructions with VEX prefix and 3
5083 /* We remove the first xmm0 and keep the number of
5084 operands unchanged, which in fact duplicates the
5086 for (j = 1; j < i.operands; j++)
5088 i.op[j - 1] = i.op[j];
5089 i.types[j - 1] = i.types[j];
5090 i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5094 else if (i.tm.opcode_modifier.implicit1stxmm0)
5096 gas_assert ((MAX_OPERANDS - 1) > dupl
5097 && (i.tm.opcode_modifier.vexsources
5100 /* Add the implicit xmm0 for instructions with VEX prefix
5102 for (j = i.operands; j > 0; j--)
5104 i.op[j] = i.op[j - 1];
5105 i.types[j] = i.types[j - 1];
5106 i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5109 = (const reg_entry *) hash_find (reg_hash, "xmm0");
5110 i.types[0] = regxmm;
5111 i.tm.operand_types[0] = regxmm;
5114 i.reg_operands += 2;
5119 i.op[dupl] = i.op[dest];
5120 i.types[dupl] = i.types[dest];
5121 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5130 i.op[dupl] = i.op[dest];
5131 i.types[dupl] = i.types[dest];
5132 i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5135 if (i.tm.opcode_modifier.immext)
5138 else if (i.tm.opcode_modifier.firstxmm0)
5142 /* The first operand is implicit and must be xmm0/ymm0. */
5143 gas_assert (i.reg_operands
5144 && (operand_type_equal (&i.types[0], &regxmm)
5145 || operand_type_equal (&i.types[0], &regymm)));
5146 if (i.op[0].regs->reg_num != 0)
5147 return bad_implicit_operand (i.types[0].bitfield.regxmm);
5149 for (j = 1; j < i.operands; j++)
5151 i.op[j - 1] = i.op[j];
5152 i.types[j - 1] = i.types[j];
5154 /* We need to adjust fields in i.tm since they are used by
5155 build_modrm_byte. */
5156 i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5163 else if (i.tm.opcode_modifier.regkludge)
5165 /* The imul $imm, %reg instruction is converted into
5166 imul $imm, %reg, %reg, and the clr %reg instruction
5167 is converted into xor %reg, %reg. */
5169 unsigned int first_reg_op;
5171 if (operand_type_check (i.types[0], reg))
5175 /* Pretend we saw the extra register operand. */
5176 gas_assert (i.reg_operands == 1
5177 && i.op[first_reg_op + 1].regs == 0);
5178 i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5179 i.types[first_reg_op + 1] = i.types[first_reg_op];
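/* Duplicating the register gives build_modrm_byte an ordinary
   two-operand form to encode, so e.g. `clr %eax' comes out as
   `xorl %eax, %eax' (31 c0). */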
5184 if (i.tm.opcode_modifier.shortform)
5186 if (i.types[0].bitfield.sreg2
5187 || i.types[0].bitfield.sreg3)
5189 if (i.tm.base_opcode == POP_SEG_SHORT
5190 && i.op[0].regs->reg_num == 1)
5192 as_bad (_("you can't `pop %scs'"), register_prefix);
5195 i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
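/* e.g. `push %ss' (segment register 2) becomes 0x06 | (2 << 3)
   = 0x16.  `pop %cs' is refused above because its would-be
   opcode, 0x0f, is the two-byte opcode escape. */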
5196 if ((i.op[0].regs->reg_flags & RegRex) != 0)
5201 /* The register or float register operand is in operand
5205 if (i.types[0].bitfield.floatreg
5206 || operand_type_check (i.types[0], reg))
5210 /* Register goes in low 3 bits of opcode. */
5211 i.tm.base_opcode |= i.op[op].regs->reg_num;
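/* e.g. `push %esi' is 0x50 | 6 = 0x56; for %r8-%r15 the RegRex
   flag tested below additionally selects the REX.B extension
   bit. */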
5212 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5214 if (!quiet_warnings && i.tm.opcode_modifier.ugh)
5216 /* Warn about some common errors, but press on regardless.
5217 The first case can be generated by gcc (<= 2.8.1). */
5218 if (i.operands == 2)
5220 /* Reversed arguments on faddp, fsubp, etc. */
5221 as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
5222 register_prefix, i.op[!intel_syntax].regs->reg_name,
5223 register_prefix, i.op[intel_syntax].regs->reg_name);
5227 /* Extraneous `l' suffix on fp insn. */
5228 as_warn (_("translating to `%s %s%s'"), i.tm.name,
5229 register_prefix, i.op[0].regs->reg_name);
5234 else if (i.tm.opcode_modifier.modrm)
5236 /* The opcode is completed (modulo i.tm.extension_opcode which
5237 must be put into the modrm byte). Now, we make the modrm and
5238 index base bytes based on all the info we've collected. */
5240 default_seg = build_modrm_byte ();
5242 else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
5246 else if (i.tm.opcode_modifier.isstring)
5248 /* For the string instructions that allow a segment override
5249 on one of their operands, the default segment is ds. */
5253 if (i.tm.base_opcode == 0x8d /* lea */
5256 as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
5258 /* If a segment was explicitly specified, and the specified segment
5259 is not the default, use an opcode prefix to select it. If we
5260 never figured out what the default segment is, then default_seg
5261 will be zero at this point, and the specified segment prefix will
5263 if ((i.seg[0]) && (i.seg[0] != default_seg))
5265 if (!add_prefix (i.seg[0]->seg_prefix))
5271 static const seg_entry *
5272 build_modrm_byte (void)
5274 const seg_entry *default_seg = 0;
5275 unsigned int source, dest;
5278 /* The first operand of instructions with VEX prefix and 3 sources
5279 must be VEX_Imm4. */
5280 vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
5283 unsigned int nds, reg_slot;
5286 if (i.tm.opcode_modifier.veximmext
5287 && i.tm.opcode_modifier.immext)
5289 dest = i.operands - 2;
5290 gas_assert (dest == 3);
5293 dest = i.operands - 1;
5296 /* There are 2 kinds of instructions:
5297 1. 5 operands: 4 register operands or 3 register operands
5298 plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
5299 VexW0 or VexW1. The destination must be either XMM or YMM
5301 2. 4 operands: 4 register operands or 3 register operands
5302 plus 1 memory operand, VexXDS, and VexImmExt */
5303 gas_assert ((i.reg_operands == 4
5304 || (i.reg_operands == 3 && i.mem_operands == 1))
5305 && i.tm.opcode_modifier.vexvvvv == VEXXDS
5306 && (i.tm.opcode_modifier.veximmext
5307 || (i.imm_operands == 1
5308 && i.types[0].bitfield.vec_imm4
5309 && (i.tm.opcode_modifier.vexw == VEXW0
5310 || i.tm.opcode_modifier.vexw == VEXW1)
5311 && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
5312 || operand_type_equal (&i.tm.operand_types[dest], &regymm)))));
5314 if (i.imm_operands == 0)
5316 /* When there is no immediate operand, generate an 8bit
5317 immediate operand to encode the first operand. */
5318 exp = &im_expressions[i.imm_operands++];
5319 i.op[i.operands].imms = exp;
5320 i.types[i.operands] = imm8;
5322 /* If VexW1 is set, the first operand is the source and
5323 the second operand is encoded in the immediate operand. */
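/* For these 3-source VEX forms the extra register number (plus
   8 for a RegRex register) is carried in the high four bits of
   that trailing imm8 - the "is4" byte of the encoding - which
   is what the arithmetic below constructs. */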
5324 if (i.tm.opcode_modifier.vexw == VEXW1)
5335 /* FMA swaps REG and NDS. */
5336 if (i.tm.cpu_flags.bitfield.cpufma)
5344 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5346 || operand_type_equal (&i.tm.operand_types[reg_slot],
5348 exp->X_op = O_constant;
5350 = ((i.op[reg_slot].regs->reg_num
5351 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5356 unsigned int imm_slot;
5358 if (i.tm.opcode_modifier.vexw == VEXW0)
5360 /* If VexW0 is set, the third operand is the source and
5361 the second operand is encoded in the immediate
5368 /* VexW1 is set, the second operand is the source and
5369 the third operand is encoded in the immediate
5375 if (i.tm.opcode_modifier.immext)
5377 /* When ImmExt is set, the immediate byte is the last
5379 imm_slot = i.operands - 1;
5387 /* Turn on Imm8 so that output_imm will generate it. */
5388 i.types[imm_slot].bitfield.imm8 = 1;
5391 gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
5393 || operand_type_equal (&i.tm.operand_types[reg_slot],
5395 i.op[imm_slot].imms->X_add_number
5396 |= ((i.op[reg_slot].regs->reg_num
5397 + ((i.op[reg_slot].regs->reg_flags & RegRex) ? 8 : 0))
5401 gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
5402 || operand_type_equal (&i.tm.operand_types[nds],
5404 i.vex.register_specifier = i.op[nds].regs;
5409 /* i.reg_operands MUST be the number of real register operands;
5410 implicit registers do not count. If there are 3 register
5411 operands, it must be an instruction with VexNDS. For an
5412 instruction with VexNDD, the destination register is encoded
5413 in the VEX prefix. If there are 4 register operands, it must be
5414 an instruction with VEX prefix and 3 sources. */
5415 if (i.mem_operands == 0
5416 && ((i.reg_operands == 2
5417 && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
5418 || (i.reg_operands == 3
5419 && i.tm.opcode_modifier.vexvvvv == VEXXDS)
5420 || (i.reg_operands == 4 && vex_3_sources)))
5428 /* When there are 3 operands, one of them may be immediate,
5429 which may be the first or the last operand. Otherwise,
5430 the first operand must be shift count register (cl) or it
5431 is an instruction with VexNDS. */
5432 gas_assert (i.imm_operands == 1
5433 || (i.imm_operands == 0
5434 && (i.tm.opcode_modifier.vexvvvv == VEXXDS
5435 || i.types[0].bitfield.shiftcount)));
5436 if (operand_type_check (i.types[0], imm)
5437 || i.types[0].bitfield.shiftcount)
5443 /* When there are 4 operands, the first two must be 8bit
5444 immediate operands. The source operand will be the 3rd
5447 For instructions with VexNDS, if the first operand is
5448 an imm8, the source operand is the 2nd one. If the last
5449 operand is imm8, the source operand is the first one. */
5450 gas_assert ((i.imm_operands == 2
5451 && i.types[0].bitfield.imm8
5452 && i.types[1].bitfield.imm8)
5453 || (i.tm.opcode_modifier.vexvvvv == VEXXDS
5454 && i.imm_operands == 1
5455 && (i.types[0].bitfield.imm8
5456 || i.types[i.operands - 1].bitfield.imm8)));
5457 if (i.imm_operands == 2)
5461 if (i.types[0].bitfield.imm8)
5477 if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5479 /* For instructions with VexNDS, the register-only
5480 source operand must be 32/64bit integer, XMM or
5481 YMM register. It is encoded in VEX prefix. We
5482 need to clear RegMem bit before calling
5483 operand_type_equal. */
5485 i386_operand_type op;
5488 /* Check register-only source operand when two source
5489 operands are swapped. */
5490 if (!i.tm.operand_types[source].bitfield.baseindex
5491 && i.tm.operand_types[dest].bitfield.baseindex)
5499 op = i.tm.operand_types[vvvv];
5500 op.bitfield.regmem = 0;
5501 if ((dest + 1) >= i.operands
5502 || (op.bitfield.reg32 != 1
5503 && op.bitfield.reg64 != 1
5504 && !operand_type_equal (&op, &regxmm)
5505 && !operand_type_equal (&op, &regymm)))
5507 i.vex.register_specifier = i.op[vvvv].regs;
5513 /* One of the register operands will be encoded in the i.rm.reg
5514 field, the other in the combined i.rm.mode and i.rm.regmem
5515 fields. If no form of this instruction supports a memory
5516 destination operand, then we assume the source operand may
5517 sometimes be a memory operand and so we need to store the
5518 destination in the i.rm.reg field. */
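/* e.g. `movl %eax, %ebx' uses opcode 0x89, whose destination
   operand may be memory, so the source %eax lands in ModRM.reg
   and %ebx in ModRM.rm, giving 89 c3. */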
5519 if (!i.tm.operand_types[dest].bitfield.regmem
5520 && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
5522 i.rm.reg = i.op[dest].regs->reg_num;
5523 i.rm.regmem = i.op[source].regs->reg_num;
5524 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5526 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5531 i.rm.reg = i.op[source].regs->reg_num;
5532 i.rm.regmem = i.op[dest].regs->reg_num;
5533 if ((i.op[dest].regs->reg_flags & RegRex) != 0)
5535 if ((i.op[source].regs->reg_flags & RegRex) != 0)
5538 if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
5540 if (!i.types[0].bitfield.control
5541 && !i.types[1].bitfield.control)
5543 i.rex &= ~(REX_R | REX_B);
5544 add_prefix (LOCK_PREFIX_OPCODE);
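/* i.e. outside 64-bit mode a reference to %cr8..%cr15 cannot use
   REX.R/REX.B, so the AMD-documented alternative of a LOCK
   prefix on the mov-to/from-CR encoding is emitted instead. */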
5548 { /* If it's not 2 reg operands... */
5553 unsigned int fake_zero_displacement = 0;
5556 for (op = 0; op < i.operands; op++)
5557 if (operand_type_check (i.types[op], anymem))
5559 gas_assert (op < i.operands);
5561 if (i.tm.opcode_modifier.vecsib)
5563 if (i.index_reg->reg_num == RegEiz
5564 || i.index_reg->reg_num == RegRiz)
5567 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5570 i.sib.base = NO_BASE_REGISTER;
5571 i.sib.scale = i.log2_scale_factor;
5572 i.types[op].bitfield.disp8 = 0;
5573 i.types[op].bitfield.disp16 = 0;
5574 i.types[op].bitfield.disp64 = 0;
5575 if (flag_code != CODE_64BIT)
5577 /* Must be 32 bit */
5578 i.types[op].bitfield.disp32 = 1;
5579 i.types[op].bitfield.disp32s = 0;
5583 i.types[op].bitfield.disp32 = 0;
5584 i.types[op].bitfield.disp32s = 1;
5587 i.sib.index = i.index_reg->reg_num;
5588 if ((i.index_reg->reg_flags & RegRex) != 0)
5594 if (i.base_reg == 0)
5597 if (!i.disp_operands)
5599 fake_zero_displacement = 1;
5600 /* Instructions with VSIB byte need 32bit displacement
5601 if there is no base register. */
5602 if (i.tm.opcode_modifier.vecsib)
5603 i.types[op].bitfield.disp32 = 1;
5605 if (i.index_reg == 0)
5607 gas_assert (!i.tm.opcode_modifier.vecsib);
5608 /* Operand is just <disp> */
5609 if (flag_code == CODE_64BIT)
5611 /* 64bit mode replaces the 32bit absolute
5612 addressing form with RIP relative addressing, so
5613 absolute addressing has to be encoded by one of the
5614 redundant SIB forms. */
5615 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5616 i.sib.base = NO_BASE_REGISTER;
5617 i.sib.index = NO_INDEX_REGISTER;
5618 i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
5619 ? disp32s : disp32);
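/* e.g. `movl 0x1234, %eax' in 64-bit code assembles to
   8b 04 25 34 12 00 00: ModRM 0x04 selects a SIB byte, and
   SIB 0x25 (no index, base = disp32) gives the absolute form. */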
5621 else if ((flag_code == CODE_16BIT)
5622 ^ (i.prefix[ADDR_PREFIX] != 0))
5624 i.rm.regmem = NO_BASE_REGISTER_16;
5625 i.types[op] = disp16;
5629 i.rm.regmem = NO_BASE_REGISTER;
5630 i.types[op] = disp32;
5633 else if (!i.tm.opcode_modifier.vecsib)
5635 /* !i.base_reg && i.index_reg */
5636 if (i.index_reg->reg_num == RegEiz
5637 || i.index_reg->reg_num == RegRiz)
5638 i.sib.index = NO_INDEX_REGISTER;
5640 i.sib.index = i.index_reg->reg_num;
5641 i.sib.base = NO_BASE_REGISTER;
5642 i.sib.scale = i.log2_scale_factor;
5643 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5644 i.types[op].bitfield.disp8 = 0;
5645 i.types[op].bitfield.disp16 = 0;
5646 i.types[op].bitfield.disp64 = 0;
5647 if (flag_code != CODE_64BIT)
5649 /* Must be 32 bit */
5650 i.types[op].bitfield.disp32 = 1;
5651 i.types[op].bitfield.disp32s = 0;
5655 i.types[op].bitfield.disp32 = 0;
5656 i.types[op].bitfield.disp32s = 1;
5658 if ((i.index_reg->reg_flags & RegRex) != 0)
5662 /* RIP addressing for 64bit mode. */
5663 else if (i.base_reg->reg_num == RegRip ||
5664 i.base_reg->reg_num == RegEip)
5666 gas_assert (!i.tm.opcode_modifier.vecsib);
5667 i.rm.regmem = NO_BASE_REGISTER;
5668 i.types[op].bitfield.disp8 = 0;
5669 i.types[op].bitfield.disp16 = 0;
5670 i.types[op].bitfield.disp32 = 0;
5671 i.types[op].bitfield.disp32s = 1;
5672 i.types[op].bitfield.disp64 = 0;
5673 i.flags[op] |= Operand_PCrel;
5674 if (! i.disp_operands)
5675 fake_zero_displacement = 1;
5677 else if (i.base_reg->reg_type.bitfield.reg16)
5679 gas_assert (!i.tm.opcode_modifier.vecsib);
5680 switch (i.base_reg->reg_num)
5683 if (i.index_reg == 0)
5685 else /* (%bx,%si) -> 0, or (%bx,%di) -> 1 */
5686 i.rm.regmem = i.index_reg->reg_num - 6;
5690 if (i.index_reg == 0)
5693 if (operand_type_check (i.types[op], disp) == 0)
5695 /* fake (%bp) into 0(%bp) */
5696 i.types[op].bitfield.disp8 = 1;
5697 fake_zero_displacement = 1;
5700 else /* (%bp,%si) -> 2, or (%bp,%di) -> 3 */
5701 i.rm.regmem = i.index_reg->reg_num - 6 + 2;
5703 default: /* (%si) -> 4 or (%di) -> 5 */
5704 i.rm.regmem = i.base_reg->reg_num - 6 + 4;
5706 i.rm.mode = mode_from_disp_size (i.types[op]);
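/* Reference for the 16-bit r/m values used above:
   0 (%bx,%si)  1 (%bx,%di)  2 (%bp,%si)  3 (%bp,%di)
   4 (%si)  5 (%di)  6 (%bp), or disp16 when mod == 0  7 (%bx). */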
5708 else /* i.base_reg and 32/64 bit mode */
5710 if (flag_code == CODE_64BIT
5711 && operand_type_check (i.types[op], disp))
5713 i386_operand_type temp;
5714 operand_type_set (&temp, 0);
5715 temp.bitfield.disp8 = i.types[op].bitfield.disp8;
5717 if (i.prefix[ADDR_PREFIX] == 0)
5718 i.types[op].bitfield.disp32s = 1;
5720 i.types[op].bitfield.disp32 = 1;
5723 if (!i.tm.opcode_modifier.vecsib)
5724 i.rm.regmem = i.base_reg->reg_num;
5725 if ((i.base_reg->reg_flags & RegRex) != 0)
5727 i.sib.base = i.base_reg->reg_num;
5728 /* x86-64 ignores REX prefix bit here to avoid decoder
5730 if ((i.base_reg->reg_num & 7) == EBP_REG_NUM)
5733 if (i.disp_operands == 0)
5735 fake_zero_displacement = 1;
5736 i.types[op].bitfield.disp8 = 1;
5739 else if (i.base_reg->reg_num == ESP_REG_NUM)
5743 i.sib.scale = i.log2_scale_factor;
5744 if (i.index_reg == 0)
5746 gas_assert (!i.tm.opcode_modifier.vecsib);
5747 /* <disp>(%esp) becomes two byte modrm with no index
5748 register. We've already stored the code for esp
5749 in i.rm.regmem ie. ESCAPE_TO_TWO_BYTE_ADDRESSING.
5750 Any base register besides %esp will not use the
5751 extra modrm byte. */
5752 i.sib.index = NO_INDEX_REGISTER;
5754 else if (!i.tm.opcode_modifier.vecsib)
5756 if (i.index_reg->reg_num == RegEiz
5757 || i.index_reg->reg_num == RegRiz)
5758 i.sib.index = NO_INDEX_REGISTER;
5760 i.sib.index = i.index_reg->reg_num;
5761 i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
5762 if ((i.index_reg->reg_flags & RegRex) != 0)
5767 && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
5768 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
5772 if (!fake_zero_displacement
5776 fake_zero_displacement = 1;
5777 if (i.disp_encoding == disp_encoding_8bit)
5778 i.types[op].bitfield.disp8 = 1;
5780 i.types[op].bitfield.disp32 = 1;
5782 i.rm.mode = mode_from_disp_size (i.types[op]);
5786 if (fake_zero_displacement)
5788 /* Fakes a zero displacement assuming that i.types[op]
5789 holds the correct displacement size. */
5792 gas_assert (i.op[op].disps == 0);
5793 exp = &disp_expressions[i.disp_operands++];
5794 i.op[op].disps = exp;
5795 exp->X_op = O_constant;
5796 exp->X_add_number = 0;
5797 exp->X_add_symbol = (symbolS *) 0;
5798 exp->X_op_symbol = (symbolS *) 0;
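/* e.g. a bare `(%ebp)' reaches here: mod becomes 01 and the
   one-byte displacement emitted for it is this constant zero. */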
5806 if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
5808 if (operand_type_check (i.types[0], imm))
5809 i.vex.register_specifier = NULL;
5812 /* VEX.vvvv encodes one of the sources when the first
5813 operand is not an immediate. */
5814 if (i.tm.opcode_modifier.vexw == VEXW0)
5815 i.vex.register_specifier = i.op[0].regs;
5817 i.vex.register_specifier = i.op[1].regs;
5820 /* Destination is an XMM register encoded in the ModRM.reg
5822 i.rm.reg = i.op[2].regs->reg_num;
5823 if ((i.op[2].regs->reg_flags & RegRex) != 0)
5826 /* ModRM.rm and VEX.B encodes the other source. */
5827 if (!i.mem_operands)
5831 if (i.tm.opcode_modifier.vexw == VEXW0)
5832 i.rm.regmem = i.op[1].regs->reg_num;
5834 i.rm.regmem = i.op[0].regs->reg_num;
5836 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5840 else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
5842 i.vex.register_specifier = i.op[2].regs;
5843 if (!i.mem_operands)
5846 i.rm.regmem = i.op[1].regs->reg_num;
5847 if ((i.op[1].regs->reg_flags & RegRex) != 0)
5851 /* Fill in i.rm.reg or i.rm.regmem field with register operand
5852 (if any) based on i.tm.extension_opcode. Again, we must be
5853 careful to make sure that segment/control/debug/test/MMX
5854 registers are coded into the i.rm.reg field. */
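/* e.g. `mov %cr0, %eax' (0f 20 /r) puts %cr0 in ModRM.reg and
   %eax in ModRM.rm, i.e. 0f 20 c0. */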
5855 else if (i.reg_operands)
5858 unsigned int vex_reg = ~0;
5860 for (op = 0; op < i.operands; op++)
5861 if (i.types[op].bitfield.reg8
5862 || i.types[op].bitfield.reg16
5863 || i.types[op].bitfield.reg32
5864 || i.types[op].bitfield.reg64
5865 || i.types[op].bitfield.regmmx
5866 || i.types[op].bitfield.regxmm
5867 || i.types[op].bitfield.regymm
5868 || i.types[op].bitfield.sreg2
5869 || i.types[op].bitfield.sreg3
5870 || i.types[op].bitfield.control
5871 || i.types[op].bitfield.debug
5872 || i.types[op].bitfield.test)
5877 else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
5879 /* For instructions with VexNDS, the register-only
5880 source operand is encoded in VEX prefix. */
5881 gas_assert (mem != (unsigned int) ~0);
5886 gas_assert (op < i.operands);
5890 /* Check register-only source operand when two source
5891 operands are swapped. */
5892 if (!i.tm.operand_types[op].bitfield.baseindex
5893 && i.tm.operand_types[op + 1].bitfield.baseindex)
5897 gas_assert (mem == (vex_reg + 1)
5898 && op < i.operands);
5903 gas_assert (vex_reg < i.operands);
5907 else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
5909 /* For instructions with VexNDD, the register destination
5910 is encoded in VEX prefix. */
5911 if (i.mem_operands == 0)
5913 /* There is no memory operand. */
5914 gas_assert ((op + 2) == i.operands);
5919 /* There are only 2 operands. */
5920 gas_assert (op < 2 && i.operands == 2);
5925 gas_assert (op < i.operands);
5927 if (vex_reg != (unsigned int) ~0)
5929 i386_operand_type *type = &i.tm.operand_types[vex_reg];
5931 if (type->bitfield.reg32 != 1
5932 && type->bitfield.reg64 != 1
5933 && !operand_type_equal (type, &regxmm)
5934 && !operand_type_equal (type, &regymm))
5937 i.vex.register_specifier = i.op[vex_reg].regs;
5940 /* Don't set OP operand twice. */
5943 /* If there is an extension opcode to put here, the
5944 register number must be put into the regmem field. */
5945 if (i.tm.extension_opcode != None)
5947 i.rm.regmem = i.op[op].regs->reg_num;
5948 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5953 i.rm.reg = i.op[op].regs->reg_num;
5954 if ((i.op[op].regs->reg_flags & RegRex) != 0)
5959 /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
5960 must set it to 3 to indicate this is a register operand
5961 in the regmem field. */
5962 if (!i.mem_operands)
5966 /* Fill in i.rm.reg field with extension opcode (if any). */
5967 if (i.tm.extension_opcode != None)
5968 i.rm.reg = i.tm.extension_opcode;
5974 output_branch (void)
5980 relax_substateT subtype;
5984 code16 = flag_code == CODE_16BIT ? CODE16 : 0;
5985 size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
5988 if (i.prefix[DATA_PREFIX] != 0)
5994 /* Pentium4 branch hints. */
5995 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
5996 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6001 if (i.prefix[REX_PREFIX] != 0)
6007 if (i.prefixes != 0 && !intel_syntax)
6008 as_warn (_("skipping prefixes on this instruction"));
6010 /* It's always a symbol; End frag & setup for relax.
6011 Make sure there is enough room in this frag for the largest
6012 instruction we may generate in md_convert_frag. This is 2
6013 bytes for the opcode and room for the prefix and largest
6015 frag_grow (prefix + 2 + 4);
6016 /* Prefix and 1 opcode byte go in fr_fix. */
6017 p = frag_more (prefix + 1);
6018 if (i.prefix[DATA_PREFIX] != 0)
6019 *p++ = DATA_PREFIX_OPCODE;
6020 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6021 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6022 *p++ = i.prefix[SEG_PREFIX];
6023 if (i.prefix[REX_PREFIX] != 0)
6024 *p++ = i.prefix[REX_PREFIX];
6025 *p = i.tm.base_opcode;
6027 if ((unsigned char) *p == JUMP_PC_RELATIVE)
6028 subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6029 else if (cpu_arch_flags.bitfield.cpui386)
6030 subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6032 subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6035 sym = i.op[0].disps->X_add_symbol;
6036 off = i.op[0].disps->X_add_number;
6038 if (i.op[0].disps->X_op != O_constant
6039 && i.op[0].disps->X_op != O_symbol)
6041 /* Handle complex expressions. */
6042 sym = make_expr_symbol (i.op[0].disps);
6046 /* 1 possible extra opcode + 4 byte displacement go in var part.
6047 Pass reloc in fr_var. */
6048 frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
6058 if (i.tm.opcode_modifier.jumpbyte)
6060 /* This is a loop or jecxz type instruction. */
6062 if (i.prefix[ADDR_PREFIX] != 0)
6064 FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6067 /* Pentium4 branch hints. */
6068 if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6069 || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6071 FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6080 if (flag_code == CODE_16BIT)
6083 if (i.prefix[DATA_PREFIX] != 0)
6085 FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6095 if (i.prefix[REX_PREFIX] != 0)
6097 FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6101 if (i.prefixes != 0 && !intel_syntax)
6102 as_warn (_("skipping prefixes on this instruction"));
6104 p = frag_more (i.tm.opcode_length + size);
6105 switch (i.tm.opcode_length)
6108 *p++ = i.tm.base_opcode >> 8;
6110 *p++ = i.tm.base_opcode;
6116 fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6117 i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6119 /* All jumps handled here are signed, but don't use a signed limit
6120 check for 32 and 16 bit jumps as we want to allow wrap around at
6121 4G and 64k respectively. */
6123 fixP->fx_signed = 1;
6127 output_interseg_jump (void)
6135 if (flag_code == CODE_16BIT)
6139 if (i.prefix[DATA_PREFIX] != 0)
6145 if (i.prefix[REX_PREFIX] != 0)
6155 if (i.prefixes != 0 && !intel_syntax)
6156 as_warn (_("skipping prefixes on this instruction"));
6158 /* 1 opcode; 2 segment; offset */
6159 p = frag_more (prefix + 1 + 2 + size);
6161 if (i.prefix[DATA_PREFIX] != 0)
6162 *p++ = DATA_PREFIX_OPCODE;
6164 if (i.prefix[REX_PREFIX] != 0)
6165 *p++ = i.prefix[REX_PREFIX];
6167 *p++ = i.tm.base_opcode;
6168 if (i.op[1].imms->X_op == O_constant)
6170 offsetT n = i.op[1].imms->X_add_number;
6173 && !fits_in_unsigned_word (n)
6174 && !fits_in_signed_word (n))
6176 as_bad (_("16-bit jump out of range"));
6179 md_number_to_chars (p, n, size);
6182 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6183 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
6184 if (i.op[0].imms->X_op != O_constant)
6185 as_bad (_("can't handle non absolute segment in `%s'"),
6187 md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
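/* e.g. in 32-bit code `ljmp $0x8, $0x1000' comes out as
   ea 00 10 00 00 08 00: opcode, 4-byte offset, then the
   2-byte segment selector written just above. */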
6193 fragS *insn_start_frag;
6194 offsetT insn_start_off;
6196 /* Tie dwarf2 debug info to the address at the start of the insn.
6197 We can't do this after the insn has been output as the current
6198 frag may have been closed off. eg. by frag_var. */
6199 dwarf2_emit_insn (0);
6201 insn_start_frag = frag_now;
6202 insn_start_off = frag_now_fix ();
6205 if (i.tm.opcode_modifier.jump)
6207 else if (i.tm.opcode_modifier.jumpbyte
6208 || i.tm.opcode_modifier.jumpdword)
6210 else if (i.tm.opcode_modifier.jumpintersegment)
6211 output_interseg_jump ();
6214 /* Output normal instructions here. */
6218 unsigned int prefix;
6220 /* Since the VEX prefix contains the implicit prefix, we don't
6221 need the explicit prefix. */
6222 if (!i.tm.opcode_modifier.vex)
6224 switch (i.tm.opcode_length)
6227 if (i.tm.base_opcode & 0xff000000)
6229 prefix = (i.tm.base_opcode >> 24) & 0xff;
6234 if ((i.tm.base_opcode & 0xff0000) != 0)
6236 prefix = (i.tm.base_opcode >> 16) & 0xff;
6237 if (i.tm.cpu_flags.bitfield.cpupadlock)
6240 if (prefix != REPE_PREFIX_OPCODE
6241 || (i.prefix[REP_PREFIX]
6242 != REPE_PREFIX_OPCODE))
6243 add_prefix (prefix);
6246 add_prefix (prefix);
6255 /* The prefix bytes. */
6256 for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
6258 FRAG_APPEND_1_CHAR (*q);
6262 for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
6267 /* REX byte is encoded in VEX prefix. */
6271 FRAG_APPEND_1_CHAR (*q);
6274 /* There should be no other prefixes for instructions
6279 /* Now the VEX prefix. */
6280 p = frag_more (i.vex.length);
6281 for (j = 0; j < i.vex.length; j++)
6282 p[j] = i.vex.bytes[j];
6285 /* Now the opcode; be careful about word order here! */
6286 if (i.tm.opcode_length == 1)
6288 FRAG_APPEND_1_CHAR (i.tm.base_opcode);
6292 switch (i.tm.opcode_length)
6296 *p++ = (i.tm.base_opcode >> 16) & 0xff;
6306 /* Put out high byte first: can't use md_number_to_chars! */
6307 *p++ = (i.tm.base_opcode >> 8) & 0xff;
6308 *p = i.tm.base_opcode & 0xff;
6311 /* Now the modrm byte and sib byte (if present). */
6312 if (i.tm.opcode_modifier.modrm)
6314 FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
6317 /* If i.rm.regmem == ESP (4)
6318 && i.rm.mode != (Register mode)
6320 ==> need second modrm byte. */
6321 if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
6323 && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
6324 FRAG_APPEND_1_CHAR ((i.sib.base << 0
6326 | i.sib.scale << 6));
6329 if (i.disp_operands)
6330 output_disp (insn_start_frag, insn_start_off);
6333 output_imm (insn_start_frag, insn_start_off);
6339 pi ("" /*line*/, &i);
6341 #endif /* DEBUG386 */
6344 /* Return the size of the displacement operand N. */
6347 disp_size (unsigned int n)
6350 if (i.types[n].bitfield.disp64)
6352 else if (i.types[n].bitfield.disp8)
6354 else if (i.types[n].bitfield.disp16)
6359 /* Return the size of the immediate operand N. */
6362 imm_size (unsigned int n)
6365 if (i.types[n].bitfield.imm64)
6367 else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
6369 else if (i.types[n].bitfield.imm16)
6375 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
6380 for (n = 0; n < i.operands; n++)
6382 if (operand_type_check (i.types[n], disp))
6384 if (i.op[n].disps->X_op == O_constant)
6386 int size = disp_size (n);
6389 val = offset_in_range (i.op[n].disps->X_add_number,
6391 p = frag_more (size);
6392 md_number_to_chars (p, val, size);
6396 enum bfd_reloc_code_real reloc_type;
6397 int size = disp_size (n);
6398 int sign = i.types[n].bitfield.disp32s;
6399 int pcrel = (i.flags[n] & Operand_PCrel) != 0;
6401 /* We can't have 8 bit displacement here. */
6402 gas_assert (!i.types[n].bitfield.disp8);
6404 /* The PC relative address is computed relative
6405 to the instruction boundary, so in case immediate
6406 fields follow, we need to adjust the value. */
6407 if (pcrel && i.imm_operands)
6412 for (n1 = 0; n1 < i.operands; n1++)
6413 if (operand_type_check (i.types[n1], imm))
6415 /* Only one immediate is allowed for PC
6416 relative address. */
6417 gas_assert (sz == 0);
6419 i.op[n].disps->X_add_number -= sz;
6421 /* We should find the immediate. */
6422 gas_assert (sz != 0);
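/* e.g. for `testl $0x12345678, foo(%rip)' the four immediate
   bytes sit between the displacement and the end of the insn;
   the fixup is applied at the displacement field, but the CPU
   measures from the end of the insn, so the immediate size is
   subtracted from the addend here. */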
6425 p = frag_more (size);
6426 reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
6428 && GOT_symbol == i.op[n].disps->X_add_symbol
6429 && (((reloc_type == BFD_RELOC_32
6430 || reloc_type == BFD_RELOC_X86_64_32S
6431 || (reloc_type == BFD_RELOC_64
6433 && (i.op[n].disps->X_op == O_symbol
6434 || (i.op[n].disps->X_op == O_add
6435 && ((symbol_get_value_expression
6436 (i.op[n].disps->X_op_symbol)->X_op)
6438 || reloc_type == BFD_RELOC_32_PCREL))
6442 if (insn_start_frag == frag_now)
6443 add = (p - frag_now->fr_literal) - insn_start_off;
6448 add = insn_start_frag->fr_fix - insn_start_off;
6449 for (fr = insn_start_frag->fr_next;
6450 fr && fr != frag_now; fr = fr->fr_next)
6452 add += p - frag_now->fr_literal;
6457 reloc_type = BFD_RELOC_386_GOTPC;
6458 i.op[n].imms->X_add_number += add;
6460 else if (reloc_type == BFD_RELOC_64)
6461 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6463 /* Don't do the adjustment for x86-64, as there
6464 the pcrel addressing is relative to the _next_
6465 insn, and that is taken care of in other code. */
6466 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6468 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6469 i.op[n].disps, pcrel, reloc_type);
6476 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
6481 for (n = 0; n < i.operands; n++)
6483 if (operand_type_check (i.types[n], imm))
6485 if (i.op[n].imms->X_op == O_constant)
6487 int size = imm_size (n);
6490 val = offset_in_range (i.op[n].imms->X_add_number,
6492 p = frag_more (size);
6493 md_number_to_chars (p, val, size);
6497 /* Not absolute_section.
6498 Need a 32-bit fixup (don't support 8bit
6499 non-absolute imms). Try to support other
6501 enum bfd_reloc_code_real reloc_type;
6502 int size = imm_size (n);
6505 if (i.types[n].bitfield.imm32s
6506 && (i.suffix == QWORD_MNEM_SUFFIX
6507 || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
6512 p = frag_more (size);
6513 reloc_type = reloc (size, 0, sign, i.reloc[n]);
6515 /* This is tough to explain. We end up with this one if we
6516 * have operands that look like
6517 * "_GLOBAL_OFFSET_TABLE_+[.-.L284]". The goal here is to
6518 * obtain the absolute address of the GOT, and it is strongly
6519 * preferable from a performance point of view to avoid using
6520 * a runtime relocation for this. The actual sequence of
6521 * instructions often look something like:
6526 * addl $_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
6528 * The call and pop essentially return the absolute address
6529 * of the label .L66 and store it in %ebx. The linker itself
6530 * will ultimately change the first operand of the addl so
6531 * that %ebx points to the GOT, but to keep things simple, the
6532 * .o file must have this operand set so that it generates not
6533 * the absolute address of .L66, but the absolute address of
6534 * itself. This allows the linker itself to simply treat a GOTPC
6535 * relocation as asking for a pcrel offset to the GOT to be
6536 * added in, and the addend of the relocation is stored in the
6537 * operand field for the instruction itself.
6539 * Our job here is to fix the operand so that it would add
6540 * the correct offset so that %ebx would point to itself. The
6541 * thing that is tricky is that .-.L66 will point to the
6542 * beginning of the instruction, so we need to further modify
6543 * the operand so that it will point to itself. There are
6544 * other cases where you have something like:
6546 * .long $_GLOBAL_OFFSET_TABLE_+[.-.L66]
6548 * and here no correction would be required. Internally in
6549 * the assembler we treat operands of this form as not being
6550 * pcrel since the '.' is explicitly mentioned, and I wonder
6551 * whether it would simplify matters to do it this way. Who
6552 * knows. In earlier versions of the PIC patches, the
6553 * pcrel_adjust field was used to store the correction, but
6554 * since the expression is not pcrel, I felt it would be
6555 * confusing to do it this way. */
6557 if ((reloc_type == BFD_RELOC_32
6558 || reloc_type == BFD_RELOC_X86_64_32S
6559 || reloc_type == BFD_RELOC_64)
6561 && GOT_symbol == i.op[n].imms->X_add_symbol
6562 && (i.op[n].imms->X_op == O_symbol
6563 || (i.op[n].imms->X_op == O_add
6564 && ((symbol_get_value_expression
6565 (i.op[n].imms->X_op_symbol)->X_op)
6570 if (insn_start_frag == frag_now)
6571 add = (p - frag_now->fr_literal) - insn_start_off;
6576 add = insn_start_frag->fr_fix - insn_start_off;
6577 for (fr = insn_start_frag->fr_next;
6578 fr && fr != frag_now; fr = fr->fr_next)
6580 add += p - frag_now->fr_literal;
6584 reloc_type = BFD_RELOC_386_GOTPC;
6586 reloc_type = BFD_RELOC_X86_64_GOTPC32;
6588 reloc_type = BFD_RELOC_X86_64_GOTPC64;
6589 i.op[n].imms->X_add_number += add;
6591 fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6592 i.op[n].imms, 0, reloc_type);
6598 /* x86_cons_fix_new is called via the expression parsing code when a
6599 reloc is needed. We use this hook to get the correct .got reloc. */
6600 static enum bfd_reloc_code_real got_reloc = NO_RELOC;
6601 static int cons_sign = -1;
6604 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
6607 enum bfd_reloc_code_real r = reloc (len, 0, cons_sign, got_reloc);
6609 got_reloc = NO_RELOC;
6612 if (exp->X_op == O_secrel)
6614 exp->X_op = O_symbol;
6615 r = BFD_RELOC_32_SECREL;
6619 fix_new_exp (frag, off, len, exp, 0, r);
6622 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
6624 # define lex_got(reloc, adjust, types) NULL
6626 /* Parse operands of the form
6627 <symbol>@GOTOFF+<nnn>
6628 and similar .plt or .got references.
6630 If we find one, set up the correct relocation in RELOC and copy the
6631 input string, minus the `@GOTOFF' into a malloc'd buffer for
6632 parsing by the calling routine. Return this buffer, and if ADJUST
6633 is non-null set it to the length of the string we removed from the
6634 input line. Otherwise return NULL. */
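/* For instance, given `foo@GOTOFF+4' this returns a buffer
   containing "foo +4" (the reloc token replaced by a space) and
   sets *rel to the 32- or 64-bit GOTOFF relocation. */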
6636 lex_got (enum bfd_reloc_code_real *rel,
6638 i386_operand_type *types)
6640 /* Some of the relocations depend on the size of the field to
6641 be relocated. But in our callers i386_immediate and i386_displacement
6642 we don't yet know the operand size (this will be set by insn
6643 matching). Hence we record the word32 relocation here,
6644 and adjust the reloc according to the real size in reloc(). */
6645 static const struct {
6648 const enum bfd_reloc_code_real rel[2];
6649 const i386_operand_type types64;
6651 { STRING_COMMA_LEN ("PLTOFF"), { _dummy_first_bfd_reloc_code_real,
6652 BFD_RELOC_X86_64_PLTOFF64 },
6653 OPERAND_TYPE_IMM64 },
6654 { STRING_COMMA_LEN ("PLT"), { BFD_RELOC_386_PLT32,
6655 BFD_RELOC_X86_64_PLT32 },
6656 OPERAND_TYPE_IMM32_32S_DISP32 },
6657 { STRING_COMMA_LEN ("GOTPLT"), { _dummy_first_bfd_reloc_code_real,
6658 BFD_RELOC_X86_64_GOTPLT64 },
6659 OPERAND_TYPE_IMM64_DISP64 },
6660 { STRING_COMMA_LEN ("GOTOFF"), { BFD_RELOC_386_GOTOFF,
6661 BFD_RELOC_X86_64_GOTOFF64 },
6662 OPERAND_TYPE_IMM64_DISP64 },
6663 { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
6664 BFD_RELOC_X86_64_GOTPCREL },
6665 OPERAND_TYPE_IMM32_32S_DISP32 },
6666 { STRING_COMMA_LEN ("TLSGD"), { BFD_RELOC_386_TLS_GD,
6667 BFD_RELOC_X86_64_TLSGD },
6668 OPERAND_TYPE_IMM32_32S_DISP32 },
6669 { STRING_COMMA_LEN ("TLSLDM"), { BFD_RELOC_386_TLS_LDM,
6670 _dummy_first_bfd_reloc_code_real },
6671 OPERAND_TYPE_NONE },
6672 { STRING_COMMA_LEN ("TLSLD"), { _dummy_first_bfd_reloc_code_real,
6673 BFD_RELOC_X86_64_TLSLD },
6674 OPERAND_TYPE_IMM32_32S_DISP32 },
6675 { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
6676 BFD_RELOC_X86_64_GOTTPOFF },
6677 OPERAND_TYPE_IMM32_32S_DISP32 },
6678 { STRING_COMMA_LEN ("TPOFF"), { BFD_RELOC_386_TLS_LE_32,
6679 BFD_RELOC_X86_64_TPOFF32 },
6680 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6681 { STRING_COMMA_LEN ("NTPOFF"), { BFD_RELOC_386_TLS_LE,
6682 _dummy_first_bfd_reloc_code_real },
6683 OPERAND_TYPE_NONE },
6684 { STRING_COMMA_LEN ("DTPOFF"), { BFD_RELOC_386_TLS_LDO_32,
6685 BFD_RELOC_X86_64_DTPOFF32 },
6686 OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
6687 { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
6688 _dummy_first_bfd_reloc_code_real },
6689 OPERAND_TYPE_NONE },
6690 { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
6691 _dummy_first_bfd_reloc_code_real },
6692 OPERAND_TYPE_NONE },
6693 { STRING_COMMA_LEN ("GOT"), { BFD_RELOC_386_GOT32,
6694 BFD_RELOC_X86_64_GOT32 },
6695 OPERAND_TYPE_IMM32_32S_64_DISP32 },
6696 { STRING_COMMA_LEN ("TLSDESC"), { BFD_RELOC_386_TLS_GOTDESC,
6697 BFD_RELOC_X86_64_GOTPC32_TLSDESC },
6698 OPERAND_TYPE_IMM32_32S_DISP32 },
6699 { STRING_COMMA_LEN ("TLSCALL"), { BFD_RELOC_386_TLS_DESC_CALL,
6700 BFD_RELOC_X86_64_TLSDESC_CALL },
6701 OPERAND_TYPE_IMM32_32S_DISP32 },
6706 #if defined (OBJ_MAYBE_ELF)
6711 for (cp = input_line_pointer; *cp != '@'; cp++)
6712 if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
6715 for (j = 0; j < ARRAY_SIZE (gotrel); j++)
6717 int len = gotrel[j].len;
6718 if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
6720 if (gotrel[j].rel[object_64bit] != 0)
6723 char *tmpbuf, *past_reloc;
6725 *rel = gotrel[j].rel[object_64bit];
6731 if (flag_code != CODE_64BIT)
6733 types->bitfield.imm32 = 1;
6734 types->bitfield.disp32 = 1;
6737 *types = gotrel[j].types64;
6740 if (GOT_symbol == NULL)
6741 GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
6743 /* The length of the first part of our input line. */
6744 first = cp - input_line_pointer;
6746 /* The second part goes from after the reloc token until
6747 (and including) an end_of_line char or comma. */
6748 past_reloc = cp + 1 + len;
6750 while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
6752 second = cp + 1 - past_reloc;
6754 /* Allocate and copy string. The trailing NUL shouldn't
6755 be necessary, but be safe. */
6756 tmpbuf = (char *) xmalloc (first + second + 2);
6757 memcpy (tmpbuf, input_line_pointer, first);
6758 if (second != 0 && *past_reloc != ' ')
6759 /* Replace the relocation token with ' ', so that
6760 errors like foo@GOTOFF1 will be detected. */
6761 tmpbuf[first++] = ' ';
6762 memcpy (tmpbuf + first, past_reloc, second);
6763 tmpbuf[first + second] = '\0';
6767 as_bad (_("@%s reloc is not supported with %d-bit output format"),
6768 gotrel[j].str, 1 << (5 + object_64bit));
6773 /* Might be a symbol version string. Don't as_bad here. */
6779 x86_cons (expressionS *exp, int size)
6781 intel_syntax = -intel_syntax;
6784 if (size == 4 || (object_64bit && size == 8))
6786 /* Handle @GOTOFF and the like in an expression. */
6788 char *gotfree_input_line;
6791 save = input_line_pointer;
6792 gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
6793 if (gotfree_input_line)
6794 input_line_pointer = gotfree_input_line;
6798 if (gotfree_input_line)
6800 /* expression () has merrily parsed up to the end of line,
6801 or a comma - in the wrong buffer. Transfer how far
6802 input_line_pointer has moved to the right buffer. */
6803 input_line_pointer = (save
6804 + (input_line_pointer - gotfree_input_line)
6806 free (gotfree_input_line);
6807 if (exp->X_op == O_constant
6808 || exp->X_op == O_absent
6809 || exp->X_op == O_illegal
6810 || exp->X_op == O_register
6811 || exp->X_op == O_big)
6813 char c = *input_line_pointer;
6814 *input_line_pointer = 0;
6815 as_bad (_("missing or invalid expression `%s'"), save);
6816 *input_line_pointer = c;
6823 intel_syntax = -intel_syntax;
6826 i386_intel_simplify (exp);
6830 signed_cons (int size)
6832 if (flag_code == CODE_64BIT)
6840 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
6847 if (exp.X_op == O_symbol)
6848 exp.X_op = O_secrel;
6850 emit_expr (&exp, 4);
6852 while (*input_line_pointer++ == ',');
6854 input_line_pointer--;
6855 demand_empty_rest_of_line ();
6860 i386_immediate (char *imm_start)
6862 char *save_input_line_pointer;
6863 char *gotfree_input_line;
6866 i386_operand_type types;
6868 operand_type_set (&types, ~0);
6870 if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
6872 as_bad (_("at most %d immediate operands are allowed"),
6873 MAX_IMMEDIATE_OPERANDS);
6877 exp = &im_expressions[i.imm_operands++];
6878 i.op[this_operand].imms = exp;
6880 if (is_space_char (*imm_start))
6883 save_input_line_pointer = input_line_pointer;
6884 input_line_pointer = imm_start;
6886 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
6887 if (gotfree_input_line)
6888 input_line_pointer = gotfree_input_line;
6890 exp_seg = expression (exp);
6893 if (*input_line_pointer)
6894 as_bad (_("junk `%s' after expression"), input_line_pointer);
6896 input_line_pointer = save_input_line_pointer;
6897 if (gotfree_input_line)
6899 free (gotfree_input_line);
6901 if (exp->X_op == O_constant || exp->X_op == O_register)
6902 exp->X_op = O_illegal;
6905 return i386_finalize_immediate (exp_seg, exp, types, imm_start);
6909 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
6910 i386_operand_type types, const char *imm_start)
6912 if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
6915 as_bad (_("missing or invalid immediate expression `%s'"),
6919 else if (exp->X_op == O_constant)
6921 /* Size it properly later. */
6922 i.types[this_operand].bitfield.imm64 = 1;
6923 /* If not 64bit, sign extend val. */
6924 if (flag_code != CODE_64BIT
6925 && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
6927 = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
6929 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
6930 else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
6931 && exp_seg != absolute_section
6932 && exp_seg != text_section
6933 && exp_seg != data_section
6934 && exp_seg != bss_section
6935 && exp_seg != undefined_section
6936 && !bfd_is_com_section (exp_seg))
6938 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
6942 else if (!intel_syntax && exp->X_op == O_register)
6945 as_bad (_("illegal immediate register operand %s"), imm_start);
6950 /* This is an address. The size of the address will be
6951 determined later, depending on destination register,
6952 suffix, or the default for the section. */
6953 i.types[this_operand].bitfield.imm8 = 1;
6954 i.types[this_operand].bitfield.imm16 = 1;
6955 i.types[this_operand].bitfield.imm32 = 1;
6956 i.types[this_operand].bitfield.imm32s = 1;
6957 i.types[this_operand].bitfield.imm64 = 1;
6958 i.types[this_operand] = operand_type_and (i.types[this_operand],
6966 i386_scale (char *scale)
6969 char *save = input_line_pointer;
6971 input_line_pointer = scale;
6972 val = get_absolute_expression ();
6977 i.log2_scale_factor = 0;
6980 i.log2_scale_factor = 1;
6983 i.log2_scale_factor = 2;
6986 i.log2_scale_factor = 3;
6990 char sep = *input_line_pointer;
6992 *input_line_pointer = '\0';
6993 as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
6995 *input_line_pointer = sep;
6996 input_line_pointer = save;
7000 if (i.log2_scale_factor != 0 && i.index_reg == 0)
7002 as_warn (_("scale factor of %d without an index register"),
7003 1 << i.log2_scale_factor);
7004 i.log2_scale_factor = 0;
7006 scale = input_line_pointer;
7007 input_line_pointer = save;
7012 i386_displacement (char *disp_start, char *disp_end)
7016 char *save_input_line_pointer;
7017 char *gotfree_input_line;
7019 i386_operand_type bigdisp, types = anydisp;
7022 if (i.disp_operands == MAX_MEMORY_OPERANDS)
7024 as_bad (_("at most %d displacement operands are allowed"),
7025 MAX_MEMORY_OPERANDS);
7029 operand_type_set (&bigdisp, 0);
7030 if ((i.types[this_operand].bitfield.jumpabsolute)
7031 || (!current_templates->start->opcode_modifier.jump
7032 && !current_templates->start->opcode_modifier.jumpdword))
7034 bigdisp.bitfield.disp32 = 1;
7035 override = (i.prefix[ADDR_PREFIX] != 0);
7036 if (flag_code == CODE_64BIT)
7040 bigdisp.bitfield.disp32s = 1;
7041 bigdisp.bitfield.disp64 = 1;
7044 else if ((flag_code == CODE_16BIT) ^ override)
7046 bigdisp.bitfield.disp32 = 0;
7047 bigdisp.bitfield.disp16 = 1;
7052 /* For PC-relative branches, the width of the displacement
7053 is dependent upon data size, not address size. */
7054 override = (i.prefix[DATA_PREFIX] != 0);
7055 if (flag_code == CODE_64BIT)
7057 if (override || i.suffix == WORD_MNEM_SUFFIX)
7058 bigdisp.bitfield.disp16 = 1;
7061 bigdisp.bitfield.disp32 = 1;
7062 bigdisp.bitfield.disp32s = 1;
7068 override = (i.suffix == (flag_code != CODE_16BIT
7070 : LONG_MNEM_SUFFIX));
7071 bigdisp.bitfield.disp32 = 1;
7072 if ((flag_code == CODE_16BIT) ^ override)
7074 bigdisp.bitfield.disp32 = 0;
7075 bigdisp.bitfield.disp16 = 1;
7079 i.types[this_operand] = operand_type_or (i.types[this_operand],
7082 exp = &disp_expressions[i.disp_operands];
7083 i.op[this_operand].disps = exp;
7085 save_input_line_pointer = input_line_pointer;
7086 input_line_pointer = disp_start;
7087 END_STRING_AND_SAVE (disp_end);
7089 #ifndef GCC_ASM_O_HACK
7090 #define GCC_ASM_O_HACK 0
7093 END_STRING_AND_SAVE (disp_end + 1);
7094 if (i.types[this_operand].bitfield.baseindex
7095 && disp_end[-1] == '+')
7097 /* This hack is to avoid a warning when using the "o"
7098 constraint within gcc asm statements.
7101 #define _set_tssldt_desc(n,addr,limit,type) \
7102 __asm__ __volatile__ ( \
7104 "movw %w1,2+%0\n\t" \
7106 "movb %b1,4+%0\n\t" \
7107 "movb %4,5+%0\n\t" \
7108 "movb $0,6+%0\n\t" \
7109 "movb %h1,7+%0\n\t" \
7111 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
7113 This works great except that the output assembler ends
7114 up looking a bit weird if it turns out that there is
7115 no offset. You end up producing code that looks like:
7128 So here we provide the missing zero. */
7130 *disp_end = '0';
7133 gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
7134 if (gotfree_input_line)
7135 input_line_pointer = gotfree_input_line;
7137 exp_seg = expression (exp);
7140 if (*input_line_pointer)
7141 as_bad (_("junk `%s' after expression"), input_line_pointer);
7143 RESTORE_END_STRING (disp_end + 1);
7145 input_line_pointer = save_input_line_pointer;
7146 if (gotfree_input_line)
7148 free (gotfree_input_line);
7150 if (exp->X_op == O_constant || exp->X_op == O_register)
7151 exp->X_op = O_illegal;
7154 ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
7156 RESTORE_END_STRING (disp_end);
7162 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
7163 i386_operand_type types, const char *disp_start)
7165 i386_operand_type bigdisp;
7168 /* We do this to make sure that the section symbol is in
7169 the symbol table. We will ultimately change the relocation
7170 to be relative to the beginning of the section. */
7171 if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
7172 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
7173 || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7175 if (exp->X_op != O_symbol)
7178 if (S_IS_LOCAL (exp->X_add_symbol)
7179 && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
7180 && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
7181 section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
7182 exp->X_op = O_subtract;
7183 exp->X_op_symbol = GOT_symbol;
7184 if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
7185 i.reloc[this_operand] = BFD_RELOC_32_PCREL;
7186 else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
7187 i.reloc[this_operand] = BFD_RELOC_64;
7189 i.reloc[this_operand] = BFD_RELOC_32;
7192 else if (exp->X_op == O_absent
7193 || exp->X_op == O_illegal
7194 || exp->X_op == O_big)
7197 as_bad (_("missing or invalid displacement expression `%s'"),
7202 else if (flag_code == CODE_64BIT
7203 && !i.prefix[ADDR_PREFIX]
7204 && exp->X_op == O_constant)
7206 /* Since displacement is sign-extended to 64bit, don't allow
7207 disp32 and turn off disp32s if they are out of range. */
7208 i.types[this_operand].bitfield.disp32 = 0;
7209 if (!fits_in_signed_long (exp->X_add_number))
7211 i.types[this_operand].bitfield.disp32s = 0;
7212 if (i.types[this_operand].bitfield.baseindex)
7214 as_bad (_("0x%lx out of range of signed 32bit displacement"),
7215 (long) exp->X_add_number);
7221 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
7222 else if (exp->X_op != O_constant
7223 && OUTPUT_FLAVOR == bfd_target_aout_flavour
7224 && exp_seg != absolute_section
7225 && exp_seg != text_section
7226 && exp_seg != data_section
7227 && exp_seg != bss_section
7228 && exp_seg != undefined_section
7229 && !bfd_is_com_section (exp_seg))
7231 as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
7236 /* Check if this is a displacement only operand. */
7237 bigdisp = i.types[this_operand];
7238 bigdisp.bitfield.disp8 = 0;
7239 bigdisp.bitfield.disp16 = 0;
7240 bigdisp.bitfield.disp32 = 0;
7241 bigdisp.bitfield.disp32s = 0;
7242 bigdisp.bitfield.disp64 = 0;
7243 if (operand_type_all_zero (&bigdisp))
7244 i.types[this_operand] = operand_type_and (i.types[this_operand],
7250 /* Make sure the memory operand we've been dealt is valid.
7251 Return 1 on success, 0 on a failure. */
7254 i386_index_check (const char *operand_string)
7257 const char *kind = "base/index";
7258 #if INFER_ADDR_PREFIX
7264 if (current_templates->start->opcode_modifier.isstring
7265 && !current_templates->start->opcode_modifier.immext
7266 && (current_templates->end[-1].opcode_modifier.isstring
7269 /* Memory operands of string insns are special in that they only allow
7270 a single register (rDI, rSI, or rBX) as their memory address. */
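/* e.g. `movs' insists on %es:(%edi)/(%rdi) for the destination
   and (%esi)/(%rsi) for the source, while `xlat' only accepts
   (%ebx)/(%rbx); `expected' below is 7, 6 or 3 accordingly. */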
7271 unsigned int expected;
7273 kind = "string address";
7275 if (current_templates->start->opcode_modifier.w)
7277 i386_operand_type type = current_templates->end[-1].operand_types[0];
7279 if (!type.bitfield.baseindex
7280 || ((!i.mem_operands != !intel_syntax)
7281 && current_templates->end[-1].operand_types[1]
7282 .bitfield.baseindex))
7283 type = current_templates->end[-1].operand_types[1];
7284 expected = type.bitfield.esseg ? 7 /* rDI */ : 6 /* rSI */;
7287 expected = 3 /* rBX */;
7289 if (!i.base_reg || i.index_reg
7290 || operand_type_check (i.types[this_operand], disp))
7292 else if (!(flag_code == CODE_64BIT
7293 ? i.prefix[ADDR_PREFIX]
7294 ? i.base_reg->reg_type.bitfield.reg32
7295 : i.base_reg->reg_type.bitfield.reg64
7296 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7297 ? i.base_reg->reg_type.bitfield.reg32
7298 : i.base_reg->reg_type.bitfield.reg16))
7300 else if (i.base_reg->reg_num != expected)
7307 for (j = 0; j < i386_regtab_size; ++j)
7308 if ((flag_code == CODE_64BIT
7309 ? i.prefix[ADDR_PREFIX]
7310 ? i386_regtab[j].reg_type.bitfield.reg32
7311 : i386_regtab[j].reg_type.bitfield.reg64
7312 : (flag_code == CODE_16BIT) ^ !i.prefix[ADDR_PREFIX]
7313 ? i386_regtab[j].reg_type.bitfield.reg32
7314 : i386_regtab[j].reg_type.bitfield.reg16)
7315 && i386_regtab[j].reg_num == expected)
7317 gas_assert (j < i386_regtab_size);
7318 as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
7320 intel_syntax ? '[' : '(',
7322 i386_regtab[j].reg_name,
7323 intel_syntax ? ']' : ')');
7327 else if (flag_code == CODE_64BIT)
7330 && ((i.prefix[ADDR_PREFIX] == 0
7331 && !i.base_reg->reg_type.bitfield.reg64)
7332 || (i.prefix[ADDR_PREFIX]
7333 && !i.base_reg->reg_type.bitfield.reg32))
7335 || i.base_reg->reg_num !=
7336 (i.prefix[ADDR_PREFIX] == 0 ? RegRip : RegEip)))
7338 && !(i.index_reg->reg_type.bitfield.regxmm
7339 || i.index_reg->reg_type.bitfield.regymm)
7340 && (!i.index_reg->reg_type.bitfield.baseindex
7341 || (i.prefix[ADDR_PREFIX] == 0
7342 && i.index_reg->reg_num != RegRiz
7343 && !i.index_reg->reg_type.bitfield.reg64
7345 || (i.prefix[ADDR_PREFIX]
7346 && i.index_reg->reg_num != RegEiz
7347 && !i.index_reg->reg_type.bitfield.reg32))))
7352 if ((flag_code == CODE_16BIT) ^ (i.prefix[ADDR_PREFIX] != 0))
7356 && (!i.base_reg->reg_type.bitfield.reg16
7357 || !i.base_reg->reg_type.bitfield.baseindex))
7359 && (!i.index_reg->reg_type.bitfield.reg16
7360 || !i.index_reg->reg_type.bitfield.baseindex
7362 && i.base_reg->reg_num < 6
7363 && i.index_reg->reg_num >= 6
7364 && i.log2_scale_factor == 0))))
7371 && !i.base_reg->reg_type.bitfield.reg32)
7373 && !i.index_reg->reg_type.bitfield.regxmm
7374 && !i.index_reg->reg_type.bitfield.regymm
7375 && ((!i.index_reg->reg_type.bitfield.reg32
7376 && i.index_reg->reg_num != RegEiz)
7377 || !i.index_reg->reg_type.bitfield.baseindex)))
7383 #if INFER_ADDR_PREFIX
7384 if (!i.mem_operands && !i.prefix[ADDR_PREFIX])
7386 i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
7388 /* Change the size of any displacement too. At most one of
7389 Disp16 or Disp32 is set.
7390 FIXME. There doesn't seem to be any real need for separate
7391 Disp16 and Disp32 flags. The same goes for Imm16 and Imm32.
7392 Removing them would probably clean up the code quite a lot. */
7393 if (flag_code != CODE_64BIT
7394 && (i.types[this_operand].bitfield.disp16
7395 || i.types[this_operand].bitfield.disp32))
7396 i.types[this_operand]
7397 = operand_type_xor (i.types[this_operand], disp16_32);
7402 as_bad (_("`%s' is not a valid %s expression"),
7407 as_bad (_("`%s' is not a valid %s-bit %s expression"),
7409 flag_code_names[i.prefix[ADDR_PREFIX]
7410 ? flag_code == CODE_32BIT
7419 /* Parse OPERAND_STRING into the i386_insn structure I. Returns zero
7423 i386_att_operand (char *operand_string)
7427 char *op_string = operand_string;
7429 if (is_space_char (*op_string))
7432 /* We check for an absolute prefix (differentiating,
7433 for example, 'jmp pc_relative_label' from 'jmp *absolute_label'). */
7434 if (*op_string == ABSOLUTE_PREFIX)
7437 if (is_space_char (*op_string))
7439 i.types[this_operand].bitfield.jumpabsolute = 1;
7442 /* Check if operand is a register. */
7443 if ((r = parse_register (op_string, &end_op)) != NULL)
7445 i386_operand_type temp;
7447 /* Check for a segment override by searching for ':' after a
7448 segment register. */
7450 if (is_space_char (*op_string))
7452 if (*op_string == ':'
7453 && (r->reg_type.bitfield.sreg2
7454 || r->reg_type.bitfield.sreg3))
7459 i.seg[i.mem_operands] = &es;
7462 i.seg[i.mem_operands] = &cs;
7465 i.seg[i.mem_operands] = &ss;
7468 i.seg[i.mem_operands] = &ds;
7471 i.seg[i.mem_operands] = &fs;
7474 i.seg[i.mem_operands] = &gs;
7478 /* Skip the ':' and whitespace. */
7480 if (is_space_char (*op_string))
7483 if (!is_digit_char (*op_string)
7484 && !is_identifier_char (*op_string)
7485 && *op_string != '('
7486 && *op_string != ABSOLUTE_PREFIX)
7488 as_bad (_("bad memory operand `%s'"), op_string);
7491 /* Handle case of %es:*foo. */
7492 if (*op_string == ABSOLUTE_PREFIX)
7495 if (is_space_char (*op_string))
7497 i.types[this_operand].bitfield.jumpabsolute = 1;
7499 goto do_memory_reference;
7503 as_bad (_("junk `%s' after register"), op_string);
7507 temp.bitfield.baseindex = 0;
7508 i.types[this_operand] = operand_type_or (i.types[this_operand],
7510 i.types[this_operand].bitfield.unspecified = 0;
7511 i.op[this_operand].regs = r;
7514 else if (*op_string == REGISTER_PREFIX)
7516 as_bad (_("bad register name `%s'"), op_string);
7519 else if (*op_string == IMMEDIATE_PREFIX)
7522 if (i.types[this_operand].bitfield.jumpabsolute)
7524 as_bad (_("immediate operand illegal with absolute jump"));
7527 if (!i386_immediate (op_string))
7530 else if (is_digit_char (*op_string)
7531 || is_identifier_char (*op_string)
7532 || *op_string == '(')
7534 /* This is a memory reference of some sort. */
7537 /* Start and end of displacement string expression (if found). */
7538 char *displacement_string_start;
7539 char *displacement_string_end;
7541 do_memory_reference:
7542 if ((i.mem_operands == 1
7543 && !current_templates->start->opcode_modifier.isstring)
7544 || i.mem_operands == 2)
7546 as_bad (_("too many memory references for `%s'"),
7547 current_templates->start->name);
7551 /* Check for base index form. We detect the base index form by
7552 looking for an ')' at the end of the operand, searching
7553 for the '(' matching it, and finding a REGISTER_PREFIX or ','
7555 base_string = op_string + strlen (op_string);
7558 if (is_space_char (*base_string))
7561 /* If we only have a displacement, set up for it to be parsed later. */
7562 displacement_string_start = op_string;
7563 displacement_string_end = base_string + 1;
7565 if (*base_string == ')')
7568 unsigned int parens_balanced = 1;
7569 /* We've already checked that the numbers of left & right ()'s
7570 match, so this loop will not be infinite. */
7574 if (*base_string == ')')
7576 if (*base_string == '(')
7579 while (parens_balanced);
7581 temp_string = base_string;
7583 /* Skip past '(' and whitespace. */
7585 if (is_space_char (*base_string))
7588 if (*base_string == ','
7589 || ((i.base_reg = parse_register (base_string, &end_op))
7592 displacement_string_end = temp_string;
7594 i.types[this_operand].bitfield.baseindex = 1;
7598 base_string = end_op;
7599 if (is_space_char (*base_string))
7603 /* There may be an index reg or scale factor here. */
7604 if (*base_string == ',')
7607 if (is_space_char (*base_string))
7610 if ((i.index_reg = parse_register (base_string, &end_op))
7613 base_string = end_op;
7614 if (is_space_char (*base_string))
7616 if (*base_string == ',')
7619 if (is_space_char (*base_string))
7622 else if (*base_string != ')')
7624 as_bad (_("expecting `,' or `)' "
7625 "after index register in `%s'"),
7630 else if (*base_string == REGISTER_PREFIX)
7632 as_bad (_("bad register name `%s'"), base_string);
7636 /* Check for scale factor. */
7637 if (*base_string != ')')
7639 char *end_scale = i386_scale (base_string);
7644 base_string = end_scale;
7645 if (is_space_char (*base_string))
7647 if (*base_string != ')')
7649 as_bad (_("expecting `)' "
7650 "after scale factor in `%s'"),
7655 else if (!i.index_reg)
7657 as_bad (_("expecting index register or scale factor "
7658 "after `,'; got '%c'"),
7663 else if (*base_string != ')')
7665 as_bad (_("expecting `,' or `)' "
7666 "after base register in `%s'"),
7671 else if (*base_string == REGISTER_PREFIX)
7673 as_bad (_("bad register name `%s'"), base_string);
7678 /* If there's an expression beginning the operand, parse it,
7679 assuming displacement_string_start and
7680 displacement_string_end are meaningful. */
7681 if (displacement_string_start != displacement_string_end)
7683 if (!i386_displacement (displacement_string_start,
7684 displacement_string_end))
7688 /* Special case for (%dx) while doing input/output op. */
7690 && operand_type_equal (&i.base_reg->reg_type,
7691 ®16_inoutportreg)
7693 && i.log2_scale_factor == 0
7694 && i.seg[i.mem_operands] == 0
7695 && !operand_type_check (i.types[this_operand], disp))
7697 i.types[this_operand] = inoutportreg;
7701 if (i386_index_check (operand_string) == 0)
7703 i.types[this_operand].bitfield.mem = 1;
7708 /* It's not a memory operand; argh! */
7709 as_bad (_("invalid char %s beginning operand %d `%s'"),
7710 output_invalid (*op_string),
7715 return 1; /* Normal return. */
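/* Illustration only (not part of the assembler): the base/index detection
   above locates the '(' that matches the operand's trailing ')' by walking
   backwards with a balance counter.  A standalone sketch of that scan, with
   a hypothetical helper name; it assumes the parentheses are balanced, as
   the caller has already verified.  */
#if 0
static char *
match_open_paren (char *close_paren)
{
  char *p = close_paren;	/* *p == ')' on entry.  */
  unsigned int depth = 1;

  do
    {
      --p;
      if (*p == ')')
	++depth;
      else if (*p == '(')
	--depth;
    }
  while (depth != 0);
  /* For "8(%ebx,%esi,4)" called on the final ')', this returns the '('
     just before "%ebx"; everything before it is the displacement.  */
  return p;
}
#endif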
7718 /* Calculate the maximum variable size (i.e., excluding fr_fix)
7719 that an rs_machine_dependent frag may reach. */
7722 i386_frag_max_var (fragS *frag)
7724 /* The only relaxable frags are for jumps.
7725 Unconditional jumps can grow by 4 bytes and others by 5 bytes. */
7726 gas_assert (frag->fr_type == rs_machine_dependent);
7727 return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
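/* For reference only: the standard IA-32 encodings that relaxation moves
   between, shown here to motivate the 4/5 figures above.  The fixed part
   of the frag keeps the first opcode byte; the bytes listed after it form
   the variable part that i386_frag_max_var bounds.  */
#if 0
static const unsigned char jmp_short[] = { 0xeb, 0x00 };		/* jmp rel8  */
static const unsigned char jmp_near[]  = { 0xe9, 0, 0, 0, 0 };		/* jmp rel32: 4 variable bytes */
static const unsigned char je_short[]  = { 0x74, 0x00 };		/* je rel8   */
static const unsigned char je_near[]   = { 0x0f, 0x84, 0, 0, 0, 0 };	/* je rel32: 5 variable bytes
									   (one extra opcode byte plus
									   a 4-byte displacement) */
#endif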
7730 /* md_estimate_size_before_relax()
7732 Called just before relax() for rs_machine_dependent frags. The x86
7733 assembler uses these frags to handle variable size jump
7736 Any symbol that is now undefined will not become defined.
7737 Return the correct fr_subtype in the frag.
7738 Return the initial "guess for variable size of frag" to caller.
7739 The guess is actually the growth beyond the fixed part. Whatever
7740 we do to grow the fixed or variable part contributes to our
7744 md_estimate_size_before_relax (fragS *fragP, segT segment)
7746 /* We've already got fragP->fr_subtype right; all we have to do is
7747 check for un-relaxable symbols. On an ELF system, we can't relax
7748 an externally visible symbol, because it may be overridden by a
7750 if (S_GET_SEGMENT (fragP->fr_symbol) != segment
7751 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7753 && (S_IS_EXTERNAL (fragP->fr_symbol)
7754 || S_IS_WEAK (fragP->fr_symbol)
7755 || ((symbol_get_bfdsym (fragP->fr_symbol)->flags
7756 & BSF_GNU_INDIRECT_FUNCTION))))
7758 #if defined (OBJ_COFF) && defined (TE_PE)
7759 || (OUTPUT_FLAVOR == bfd_target_coff_flavour
7760 && S_IS_WEAK (fragP->fr_symbol))
7764 /* Symbol is undefined in this segment, or we need to keep a
7765 reloc so that weak symbols can be overridden. */
7766 int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
7767 enum bfd_reloc_code_real reloc_type;
7768 unsigned char *opcode;
7771 if (fragP->fr_var != NO_RELOC)
7772 reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
7774 reloc_type = BFD_RELOC_16_PCREL;
7776 reloc_type = BFD_RELOC_32_PCREL;
7778 old_fr_fix = fragP->fr_fix;
7779 opcode = (unsigned char *) fragP->fr_opcode;
7781 switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
7784 /* Make jmp (0xeb) a (d)word displacement jump. */
7786 fragP->fr_fix += size;
7787 fix_new (fragP, old_fr_fix, size,
7789 fragP->fr_offset, 1,
7795 && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
7797 /* Negate the condition, and branch past an
7798 unconditional jump. */
7801 /* Insert an unconditional jump. */
7803 /* We added two extra opcode bytes, and have a two byte
7805 fragP->fr_fix += 2 + 2;
7806 fix_new (fragP, old_fr_fix + 2, 2,
7808 fragP->fr_offset, 1,
7815 if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
7820 fixP = fix_new (fragP, old_fr_fix, 1,
7822 fragP->fr_offset, 1,
7824 fixP->fx_signed = 1;
7828 /* This changes the byte-displacement jump 0x7N
7829 to the (d)word-displacement jump 0x0f,0x8N. */
7830 opcode[1] = opcode[0] + 0x10;
7831 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7832 /* We've added an opcode byte. */
7833 fragP->fr_fix += 1 + size;
7834 fix_new (fragP, old_fr_fix + 1, size,
7836 fragP->fr_offset, 1,
7841 BAD_CASE (fragP->fr_subtype);
7845 return fragP->fr_fix - old_fr_fix;
7848 /* Guess size depending on current relax state. Initially the relax
7849 state will correspond to a short jump and we return 1, because
7850 the variable part of the frag (the branch offset) is one byte
7851 long. However, we can relax a section more than once and in that
7852 case we must either set fr_subtype back to the unrelaxed state,
7853 or return the value for the appropriate branch. */
7854 return md_relax_table[fragP->fr_subtype].rlx_length;
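/* For reference only: standalone sketches (hypothetical helper names) of
   the Jcc byte arithmetic involved in relaxation.  Promotion to the near
   form is the "+ 0x10 behind a 0x0f escape" rewrite seen above; the
   "negate the condition and branch past an unconditional jump" rewrite
   relies on the standard IA-32 property that flipping bit 0 of a Jcc
   opcode negates its condition.  */
#if 0
/* Short conditional jump 0x70..0x7f -> its two-byte near form 0x0f,0x8N.  */
static void
promote_jcc (unsigned char short_op, unsigned char near_op[2])
{
  near_op[0] = 0x0f;			/* TWO_BYTE_OPCODE_ESCAPE */
  near_op[1] = short_op + 0x10;		/* e.g. 0x74 (je) -> 0x84  */
}

/* e.g. 0x74 (je) <-> 0x75 (jne), 0x7c (jl) <-> 0x7d (jge).  */
static unsigned char
negate_jcc (unsigned char op)
{
  return op ^ 1;
}
#endif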
7857 /* Called after relax() is finished.
7859 In: Address of frag.
7860 fr_type == rs_machine_dependent.
7861 fr_subtype is what the address relaxed to.
7863 Out: Any fixSs and constants are set up.
7864 Caller will turn frag into a ".space 0". */
7867 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
7870 unsigned char *opcode;
7871 unsigned char *where_to_put_displacement = NULL;
7872 offsetT target_address;
7873 offsetT opcode_address;
7874 unsigned int extension = 0;
7875 offsetT displacement_from_opcode_start;
7877 opcode = (unsigned char *) fragP->fr_opcode;
7879 /* Address we want to reach in file space. */
7880 target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
7882 /* Address opcode resides at in file space. */
7883 opcode_address = fragP->fr_address + fragP->fr_fix;
7885 /* Displacement from opcode start to fill into instruction. */
7886 displacement_from_opcode_start = target_address - opcode_address;
7888 if ((fragP->fr_subtype & BIG) == 0)
7890 /* Don't have to change opcode. */
7891 extension = 1; /* 1 opcode + 1 displacement */
7892 where_to_put_displacement = &opcode[1];
7896 if (no_cond_jump_promotion
7897 && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
7898 as_warn_where (fragP->fr_file, fragP->fr_line,
7899 _("long jump required"));
7901 switch (fragP->fr_subtype)
7903 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
7904 extension = 4; /* 1 opcode + 4 displacement */
7906 where_to_put_displacement = &opcode[1];
7909 case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
7910 extension = 2; /* 1 opcode + 2 displacement */
7912 where_to_put_displacement = &opcode[1];
7915 case ENCODE_RELAX_STATE (COND_JUMP, BIG):
7916 case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
7917 extension = 5; /* 2 opcode + 4 displacement */
7918 opcode[1] = opcode[0] + 0x10;
7919 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7920 where_to_put_displacement = &opcode[2];
7923 case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
7924 extension = 3; /* 2 opcode + 2 displacement */
7925 opcode[1] = opcode[0] + 0x10;
7926 opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
7927 where_to_put_displacement = &opcode[2];
7930 case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
7935 where_to_put_displacement = &opcode[3];
7939 BAD_CASE (fragP->fr_subtype);
7944 /* If size is less than four we are sure that the operand fits,
7945 but if it's 4, then it could be that the displacement is larger
7947 if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
7949 && ((addressT) (displacement_from_opcode_start - extension
7950 + ((addressT) 1 << 31))
7951 > (((addressT) 2 << 31) - 1)))
7953 as_bad_where (fragP->fr_file, fragP->fr_line,
7954 _("jump target out of range"));
7955 /* Make us emit 0. */
7956 displacement_from_opcode_start = extension;
7958 /* Now put displacement after opcode. */
7959 md_number_to_chars ((char *) where_to_put_displacement,
7960 (valueT) (displacement_from_opcode_start - extension),
7961 DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
7962 fragP->fr_fix += extension;
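/* For reference only: a standalone sketch of the signed 32-bit range test
   used above.  Biasing the value by 2^31 maps the valid signed range
   [-2^31, 2^31 - 1] onto the unsigned range [0, 2^32 - 1], so a single
   unsigned comparison catches both overflow directions.  */
#if 0
static int
fits_in_signed_32 (long long value)
{
  /* Assumes |value| is well below LLONG_MAX, as it is for displacements.  */
  return (unsigned long long) (value + (1LL << 31)) <= 0xffffffffULL;
}
#endif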
7965 /* Apply a fixup (fixP) to segment data, once it has been determined
7966 by our caller that we have all the info we need to fix it up.
7968 Parameter valP is the pointer to the value of the bits.
7970 On the 386, immediates, displacements, and data pointers are all in
7971 the same (little-endian) format, so we don't need to care about which
7975 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
7977 char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
7978 valueT value = *valP;
7980 #if !defined (TE_Mach)
7983 switch (fixP->fx_r_type)
7989 fixP->fx_r_type = BFD_RELOC_64_PCREL;
7992 case BFD_RELOC_X86_64_32S:
7993 fixP->fx_r_type = BFD_RELOC_32_PCREL;
7996 fixP->fx_r_type = BFD_RELOC_16_PCREL;
7999 fixP->fx_r_type = BFD_RELOC_8_PCREL;
8004 if (fixP->fx_addsy != NULL
8005 && (fixP->fx_r_type == BFD_RELOC_32_PCREL
8006 || fixP->fx_r_type == BFD_RELOC_64_PCREL
8007 || fixP->fx_r_type == BFD_RELOC_16_PCREL
8008 || fixP->fx_r_type == BFD_RELOC_8_PCREL)
8009 && !use_rela_relocations)
8011 /* This is a hack. There should be a better way to handle this.
8012 This covers for the fact that bfd_install_relocation will
8013 subtract the current location (for partial_inplace, PC relative
8014 relocations); see more below. */
8018 || OUTPUT_FLAVOR == bfd_target_coff_flavour
8021 value += fixP->fx_where + fixP->fx_frag->fr_address;
8023 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8026 segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
8029 || (symbol_section_p (fixP->fx_addsy)
8030 && sym_seg != absolute_section))
8031 && !generic_force_reloc (fixP))
8033 /* Yes, we add the values in twice. This is because
8034 bfd_install_relocation subtracts them out again. I think
8035 bfd_install_relocation is broken, but I don't dare change
8037 value += fixP->fx_where + fixP->fx_frag->fr_address;
8041 #if defined (OBJ_COFF) && defined (TE_PE)
8042 /* For some reason, the PE format does not store a
8043 section address offset for a PC relative symbol. */
8044 if (S_GET_SEGMENT (fixP->fx_addsy) != seg
8045 || S_IS_WEAK (fixP->fx_addsy))
8046 value += md_pcrel_from (fixP);
8049 #if defined (OBJ_COFF) && defined (TE_PE)
8050 if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8052 value -= S_GET_VALUE (fixP->fx_addsy);
8056 /* Fix a few things - the dynamic linker expects certain values here,
8057 and we must not disappoint it. */
8058 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8059 if (IS_ELF && fixP->fx_addsy)
8060 switch (fixP->fx_r_type)
8062 case BFD_RELOC_386_PLT32:
8063 case BFD_RELOC_X86_64_PLT32:
8064 /* Make the jump instruction point to the address of the operand. At
8065 runtime we merely add the offset to the actual PLT entry. */
8069 case BFD_RELOC_386_TLS_GD:
8070 case BFD_RELOC_386_TLS_LDM:
8071 case BFD_RELOC_386_TLS_IE_32:
8072 case BFD_RELOC_386_TLS_IE:
8073 case BFD_RELOC_386_TLS_GOTIE:
8074 case BFD_RELOC_386_TLS_GOTDESC:
8075 case BFD_RELOC_X86_64_TLSGD:
8076 case BFD_RELOC_X86_64_TLSLD:
8077 case BFD_RELOC_X86_64_GOTTPOFF:
8078 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
8079 value = 0; /* Fully resolved at runtime. No addend. */
8081 case BFD_RELOC_386_TLS_LE:
8082 case BFD_RELOC_386_TLS_LDO_32:
8083 case BFD_RELOC_386_TLS_LE_32:
8084 case BFD_RELOC_X86_64_DTPOFF32:
8085 case BFD_RELOC_X86_64_DTPOFF64:
8086 case BFD_RELOC_X86_64_TPOFF32:
8087 case BFD_RELOC_X86_64_TPOFF64:
8088 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8091 case BFD_RELOC_386_TLS_DESC_CALL:
8092 case BFD_RELOC_X86_64_TLSDESC_CALL:
8093 value = 0; /* Fully resolved at runtime. No addend. */
8094 S_SET_THREAD_LOCAL (fixP->fx_addsy);
8098 case BFD_RELOC_386_GOT32:
8099 case BFD_RELOC_X86_64_GOT32:
8100 value = 0; /* Fully resolved at runtime. No addend. */
8103 case BFD_RELOC_VTABLE_INHERIT:
8104 case BFD_RELOC_VTABLE_ENTRY:
8111 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) */
8113 #endif /* !defined (TE_Mach) */
8115 /* Are we finished with this relocation now? */
8116 if (fixP->fx_addsy == NULL)
8118 #if defined (OBJ_COFF) && defined (TE_PE)
8119 else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
8122 /* Remember value for tc_gen_reloc. */
8123 fixP->fx_addnumber = value;
8124 /* Clear out the frag for now. */
8128 else if (use_rela_relocations)
8130 fixP->fx_no_overflow = 1;
8131 /* Remember value for tc_gen_reloc. */
8132 fixP->fx_addnumber = value;
8136 md_number_to_chars (p, value, fixP->fx_size);
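/* For reference only: on this little-endian target md_number_to_chars
   stores the fixed-up value least significant byte first.  A standalone
   sketch of that byte order for a 32-bit field:  */
#if 0
static void
put_le32 (unsigned char *p, unsigned long value)
{
  p[0] = value & 0xff;
  p[1] = (value >> 8) & 0xff;
  p[2] = (value >> 16) & 0xff;
  p[3] = (value >> 24) & 0xff;
  /* e.g. 0x12345678 is stored as the bytes 78 56 34 12.  */
}
#endif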
8140 md_atof (int type, char *litP, int *sizeP)
8142 /* This outputs the LITTLENUMs in REVERSE order;
8143 in accord with the bigendian 386. */
8144 return ieee_md_atof (type, litP, sizeP, FALSE);
8147 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
8150 output_invalid (int c)
8153 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8156 snprintf (output_invalid_buf, sizeof (output_invalid_buf),
8157 "(0x%x)", (unsigned char) c);
8158 return output_invalid_buf;
8161 /* REG_STRING starts *before* REGISTER_PREFIX. */
8163 static const reg_entry *
8164 parse_real_register (char *reg_string, char **end_op)
8166 char *s = reg_string;
8168 char reg_name_given[MAX_REG_NAME_SIZE + 1];
8171 /* Skip possible REGISTER_PREFIX and possible whitespace. */
8172 if (*s == REGISTER_PREFIX)
8175 if (is_space_char (*s))
8179 while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
8181 if (p >= reg_name_given + MAX_REG_NAME_SIZE)
8182 return (const reg_entry *) NULL;
8186 /* For naked regs, make sure that we are not dealing with an identifier.
8187 This prevents confusing an identifier like `eax_var' with register
8189 if (allow_naked_reg && identifier_chars[(unsigned char) *s])
8190 return (const reg_entry *) NULL;
8194 r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
8196 /* Handle floating point regs, allowing spaces in the (i) part. */
8197 if (r == i386_regtab /* %st is first entry of table */)
8199 if (is_space_char (*s))
8204 if (is_space_char (*s))
8206 if (*s >= '0' && *s <= '7')
8210 if (is_space_char (*s))
8215 r = (const reg_entry *) hash_find (reg_hash, "st(0)");
8220 /* We have "%st(" then garbage. */
8221 return (const reg_entry *) NULL;
8225 if (r == NULL || allow_pseudo_reg)
8228 if (operand_type_all_zero (&r->reg_type))
8229 return (const reg_entry *) NULL;
8231 if ((r->reg_type.bitfield.reg32
8232 || r->reg_type.bitfield.sreg3
8233 || r->reg_type.bitfield.control
8234 || r->reg_type.bitfield.debug
8235 || r->reg_type.bitfield.test)
8236 && !cpu_arch_flags.bitfield.cpui386)
8237 return (const reg_entry *) NULL;
8239 if (r->reg_type.bitfield.floatreg
8240 && !cpu_arch_flags.bitfield.cpu8087
8241 && !cpu_arch_flags.bitfield.cpu287
8242 && !cpu_arch_flags.bitfield.cpu387)
8243 return (const reg_entry *) NULL;
8245 if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpummx)
8246 return (const reg_entry *) NULL;
8248 if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpusse)
8249 return (const reg_entry *) NULL;
8251 if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuavx)
8252 return (const reg_entry *) NULL;
8254 /* Don't allow the fake index registers unless allow_index_reg is set. */
8255 if (!allow_index_reg
8256 && (r->reg_num == RegEiz || r->reg_num == RegRiz))
8257 return (const reg_entry *) NULL;
8259 if (((r->reg_flags & (RegRex64 | RegRex))
8260 || r->reg_type.bitfield.reg64)
8261 && (!cpu_arch_flags.bitfield.cpulm
8262 || !operand_type_equal (&r->reg_type, &control))
8263 && flag_code != CODE_64BIT)
8264 return (const reg_entry *) NULL;
8266 if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
8267 return (const reg_entry *) NULL;
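/* Illustration only (hypothetical helper): the "%st(i)" handling above
   tolerates blanks inside the parentheses.  A standalone sketch of that
   scan, returning the stack index 0..7, or -1 if the text following "st"
   is not of the form "(N)".  */
#if 0
static int
parse_st_index (const char *s)
{
  int n;

  while (*s == ' ' || *s == '\t')
    ++s;
  if (*s++ != '(')
    return -1;
  while (*s == ' ' || *s == '\t')
    ++s;
  if (*s < '0' || *s > '7')
    return -1;
  n = *s++ - '0';
  while (*s == ' ' || *s == '\t')
    ++s;
  return *s == ')' ? n : -1;	/* "%st ( 3 )" and "%st(3)" both give 3.  */
}
#endif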
8272 /* REG_STRING starts *before* REGISTER_PREFIX. */
8274 static const reg_entry *
8275 parse_register (char *reg_string, char **end_op)
8279 if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
8280 r = parse_real_register (reg_string, end_op);
8285 char *save = input_line_pointer;
8289 input_line_pointer = reg_string;
8290 c = get_symbol_end ();
8291 symbolP = symbol_find (reg_string);
8292 if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
8294 const expressionS *e = symbol_get_value_expression (symbolP);
8296 know (e->X_op == O_register);
8297 know (e->X_add_number >= 0
8298 && (valueT) e->X_add_number < i386_regtab_size);
8299 r = i386_regtab + e->X_add_number;
8300 *end_op = input_line_pointer;
8302 *input_line_pointer = c;
8303 input_line_pointer = save;
8309 i386_parse_name (char *name, expressionS *e, char *nextcharP)
8312 char *end = input_line_pointer;
8315 r = parse_register (name, &input_line_pointer);
8316 if (r && end <= input_line_pointer)
8318 *nextcharP = *input_line_pointer;
8319 *input_line_pointer = 0;
8320 e->X_op = O_register;
8321 e->X_add_number = r - i386_regtab;
8324 input_line_pointer = end;
8326 return intel_syntax ? i386_intel_parse_name (name, e) : 0;
8330 md_operand (expressionS *e)
8335 switch (*input_line_pointer)
8337 case REGISTER_PREFIX:
8338 r = parse_real_register (input_line_pointer, &end);
8341 e->X_op = O_register;
8342 e->X_add_number = r - i386_regtab;
8343 input_line_pointer = end;
8348 gas_assert (intel_syntax);
8349 end = input_line_pointer++;
8351 if (*input_line_pointer == ']')
8353 ++input_line_pointer;
8354 e->X_op_symbol = make_expr_symbol (e);
8355 e->X_add_symbol = NULL;
8356 e->X_add_number = 0;
8362 input_line_pointer = end;
8369 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8370 const char *md_shortopts = "kVQ:sqn";
8372 const char *md_shortopts = "qn";
8375 #define OPTION_32 (OPTION_MD_BASE + 0)
8376 #define OPTION_64 (OPTION_MD_BASE + 1)
8377 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
8378 #define OPTION_MARCH (OPTION_MD_BASE + 3)
8379 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
8380 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
8381 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
8382 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
8383 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
8384 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
8385 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
8386 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
8387 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 12)
8388 #define OPTION_X32 (OPTION_MD_BASE + 13)
8390 struct option md_longopts[] =
8392 {"32", no_argument, NULL, OPTION_32},
8393 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8394 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8395 {"64", no_argument, NULL, OPTION_64},
8397 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8398 {"x32", no_argument, NULL, OPTION_X32},
8400 {"divide", no_argument, NULL, OPTION_DIVIDE},
8401 {"march", required_argument, NULL, OPTION_MARCH},
8402 {"mtune", required_argument, NULL, OPTION_MTUNE},
8403 {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
8404 {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
8405 {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
8406 {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
8407 {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
8408 {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
8409 {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
8410 {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
8411 {NULL, no_argument, NULL, 0}
8413 size_t md_longopts_size = sizeof (md_longopts);
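/* Illustration only (not how GAS itself drives option parsing): a
   `struct option' table like md_longopts is conventionally consumed by
   getopt_long, with the fourth member of each entry coming back as the
   value switched on.  The option names and handling below are made up
   for the sketch.  */
#if 0
#include <getopt.h>
#include <stdio.h>

int
main (int argc, char **argv)
{
  static const struct option opts[] =
    {
      {"march", required_argument, NULL, 'm'},
      {"32",    no_argument,       NULL, '3'},
      {NULL, 0, NULL, 0}
    };
  int c;

  while ((c = getopt_long (argc, argv, "", opts, NULL)) != -1)
    switch (c)
      {
      case 'm':
	printf ("arch: %s\n", optarg);
	break;
      case '3':
	printf ("32-bit output selected\n");
	break;
      }
  return 0;
}
#endif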
8416 md_parse_option (int c, char *arg)
8424 optimize_align_code = 0;
8431 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8432 /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
8433 should be emitted or not. FIXME: Not implemented. */
8437 /* -V: SVR4 argument to print version ID. */
8439 print_version_id ();
8442 /* -k: Ignore for FreeBSD compatibility. */
8447 /* -s: On i386 Solaris, this tells the native assembler to use
8448 .stab instead of .stab.excl. We always use .stab anyhow. */
8451 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8452 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8455 const char **list, **l;
8457 list = bfd_target_list ();
8458 for (l = list; *l != NULL; l++)
8459 if (CONST_STRNEQ (*l, "elf64-x86-64")
8460 || strcmp (*l, "coff-x86-64") == 0
8461 || strcmp (*l, "pe-x86-64") == 0
8462 || strcmp (*l, "pei-x86-64") == 0
8463 || strcmp (*l, "mach-o-x86-64") == 0)
8465 default_arch = "x86_64";
8469 as_fatal (_("no compiled in support for x86_64"));
8475 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8479 const char **list, **l;
8481 list = bfd_target_list ();
8482 for (l = list; *l != NULL; l++)
8483 if (CONST_STRNEQ (*l, "elf32-x86-64"))
8485 default_arch = "x86_64:32";
8489 as_fatal (_("no compiled in support for 32bit x86_64"));
8493 as_fatal (_("32bit x86_64 is only supported for ELF"));
8498 default_arch = "i386";
8502 #ifdef SVR4_COMMENT_CHARS
8507 n = (char *) xmalloc (strlen (i386_comment_chars) + 1);
8509 for (s = i386_comment_chars; *s != '\0'; s++)
8513 i386_comment_chars = n;
8519 arch = xstrdup (arg);
8523 as_fatal (_("invalid -march= option: `%s'"), arg);
8524 next = strchr (arch, '+');
8527 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8529 if (strcmp (arch, cpu_arch [j].name) == 0)
8532 if (! cpu_arch[j].flags.bitfield.cpui386)
8535 cpu_arch_name = cpu_arch[j].name;
8536 cpu_sub_arch_name = NULL;
8537 cpu_arch_flags = cpu_arch[j].flags;
8538 cpu_arch_isa = cpu_arch[j].type;
8539 cpu_arch_isa_flags = cpu_arch[j].flags;
8540 if (!cpu_arch_tune_set)
8542 cpu_arch_tune = cpu_arch_isa;
8543 cpu_arch_tune_flags = cpu_arch_isa_flags;
8547 else if (*cpu_arch [j].name == '.'
8548 && strcmp (arch, cpu_arch [j].name + 1) == 0)
8550 /* ISA extension. */
8551 i386_cpu_flags flags;
8553 if (!cpu_arch[j].negated)
8554 flags = cpu_flags_or (cpu_arch_flags,
8557 flags = cpu_flags_and_not (cpu_arch_flags,
8559 if (!cpu_flags_equal (&flags, &cpu_arch_flags))
8561 if (cpu_sub_arch_name)
8563 char *name = cpu_sub_arch_name;
8564 cpu_sub_arch_name = concat (name,
8566 (const char *) NULL);
8570 cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
8571 cpu_arch_flags = flags;
8572 cpu_arch_isa_flags = flags;
8578 if (j >= ARRAY_SIZE (cpu_arch))
8579 as_fatal (_("invalid -march= option: `%s'"), arg);
8583 while (next != NULL);
8588 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8589 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8591 if (strcmp (arg, cpu_arch [j].name) == 0)
8593 cpu_arch_tune_set = 1;
8594 cpu_arch_tune = cpu_arch [j].type;
8595 cpu_arch_tune_flags = cpu_arch[j].flags;
8599 if (j >= ARRAY_SIZE (cpu_arch))
8600 as_fatal (_("invalid -mtune= option: `%s'"), arg);
8603 case OPTION_MMNEMONIC:
8604 if (strcasecmp (arg, "att") == 0)
8606 else if (strcasecmp (arg, "intel") == 0)
8609 as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
8612 case OPTION_MSYNTAX:
8613 if (strcasecmp (arg, "att") == 0)
8615 else if (strcasecmp (arg, "intel") == 0)
8618 as_fatal (_("invalid -msyntax= option: `%s'"), arg);
8621 case OPTION_MINDEX_REG:
8622 allow_index_reg = 1;
8625 case OPTION_MNAKED_REG:
8626 allow_naked_reg = 1;
8629 case OPTION_MOLD_GCC:
8633 case OPTION_MSSE2AVX:
8637 case OPTION_MSSE_CHECK:
8638 if (strcasecmp (arg, "error") == 0)
8639 sse_check = sse_check_error;
8640 else if (strcasecmp (arg, "warning") == 0)
8641 sse_check = sse_check_warning;
8642 else if (strcasecmp (arg, "none") == 0)
8643 sse_check = sse_check_none;
8645 as_fatal (_("invalid -msse-check= option: `%s'"), arg);
8648 case OPTION_MAVXSCALAR:
8649 if (strcasecmp (arg, "128") == 0)
8651 else if (strcasecmp (arg, "256") == 0)
8654 as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
8663 #define MESSAGE_TEMPLATE \
8667 show_arch (FILE *stream, int ext, int check)
8669 static char message[] = MESSAGE_TEMPLATE;
8670 char *start = message + 27;
8672 int size = sizeof (MESSAGE_TEMPLATE);
8679 left = size - (start - message);
8680 for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
8682 /* Should it be skipped? */
8683 if (cpu_arch [j].skip)
8686 name = cpu_arch [j].name;
8687 len = cpu_arch [j].len;
8690 /* It is an extension. Skip if we aren't asked to show it. */
8701 /* It is a processor. Skip if we only show extensions. */
8704 else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
8706 /* It is an impossible processor - skip. */
8710 /* Reserve 2 spaces for ", " or ",\0" */
8713 /* Check if there is any room. */
8721 p = mempcpy (p, name, len);
8725 /* Output the current message now and start a new one. */
8728 fprintf (stream, "%s\n", message);
8730 left = size - (start - message) - len - 2;
8732 gas_assert (left >= 0);
8734 p = mempcpy (p, name, len);
8739 fprintf (stream, "%s\n", message);
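/* For reference only: a standalone sketch of the line-wrapping scheme
   show_arch uses, appending comma-separated names to a fixed-size buffer
   and flushing a line whenever the next name (plus ", ") would no longer
   fit.  Assumes WIDTH and every name fit within the local buffer.  */
#if 0
#include <stdio.h>
#include <string.h>

static void
print_wrapped (FILE *stream, const char *const *names, size_t count,
	       size_t width)
{
  char line[80];
  size_t used = 0, i;

  for (i = 0; i < count; i++)
    {
      size_t len = strlen (names[i]);

      if (used != 0 && used + 2 + len > width)
	{
	  fprintf (stream, "%s\n", line);
	  used = 0;
	}
      if (used != 0)
	{
	  memcpy (line + used, ", ", 2);
	  used += 2;
	}
      memcpy (line + used, names[i], len);
      used += len;
      line[used] = '\0';
    }
  if (used != 0)
    fprintf (stream, "%s\n", line);
}
#endif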
8743 md_show_usage (FILE *stream)
8745 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8746 fprintf (stream, _("\
8748 -V print assembler version number\n\
8751 fprintf (stream, _("\
8752 -n Do not optimize code alignment\n\
8753 -q quieten some warnings\n"));
8754 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
8755 fprintf (stream, _("\
8758 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8759 || defined (TE_PE) || defined (TE_PEP))
8760 fprintf (stream, _("\
8761 --32/--64/--x32 generate 32bit/64bit/x32 code\n"));
8763 #ifdef SVR4_COMMENT_CHARS
8764 fprintf (stream, _("\
8765 --divide do not treat `/' as a comment character\n"));
8767 fprintf (stream, _("\
8768 --divide ignored\n"));
8770 fprintf (stream, _("\
8771 -march=CPU[,+EXTENSION...]\n\
8772 generate code for CPU and EXTENSION, CPU is one of:\n"));
8773 show_arch (stream, 0, 1);
8774 fprintf (stream, _("\
8775 EXTENSION is combination of:\n"));
8776 show_arch (stream, 1, 0);
8777 fprintf (stream, _("\
8778 -mtune=CPU optimize for CPU, CPU is one of:\n"));
8779 show_arch (stream, 0, 0);
8780 fprintf (stream, _("\
8781 -msse2avx encode SSE instructions with VEX prefix\n"));
8782 fprintf (stream, _("\
8783 -msse-check=[none|error|warning]\n\
8784 check SSE instructions\n"));
8785 fprintf (stream, _("\
8786 -mavxscalar=[128|256] encode scalar AVX instructions with specific vector\n\
8788 fprintf (stream, _("\
8789 -mmnemonic=[att|intel] use AT&T/Intel mnemonic\n"));
8790 fprintf (stream, _("\
8791 -msyntax=[att|intel] use AT&T/Intel syntax\n"));
8792 fprintf (stream, _("\
8793 -mindex-reg support pseudo index registers\n"));
8794 fprintf (stream, _("\
8795 -mnaked-reg don't require `%%' prefix for registers\n"));
8796 fprintf (stream, _("\
8797 -mold-gcc support old (<= 2.8.1) versions of gcc\n"));
8800 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
8801 || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
8802 || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
8804 /* Pick the target format to use. */
8807 i386_target_format (void)
8809 if (!strncmp (default_arch, "x86_64", 6))
8811 update_code_flag (CODE_64BIT, 1);
8812 if (default_arch[6] == '\0')
8813 x86_elf_abi = X86_64_ABI;
8815 x86_elf_abi = X86_64_X32_ABI;
8817 else if (!strcmp (default_arch, "i386"))
8818 update_code_flag (CODE_32BIT, 1);
8820 as_fatal (_("unknown architecture"));
8822 if (cpu_flags_all_zero (&cpu_arch_isa_flags))
8823 cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8824 if (cpu_flags_all_zero (&cpu_arch_tune_flags))
8825 cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;
8827 switch (OUTPUT_FLAVOR)
8829 #if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
8830 case bfd_target_aout_flavour:
8831 return AOUT_TARGET_FORMAT;
8833 #if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
8834 # if defined (TE_PE) || defined (TE_PEP)
8835 case bfd_target_coff_flavour:
8836 return flag_code == CODE_64BIT ? "pe-x86-64" : "pe-i386";
8837 # elif defined (TE_GO32)
8838 case bfd_target_coff_flavour:
8841 case bfd_target_coff_flavour:
8845 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
8846 case bfd_target_elf_flavour:
8850 switch (x86_elf_abi)
8853 format = ELF_TARGET_FORMAT;
8856 use_rela_relocations = 1;
8858 format = ELF_TARGET_FORMAT64;
8860 case X86_64_X32_ABI:
8861 use_rela_relocations = 1;
8863 disallow_64bit_reloc = 1;
8864 format = ELF_TARGET_FORMAT32;
8867 if (cpu_arch_isa == PROCESSOR_L1OM)
8869 if (x86_elf_abi != X86_64_ABI)
8870 as_fatal (_("Intel L1OM is 64bit only"));
8871 return ELF_TARGET_L1OM_FORMAT;
8873 if (cpu_arch_isa == PROCESSOR_K1OM)
8875 if (x86_elf_abi != X86_64_ABI)
8876 as_fatal (_("Intel K1OM is 64bit only"));
8877 return ELF_TARGET_K1OM_FORMAT;
8883 #if defined (OBJ_MACH_O)
8884 case bfd_target_mach_o_flavour:
8885 if (flag_code == CODE_64BIT)
8887 use_rela_relocations = 1;
8889 return "mach-o-x86-64";
8892 return "mach-o-i386";
8900 #endif /* OBJ_MAYBE_ more than one */
8902 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))
8904 i386_elf_emit_arch_note (void)
8906 if (IS_ELF && cpu_arch_name != NULL)
8909 asection *seg = now_seg;
8910 subsegT subseg = now_subseg;
8911 Elf_Internal_Note i_note;
8912 Elf_External_Note e_note;
8913 asection *note_secp;
8916 /* Create the .note section. */
8917 note_secp = subseg_new (".note", 0);
8918 bfd_set_section_flags (stdoutput,
8920 SEC_HAS_CONTENTS | SEC_READONLY);
8922 /* Process the arch string. */
8923 len = strlen (cpu_arch_name);
8925 i_note.namesz = len + 1;
8927 i_note.type = NT_ARCH;
8928 p = frag_more (sizeof (e_note.namesz));
8929 md_number_to_chars (p, (valueT) i_note.namesz, sizeof (e_note.namesz));
8930 p = frag_more (sizeof (e_note.descsz));
8931 md_number_to_chars (p, (valueT) i_note.descsz, sizeof (e_note.descsz));
8932 p = frag_more (sizeof (e_note.type));
8933 md_number_to_chars (p, (valueT) i_note.type, sizeof (e_note.type));
8934 p = frag_more (len + 1);
8935 strcpy (p, cpu_arch_name);
8937 frag_align (2, 0, 0);
8939 subseg_set (seg, subseg);
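/* For reference only: the .note contents emitted above follow the
   standard ELF note layout of three 32-bit words (namesz, descsz, type)
   followed by the name, each part padded to a 4-byte boundary.  For a
   hypothetical cpu_arch_name of "i686" the words would be namesz = 5,
   descsz = 0 (no descriptor is written), type = NT_ARCH, followed by
   "i686\0" and three bytes of padding.  */
#if 0
struct elf_note_header_example
{
  unsigned int namesz;	/* Length of the name, including its NUL.  */
  unsigned int descsz;	/* Length of the descriptor.  */
  unsigned int type;	/* NT_ARCH for this note.  */
  /* name[namesz] follows, padded to a 4-byte boundary; the descriptor
     (none here) would follow, padded likewise.  */
};
#endif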
8945 md_undefined_symbol (char *name)
8947 if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
8948 && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
8949 && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
8950 && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
8954 if (symbol_find (name))
8955 as_bad (_("GOT already in symbol table"));
8956 GOT_symbol = symbol_new (name, undefined_section,
8957 (valueT) 0, &zero_address_frag);
8964 /* Round up a section size to the appropriate boundary. */
8967 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
8969 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8970 if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
8972 /* For a.out, force the section size to be aligned. If we don't do
8973 this, BFD will align it for us, but it will not write out the
8974 final bytes of the section. This may be a bug in BFD, but it is
8975 easier to fix it here since that is how the other a.out targets
8979 align = bfd_get_section_alignment (stdoutput, segment);
8980 size = ((size + (1 << align) - 1) & ((valueT) -1 << align));
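/* For reference only: a standalone sketch of the power-of-two round-up
   expression used above.  */
#if 0
static unsigned long
round_up_pow2 (unsigned long size, unsigned int log2_align)
{
  unsigned long mask = ((unsigned long) 1 << log2_align) - 1;

  return (size + mask) & ~mask;		/* e.g. round_up_pow2 (10, 4) == 16 */
}
#endif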
8987 /* On the i386, PC-relative offsets are relative to the start of the
8988 next instruction. That is, the address of the offset, plus its
8989 size, since the offset is always the last part of the insn. */
8992 md_pcrel_from (fixS *fixP)
8994 return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
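/* For reference only: a standalone sketch of what md_pcrel_from implies
   for the value stored in a PC-relative field: target minus (address of
   the field plus its size).  For a two-byte "jmp rel8" at 0x100 targeting
   0x110, the stored byte is 0x110 - (0x101 + 1) = 0x0e.  */
#if 0
static long
pcrel_displacement (unsigned long target,
		    unsigned long fixup_address, unsigned int fixup_size)
{
  return (long) (target - (fixup_address + fixup_size));
}
#endif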
9000 s_bss (int ignore ATTRIBUTE_UNUSED)
9004 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9006 obj_elf_section_change_hook ();
9008 temp = get_absolute_expression ();
9009 subseg_set (bss_section, (subsegT) temp);
9010 demand_empty_rest_of_line ();
9016 i386_validate_fix (fixS *fixp)
9018 if (fixp->fx_subsy && fixp->fx_subsy == GOT_symbol)
9020 if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
9024 fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
9029 fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
9031 fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
9038 tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
9041 bfd_reloc_code_real_type code;
9043 switch (fixp->fx_r_type)
9045 case BFD_RELOC_X86_64_PLT32:
9046 case BFD_RELOC_X86_64_GOT32:
9047 case BFD_RELOC_X86_64_GOTPCREL:
9048 case BFD_RELOC_386_PLT32:
9049 case BFD_RELOC_386_GOT32:
9050 case BFD_RELOC_386_GOTOFF:
9051 case BFD_RELOC_386_GOTPC:
9052 case BFD_RELOC_386_TLS_GD:
9053 case BFD_RELOC_386_TLS_LDM:
9054 case BFD_RELOC_386_TLS_LDO_32:
9055 case BFD_RELOC_386_TLS_IE_32:
9056 case BFD_RELOC_386_TLS_IE:
9057 case BFD_RELOC_386_TLS_GOTIE:
9058 case BFD_RELOC_386_TLS_LE_32:
9059 case BFD_RELOC_386_TLS_LE:
9060 case BFD_RELOC_386_TLS_GOTDESC:
9061 case BFD_RELOC_386_TLS_DESC_CALL:
9062 case BFD_RELOC_X86_64_TLSGD:
9063 case BFD_RELOC_X86_64_TLSLD:
9064 case BFD_RELOC_X86_64_DTPOFF32:
9065 case BFD_RELOC_X86_64_DTPOFF64:
9066 case BFD_RELOC_X86_64_GOTTPOFF:
9067 case BFD_RELOC_X86_64_TPOFF32:
9068 case BFD_RELOC_X86_64_TPOFF64:
9069 case BFD_RELOC_X86_64_GOTOFF64:
9070 case BFD_RELOC_X86_64_GOTPC32:
9071 case BFD_RELOC_X86_64_GOT64:
9072 case BFD_RELOC_X86_64_GOTPCREL64:
9073 case BFD_RELOC_X86_64_GOTPC64:
9074 case BFD_RELOC_X86_64_GOTPLT64:
9075 case BFD_RELOC_X86_64_PLTOFF64:
9076 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9077 case BFD_RELOC_X86_64_TLSDESC_CALL:
9079 case BFD_RELOC_VTABLE_ENTRY:
9080 case BFD_RELOC_VTABLE_INHERIT:
9082 case BFD_RELOC_32_SECREL:
9084 code = fixp->fx_r_type;
9086 case BFD_RELOC_X86_64_32S:
9087 if (!fixp->fx_pcrel)
9089 /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32. */
9090 code = fixp->fx_r_type;
9096 switch (fixp->fx_size)
9099 as_bad_where (fixp->fx_file, fixp->fx_line,
9100 _("can not do %d byte pc-relative relocation"),
9102 code = BFD_RELOC_32_PCREL;
9104 case 1: code = BFD_RELOC_8_PCREL; break;
9105 case 2: code = BFD_RELOC_16_PCREL; break;
9106 case 4: code = BFD_RELOC_32_PCREL; break;
9108 case 8: code = BFD_RELOC_64_PCREL; break;
9114 switch (fixp->fx_size)
9117 as_bad_where (fixp->fx_file, fixp->fx_line,
9118 _("can not do %d byte relocation"),
9120 code = BFD_RELOC_32;
9122 case 1: code = BFD_RELOC_8; break;
9123 case 2: code = BFD_RELOC_16; break;
9124 case 4: code = BFD_RELOC_32; break;
9126 case 8: code = BFD_RELOC_64; break;
9133 if ((code == BFD_RELOC_32
9134 || code == BFD_RELOC_32_PCREL
9135 || code == BFD_RELOC_X86_64_32S)
9137 && fixp->fx_addsy == GOT_symbol)
9140 code = BFD_RELOC_386_GOTPC;
9142 code = BFD_RELOC_X86_64_GOTPC32;
9144 if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
9146 && fixp->fx_addsy == GOT_symbol)
9148 code = BFD_RELOC_X86_64_GOTPC64;
9151 rel = (arelent *) xmalloc (sizeof (arelent));
9152 rel->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
9153 *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
9155 rel->address = fixp->fx_frag->fr_address + fixp->fx_where;
9157 if (!use_rela_relocations)
9159 /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
9160 vtable entry to be used in the relocation's section offset. */
9161 if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
9162 rel->address = fixp->fx_offset;
9163 #if defined (OBJ_COFF) && defined (TE_PE)
9164 else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
9165 rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
9170 /* Use rela relocations in 64bit mode. */
9173 if (disallow_64bit_reloc)
9176 case BFD_RELOC_X86_64_DTPOFF64:
9177 case BFD_RELOC_X86_64_TPOFF64:
9178 case BFD_RELOC_64_PCREL:
9179 case BFD_RELOC_X86_64_GOTOFF64:
9180 case BFD_RELOC_X86_64_GOT64:
9181 case BFD_RELOC_X86_64_GOTPCREL64:
9182 case BFD_RELOC_X86_64_GOTPC64:
9183 case BFD_RELOC_X86_64_GOTPLT64:
9184 case BFD_RELOC_X86_64_PLTOFF64:
9185 as_bad_where (fixp->fx_file, fixp->fx_line,
9186 _("cannot represent relocation type %s in x32 mode"),
9187 bfd_get_reloc_code_name (code));
9193 if (!fixp->fx_pcrel)
9194 rel->addend = fixp->fx_offset;
9198 case BFD_RELOC_X86_64_PLT32:
9199 case BFD_RELOC_X86_64_GOT32:
9200 case BFD_RELOC_X86_64_GOTPCREL:
9201 case BFD_RELOC_X86_64_TLSGD:
9202 case BFD_RELOC_X86_64_TLSLD:
9203 case BFD_RELOC_X86_64_GOTTPOFF:
9204 case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9205 case BFD_RELOC_X86_64_TLSDESC_CALL:
9206 rel->addend = fixp->fx_offset - fixp->fx_size;
9209 rel->addend = (section->vma
9211 + fixp->fx_addnumber
9212 + md_pcrel_from (fixp));
9217 rel->howto = bfd_reloc_type_lookup (stdoutput, code);
9218 if (rel->howto == NULL)
9220 as_bad_where (fixp->fx_file, fixp->fx_line,
9221 _("cannot represent relocation type %s"),
9222 bfd_get_reloc_code_name (code));
9223 /* Set howto to a garbage value so that we can keep going. */
9224 rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
9225 gas_assert (rel->howto != NULL);
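/* For reference only: the use_rela_relocations split above corresponds to
   the two standard ELF relocation record layouts.  i386 ELF uses REL (the
   addend lives in the section contents), x86-64 ELF uses RELA (the addend
   travels in the record).  The structures below merely illustrate those
   layouts; the real definitions come from the BFD/ELF headers.  */
#if 0
typedef struct
{
  unsigned int r_offset;	/* Where to apply the relocation.  */
  unsigned int r_info;		/* Symbol index and relocation type.  */
} Elf32_Rel_example;

typedef struct
{
  unsigned long long r_offset;	/* Where to apply the relocation.  */
  unsigned long long r_info;	/* Symbol index and relocation type.  */
  long long r_addend;		/* Explicit addend.  */
} Elf64_Rela_example;
#endif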
9231 #include "tc-i386-intel.c"
9234 tc_x86_parse_to_dw2regnum (expressionS *exp)
9236 int saved_naked_reg;
9237 char saved_register_dot;
9239 saved_naked_reg = allow_naked_reg;
9240 allow_naked_reg = 1;
9241 saved_register_dot = register_chars['.'];
9242 register_chars['.'] = '.';
9243 allow_pseudo_reg = 1;
9244 expression_and_evaluate (exp);
9245 allow_pseudo_reg = 0;
9246 register_chars['.'] = saved_register_dot;
9247 allow_naked_reg = saved_naked_reg;
9249 if (exp->X_op == O_register && exp->X_add_number >= 0)
9251 if ((addressT) exp->X_add_number < i386_regtab_size)
9253 exp->X_op = O_constant;
9254 exp->X_add_number = i386_regtab[exp->X_add_number]
9255 .dw2_regnum[flag_code >> 1];
9258 exp->X_op = O_illegal;
9263 tc_x86_frame_initial_instructions (void)
9265 static unsigned int sp_regno[2];
9267 if (!sp_regno[flag_code >> 1])
9269 char *saved_input = input_line_pointer;
9270 char sp[][4] = {"esp", "rsp"};
9273 input_line_pointer = sp[flag_code >> 1];
9274 tc_x86_parse_to_dw2regnum (&exp);
9275 gas_assert (exp.X_op == O_constant);
9276 sp_regno[flag_code >> 1] = exp.X_add_number;
9277 input_line_pointer = saved_input;
9280 cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
9281 cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
9285 x86_dwarf2_addr_size (void)
9287 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
9288 if (x86_elf_abi == X86_64_X32_ABI)
9291 return bfd_arch_bits_per_address (stdoutput) / 8;
9295 i386_elf_section_type (const char *str, size_t len)
9297 if (flag_code == CODE_64BIT
9298 && len == sizeof ("unwind") - 1
9299 && strncmp (str, "unwind", 6) == 0)
9300 return SHT_X86_64_UNWIND;
9307 i386_solaris_fix_up_eh_frame (segT sec)
9309 if (flag_code == CODE_64BIT)
9310 elf_section_type (sec) = SHT_X86_64_UNWIND;
9316 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
9320 exp.X_op = O_secrel;
9321 exp.X_add_symbol = symbol;
9322 exp.X_add_number = 0;
9323 emit_expr (&exp, size);
9327 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9328 /* For ELF on x86-64, add support for SHF_X86_64_LARGE. */
9331 x86_64_section_letter (int letter, char **ptr_msg)
9333 if (flag_code == CODE_64BIT)
9336 return SHF_X86_64_LARGE;
9338 *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
9341 *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
9346 x86_64_section_word (char *str, size_t len)
9348 if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
9349 return SHF_X86_64_LARGE;
9355 handle_large_common (int small ATTRIBUTE_UNUSED)
9357 if (flag_code != CODE_64BIT)
9359 s_comm_internal (0, elf_common_parse);
9360 as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
9364 static segT lbss_section;
9365 asection *saved_com_section_ptr = elf_com_section_ptr;
9366 asection *saved_bss_section = bss_section;
9368 if (lbss_section == NULL)
9370 flagword applicable;
9372 subsegT subseg = now_subseg;
9374 /* The .lbss section is for local .largecomm symbols. */
9375 lbss_section = subseg_new (".lbss", 0);
9376 applicable = bfd_applicable_section_flags (stdoutput);
9377 bfd_set_section_flags (stdoutput, lbss_section,
9378 applicable & SEC_ALLOC);
9379 seg_info (lbss_section)->bss = 1;
9381 subseg_set (seg, subseg);
9384 elf_com_section_ptr = &_bfd_elf_large_com_section;
9385 bss_section = lbss_section;
9387 s_comm_internal (0, elf_common_parse);
9389 elf_com_section_ptr = saved_com_section_ptr;
9390 bss_section = saved_bss_section;
9393 #endif /* OBJ_ELF || OBJ_MAYBE_ELF */